From a29e7fb2fe1a7aa8de8687e2c2e74a5256a9345d Mon Sep 17 00:00:00 2001
From: Justin Donnelly
Date: Sun, 8 Nov 2020 10:04:30 -0500
Subject: [PATCH] init

---
 keycloak-values.yaml | 398 +++++
 mongodb-consolo.yaml | 521 +++++++
 mongodb-consolo.zip | Bin 0 -> 110544 bytes
 mongodb/.helmignore | 21 +
 mongodb/Chart.yaml | 24 +
 mongodb/README.md | 565 +++++++
 mongodb/charts/common/.helmignore | 22 +
 mongodb/charts/common/Chart.yaml | 22 +
 mongodb/charts/common/README.md | 286 ++++
 .../charts/common/templates/_affinities.tpl | 94 ++
 .../charts/common/templates/_capabilities.tpl | 33 +
 mongodb/charts/common/templates/_errors.tpl | 20 +
 mongodb/charts/common/templates/_images.tpl | 43 +
 mongodb/charts/common/templates/_labels.tpl | 18 +
 mongodb/charts/common/templates/_names.tpl | 32 +
 mongodb/charts/common/templates/_secrets.tpl | 49 +
 mongodb/charts/common/templates/_storage.tpl | 23 +
 .../charts/common/templates/_tplvalues.tpl | 13 +
 mongodb/charts/common/templates/_utils.tpl | 45 +
 .../charts/common/templates/_validations.tpl | 278 ++++
 mongodb/charts/common/templates/_warnings.tpl | 14 +
 mongodb/charts/common/values.yaml | 3 +
 mongodb/mongodb-values.yaml | 460 ++++++
 mongodb/mongodb/.helmignore | 21 +
 mongodb/mongodb/Chart.yaml | 21 +
 mongodb/mongodb/README.md | 318 ++++
 .../docker-entrypoint-initdb.d/README.md | 3 +
 mongodb/mongodb/templates/NOTES.txt | 75 +
 mongodb/mongodb/templates/_helpers.tpl | 265 ++++
 mongodb/mongodb/templates/configmap.yaml | 14 +
 .../templates/deployment-standalone.yaml | 305 ++++
 mongodb/mongodb/templates/ingress.yaml | 33 +
 .../templates/initialization-configmap.yaml | 13 +
 .../poddisruptionbudget-arbiter-rs.yaml | 27 +
 .../poddisruptionbudget-secondary-rs.yaml | 27 +
 .../templates/prometheus-alerting-rule.yaml | 17 +
 .../templates/prometheus-service-monitor.yaml | 35 +
 mongodb/mongodb/templates/pvc-standalone.yaml | 20 +
 mongodb/mongodb/templates/secrets.yaml | 32 +
 mongodb/mongodb/templates/serviceaccount.yml | 13 +
 .../templates/statefulset-arbiter-rs.yaml | 191 +++
 .../templates/statefulset-primary-rs.yaml | 317 ++++
 .../templates/statefulset-secondary-rs.yaml | 285 ++++
 .../mongodb/templates/svc-headless-rs.yaml | 23 +
 mongodb/mongodb/templates/svc-primary-rs.yaml | 44 +
 mongodb/mongodb/templates/svc-standalone.yaml | 43 +
 mongodb/mongodb/values-production.yaml | 513 +++++++
 mongodb/mongodb/values.schema.json | 147 ++
 mongodb/mongodb/values.yaml | 515 +++++++
 mongodb/requirements.lock | 6 +
 mongodb/requirements.yaml | 6 +
 mongodb/templates/NOTES.txt | 193 +++
 mongodb/templates/_helpers.tpl | 266 ++++
 mongodb/templates/arbiter/configmap.yaml | 12 +
 mongodb/templates/arbiter/headless-svc.yaml | 21 +
 mongodb/templates/arbiter/pdb.yaml | 19 +
 mongodb/templates/arbiter/statefulset.yaml | 181 +++
 mongodb/templates/configmap.yaml | 12 +
 .../templates/initialization-configmap.yaml | 11 +
 mongodb/templates/metrics-svc.yaml | 21 +
 mongodb/templates/prometheusrule.yaml | 14 +
 .../replicaset/external-access-svc.yaml | 45 +
 .../templates/replicaset/headless-svc.yaml | 22 +
 mongodb/templates/replicaset/pdb.yaml | 19 +
 .../replicaset/scripts-configmap.yaml | 88 ++
 mongodb/templates/replicaset/statefulset.yaml | 375 +++++
 mongodb/templates/role.yaml | 17 +
 mongodb/templates/rolebinding.yaml | 16 +
 mongodb/templates/secrets.yaml | 30 +
 mongodb/templates/serviceaccount.yaml | 10 +
 mongodb/templates/servicemonitor.yaml | 26 +
 mongodb/templates/standalone/dep-sts.yaml | 313 ++++
 mongodb/templates/standalone/pvc.yaml | 18 +
 mongodb/templates/standalone/svc.yaml | 37 +
mongodb/values-production.yaml | 908 +++++++++++ mongodb/values.schema.json | 167 +++ mongodb/values.yaml | 908 +++++++++++ qliksense/.helmignore | 2 + qliksense/Chart.yaml | 7 + qliksense/README.md | 179 +++ qliksense/charts/api-keys/.helmignore | 2 + qliksense/charts/api-keys/Chart.yaml | 9 + qliksense/charts/api-keys/README.md | 85 ++ .../api-keys/charts/messaging/.helmignore | 1 + .../api-keys/charts/messaging/Chart.yaml | 17 + .../api-keys/charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../api-keys/charts/messaging/nats/Chart.yaml | 17 + .../api-keys/charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 163 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + 
.../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../api-keys/charts/messaging/values.yaml | 474 ++++++ .../api-keys/charts/mongodb/.helmignore | 1 + .../charts/api-keys/charts/mongodb/Chart.yaml | 20 + .../charts/api-keys/charts/mongodb/OWNERS | 12 + .../charts/api-keys/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../api-keys/charts/mongodb/values.yaml | 213 +++ .../api-keys/charts/qlikcommon/.helmignore | 21 + .../api-keys/charts/qlikcommon/Chart.yaml | 9 + .../api-keys/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../api-keys/charts/qlikcommon/values.yaml | 4 + .../charts/api-keys/charts/redis/.helmignore | 3 + .../charts/api-keys/charts/redis/Chart.yaml | 20 + .../charts/api-keys/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../api-keys/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + 
.../api-keys/charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../api-keys/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../api-keys/charts/redis/values.schema.json | 168 +++ .../charts/api-keys/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/api-keys/requirements.yaml | 17 + .../charts/api-keys/templates/manifest.yaml | 69 + qliksense/charts/api-keys/values.yaml | 158 ++ qliksense/charts/audit/.helmignore | 1 + qliksense/charts/audit/Chart.yaml | 10 + qliksense/charts/audit/README.md | 115 ++ .../charts/audit/charts/messaging/Chart.yaml | 17 + .../charts/audit/charts/messaging/README.md | 241 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 133 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 290 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 191 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 87 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 302 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 133 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 290 ++++ .../audit/charts/messaging/nats/Chart.yaml | 17 + .../audit/charts/messaging/nats/README.md | 191 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 87 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + 
.../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../audit/charts/messaging/nats/values.yaml | 302 ++++ .../audit/charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/audit/charts/messaging/values.yaml | 320 ++++ .../charts/audit/charts/minio/.helmignore | 21 + .../charts/audit/charts/minio/Chart.yaml | 19 + qliksense/charts/audit/charts/minio/README.md | 227 +++ .../audit/charts/minio/templates/NOTES.txt | 44 + .../minio/templates/_helper_create_bucket.txt | 75 + .../audit/charts/minio/templates/_helpers.tpl | 43 + .../charts/minio/templates/configmap.yaml | 140 ++ .../charts/minio/templates/deployment.yaml | 134 ++ .../audit/charts/minio/templates/ingress.yaml | 39 + .../charts/minio/templates/networkpolicy.yaml | 25 + .../post-install-create-bucket-job.yaml | 47 + .../audit/charts/minio/templates/pvc.yaml | 27 + .../audit/charts/minio/templates/secrets.yaml | 16 + .../audit/charts/minio/templates/service.yaml | 42 + .../charts/minio/templates/statefulset.yaml | 99 ++ .../charts/audit/charts/minio/values.yaml | 239 +++ .../charts/audit/charts/mongodb/.helmignore | 1 + .../charts/audit/charts/mongodb/Chart.yaml | 20 + qliksense/charts/audit/charts/mongodb/OWNERS | 12 + .../charts/audit/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../audit/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/audit/charts/mongodb/values.yaml | 213 +++ .../audit/charts/qlikcommon/.helmignore | 21 + .../charts/audit/charts/qlikcommon/Chart.yaml | 9 + .../charts/audit/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 93 ++ .../qlikcommon/templates/_deployment.yaml | 72 + .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 42 + .../templates/_persistentvolumeclaims.yaml | 19 + 
.../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 39 + .../audit/charts/qlikcommon/values.yaml | 4 + qliksense/charts/audit/requirements.yaml | 18 + .../charts/audit/templates/manifest.yaml | 56 + qliksense/charts/audit/values.yaml | 188 +++ qliksense/charts/chronos-worker/.helmignore | 1 + qliksense/charts/chronos-worker/Chart.yaml | 9 + qliksense/charts/chronos-worker/README.md | 86 ++ .../chronos-worker/charts/redis/.helmignore | 3 + .../chronos-worker/charts/redis/Chart.yaml | 20 + .../chronos-worker/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../charts/redis/values.schema.json | 168 +++ .../chronos-worker/charts/redis/values.yaml | 631 ++++++++ .../charts/chronos-worker/requirements.yaml | 5 + .../charts/chronos-worker/templates/NOTES.txt | 0 .../chronos-worker/templates/_helpers.tpl | 51 + .../deny-external-egress-traffic.yaml | 29 + .../chronos-worker/templates/deployment.yaml | 68 + .../templates/redis-secret.yaml | 21 + .../charts/chronos-worker/templates/svc.yaml | 21 + qliksense/charts/chronos-worker/values.yaml | 150 ++ qliksense/charts/chronos/.helmignore | 1 + qliksense/charts/chronos/Chart.yaml | 9 + qliksense/charts/chronos/README.md | 86 ++ .../charts/chronos/charts/mongodb/.helmignore | 1 + .../charts/chronos/charts/mongodb/Chart.yaml | 20 + .../charts/chronos/charts/mongodb/OWNERS | 12 + .../charts/chronos/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + 
.../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/chronos/charts/mongodb/values.yaml | 213 +++ .../charts/chronos/charts/redis/.helmignore | 3 + .../charts/chronos/charts/redis/Chart.yaml | 20 + .../charts/chronos/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../chronos/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../chronos/charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../chronos/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../chronos/charts/redis/values.schema.json | 168 +++ .../charts/chronos/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/chronos/requirements.yaml | 9 + qliksense/charts/chronos/templates/NOTES.txt | 0 .../charts/chronos/templates/_helpers.tpl | 68 + .../charts/chronos/templates/deployment.yaml | 113 ++ .../chronos/templates/mongo-secret.yaml | 20 + qliksense/charts/chronos/templates/rbac.yaml | 39 + .../chronos/templates/redis-secret.yaml | 20 + qliksense/charts/chronos/templates/sa.yaml | 9 + qliksense/charts/chronos/templates/svc.yaml | 21 + qliksense/charts/chronos/values.yaml | 161 ++ qliksense/charts/collections/.helmignore | 2 + qliksense/charts/collections/Chart.yaml | 8 + qliksense/charts/collections/README.md | 79 + .../collections/charts/messaging/Chart.yaml | 17 + .../collections/charts/messaging/README.md | 235 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 123 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 287 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + 
.../charts/messaging/charts/nats/README.md | 190 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 85 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 297 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 123 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 287 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 190 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 85 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 297 ++++ .../charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../collections/charts/messaging/values.yaml | 218 +++ .../collections/charts/mongodb/.helmignore | 1 + .../collections/charts/mongodb/Chart.yaml | 20 + .../charts/collections/charts/mongodb/OWNERS | 12 + .../collections/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../collections/charts/mongodb/values.yaml | 213 +++ .../charts/collections/requirements.yaml | 9 + .../charts/collections/templates/_helper.tpl | 53 + .../collections/templates/deployment.yaml | 131 ++ .../charts/collections/templates/hpa.yml | 26 + .../charts/collections/templates/ingress.yaml | 31 + .../collections/templates/mongo-secret.yaml | 13 + .../charts/collections/templates/service.yaml | 24 + 
.../collections/templates/token-secret.yaml | 13 + qliksense/charts/collections/values.yaml | 204 +++ qliksense/charts/data-connections/.helmignore | 1 + qliksense/charts/data-connections/Chart.yaml | 8 + qliksense/charts/data-connections/README.md | 85 ++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../data-connections/charts/mongodb/OWNERS | 12 + .../data-connections/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../charts/data-connections/requirements.yaml | 5 + .../data-connections/templates/NOTES.txt | 19 + .../data-connections/templates/_helpers.tpl | 60 + .../templates/deployment.yaml | 130 ++ .../templates/encryption-secret.yaml | 9 + .../data-connections/templates/ingress.yaml | 30 + .../templates/keys-secret.yaml | 8 + .../templates/mongo-secret.yaml | 13 + .../data-connections/templates/service.yaml | 27 + qliksense/charts/data-connections/values.yaml | 182 +++ .../charts/data-connector-common/Chart.yaml | 7 + .../charts/data-connector-common/README.md | 53 + .../charts/qlikcommon/.helmignore | 21 + .../charts/qlikcommon/Chart.yaml | 9 + .../charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../charts/qlikcommon/values.yaml | 4 + 
.../data-connector-common/requirements.yaml | 5 + .../templates/manifest.yaml | 71 + .../charts/data-connector-common/values.yaml | 54 + .../charts/data-connector-nfs/.helmignore | 1 + .../charts/data-connector-nfs/Chart.yaml | 10 + qliksense/charts/data-connector-nfs/README.md | 183 +++ .../charts/qlikcommon/.helmignore | 21 + .../charts/qlikcommon/Chart.yaml | 9 + .../charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../charts/qlikcommon/values.yaml | 4 + .../data-connector-nfs/requirements.yaml | 5 + .../templates/manifest.yaml | 65 + .../charts/data-connector-nfs/values.yaml | 104 ++ .../charts/data-connector-odbc/.helmignore | 1 + .../charts/data-connector-odbc/Chart.yaml | 7 + .../charts/data-connector-odbc/README.md | 55 + .../templates/_helpers.tpl | 61 + .../templates/deployment-cmd.yaml | 79 + .../templates/deployment-rld.yaml | 85 ++ .../templates/hpa-rld.yaml | 32 + .../templates/network.yaml | 35 + .../templates/service-cmd.yaml | 30 + .../templates/service-rld.yaml | 30 + .../charts/data-connector-odbc/values.yaml | 94 ++ .../charts/data-connector-qwc/.helmignore | 1 + .../charts/data-connector-qwc/Chart.yaml | 7 + qliksense/charts/data-connector-qwc/README.md | 66 + .../charts/redis/.helmignore | 3 + .../charts/redis/Chart.yaml | 20 + .../data-connector-qwc/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + 
.../redis/templates/prometheusrule.yaml | 23 + .../charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../charts/redis/values.schema.json | 168 +++ .../charts/redis/values.yaml | 631 ++++++++ .../data-connector-qwc/requirements.yaml | 5 + .../data-connector-qwc/templates/_helpers.tpl | 78 + .../templates/deployment-cmd.yaml | 118 ++ .../templates/deployment-rld.yaml | 133 ++ .../templates/deployment-web.yaml | 48 + .../data-connector-qwc/templates/hpa-rld.yaml | 32 + .../data-connector-qwc/templates/ingress.yaml | 36 + .../data-connector-qwc/templates/network.yaml | 78 + .../templates/secrets-connector-cfg.yaml | 9 + .../templates/secrets-keys.yaml | 17 + .../templates/service-cmd.yaml | 30 + .../templates/service-rld.yaml | 30 + .../templates/service-web.yaml | 26 + .../charts/data-connector-qwc/values.yaml | 179 +++ .../charts/data-connector-rest/.helmignore | 1 + .../charts/data-connector-rest/Chart.yaml | 7 + .../charts/data-connector-rest/README.md | 54 + .../templates/_helpers.tpl | 61 + .../templates/deployment-cmd.yaml | 81 + .../templates/deployment-rld.yaml | 94 ++ .../templates/hpa-rld.yaml | 32 + .../templates/network.yaml | 38 + .../templates/service-cmd.yaml | 30 + .../templates/service-rld.yaml | 30 + .../charts/data-connector-rest/values.yaml | 99 ++ .../charts/data-connector-sap-sql/.helmignore | 1 + .../charts/data-connector-sap-sql/Chart.yaml | 7 + .../charts/data-connector-sap-sql/README.md | 51 + .../templates/_helpers.tpl | 61 + .../templates/deployment-cmd.yaml | 72 + .../templates/deployment-rld.yaml | 80 + .../templates/hpa-rld.yaml | 32 + .../templates/network.yaml | 44 + .../templates/service-cmd.yaml | 30 + .../templates/service-rld.yaml | 30 + .../charts/data-connector-sap-sql/values.yaml | 88 ++ .../charts/data-connector-sfdc/.helmignore | 1 + .../charts/data-connector-sfdc/Chart.yaml | 7 + .../charts/data-connector-sfdc/README.md | 54 + .../templates/_helpers.tpl | 61 + .../templates/deployment-cmd.yaml | 75 + .../templates/deployment-rld.yaml | 88 ++ .../templates/hpa-rld.yaml | 32 + .../templates/network.yaml | 34 + .../templates/service-cmd.yaml | 30 + .../templates/service-rld.yaml | 30 + .../charts/data-connector-sfdc/values.yaml | 98 ++ qliksense/charts/data-prep/.helmignore | 1 + qliksense/charts/data-prep/Chart.yaml | 9 + qliksense/charts/data-prep/README.md | 78 + .../charts/data-prep/charts/redis/.helmignore | 3 + .../charts/data-prep/charts/redis/Chart.yaml | 20 + .../charts/data-prep/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../data-prep/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ 
.../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../data-prep/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../data-prep/charts/redis/values.schema.json | 168 +++ .../charts/data-prep/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/data-prep/requirements.yaml | 6 + .../charts/data-prep/templates/_helpers.tpl | 61 + .../data-prep/templates/deployment.yaml | 119 ++ qliksense/charts/data-prep/templates/hpa.yaml | 22 + .../charts/data-prep/templates/ingress.yaml | 35 + qliksense/charts/data-prep/templates/pvc.yaml | 19 + .../data-prep/templates/redis-secret.yaml | 20 + .../charts/data-prep/templates/service.yaml | 24 + qliksense/charts/data-prep/values.yaml | 201 +++ qliksense/charts/data-rest-source/.helmignore | 1 + qliksense/charts/data-rest-source/Chart.yaml | 8 + qliksense/charts/data-rest-source/README.md | 61 + .../data-rest-source/templates/_helpers.tpl | 50 + .../templates/deployment.yaml | 68 + .../data-rest-source/templates/hpa.yaml | 18 + .../templates/pre-stop-hook.yaml | 18 + .../data-rest-source/templates/service.yaml | 24 + qliksense/charts/data-rest-source/values.yaml | 75 + qliksense/charts/dcaas-web/.helmignore | 1 + qliksense/charts/dcaas-web/Chart.yaml | 7 + qliksense/charts/dcaas-web/README.md | 51 + .../charts/dcaas-web/templates/_helpers.tpl | 60 + .../dcaas-web/templates/deployment.yaml | 41 + .../charts/dcaas-web/templates/ingress.yaml | 34 + .../charts/dcaas-web/templates/service.yaml | 24 + qliksense/charts/dcaas-web/values.yaml | 79 + qliksense/charts/dcaas/.helmignore | 1 + qliksense/charts/dcaas/Chart.yaml | 7 + qliksense/charts/dcaas/README.md | 66 + .../charts/dcaas/charts/redis/.helmignore | 3 + .../charts/dcaas/charts/redis/Chart.yaml | 20 + qliksense/charts/dcaas/charts/redis/README.md | 497 ++++++ .../dcaas/charts/redis/ci/default-values.yaml | 1 + .../dcaas/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../dcaas/charts/redis/templates/NOTES.txt | 104 ++ .../dcaas/charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../dcaas/charts/redis/templates/psp.yaml | 42 + 
.../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../dcaas/charts/redis/templates/secret.yaml | 14 + .../dcaas/charts/redis/values-production.yaml | 630 ++++++++ .../dcaas/charts/redis/values.schema.json | 168 +++ .../charts/dcaas/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/dcaas/requirements.yaml | 6 + qliksense/charts/dcaas/templates/_helpers.tpl | 61 + .../charts/dcaas/templates/deployment.yaml | 70 + qliksense/charts/dcaas/templates/ingress.yaml | 38 + .../charts/dcaas/templates/redis-secret.yaml | 24 + qliksense/charts/dcaas/templates/service.yaml | 25 + qliksense/charts/dcaas/values.yaml | 129 ++ qliksense/charts/edge-auth/.helmignore | 2 + qliksense/charts/edge-auth/Chart.yaml | 7 + qliksense/charts/edge-auth/README.md | 175 +++ .../edge-auth/charts/messaging/.helmignore | 1 + .../edge-auth/charts/messaging/Chart.yaml | 17 + .../edge-auth/charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../edge-auth/charts/messaging/nats/README.md | 194 +++ 
.../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 163 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../edge-auth/charts/messaging/values.yaml | 474 ++++++ .../edge-auth/charts/mongodb/.helmignore | 1 + .../edge-auth/charts/mongodb/Chart.yaml | 20 + .../charts/edge-auth/charts/mongodb/OWNERS | 12 + .../charts/edge-auth/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../edge-auth/charts/mongodb/values.yaml | 213 +++ .../charts/nginx-ingress/.helmignore | 21 + .../edge-auth/charts/nginx-ingress/Chart.yaml | 18 + .../edge-auth/charts/nginx-ingress/README.md | 187 +++ .../charts/nginx-ingress/templates/NOTES.txt | 64 + .../nginx-ingress/templates/_helpers.tpl | 61 + .../nginx-ingress/templates/clusterrole.yaml | 69 + .../templates/clusterrolebinding.yaml | 19 + .../templates/controller-configmap.yaml | 18 + .../templates/controller-daemonset.yaml | 174 +++ .../templates/controller-deployment.yaml | 167 +++ .../templates/controller-hpa.yaml | 22 + .../templates/controller-metrics-service.yaml | 38 + .../controller-poddisruptionbudget.yaml | 17 + .../templates/controller-service.yaml | 65 + .../templates/controller-stats-service.yaml | 38 + .../templates/default-backend-deployment.yaml | 66 + .../default-backend-poddisruptionbudget.yaml | 17 + .../templates/default-backend-service.yaml | 37 + .../templates/headers-configmap.yaml | 14 + .../charts/nginx-ingress/templates/role.yaml | 44 + .../nginx-ingress/templates/rolebinding.yaml | 19 + .../templates/serviceaccount.yaml | 11 + .../templates/tcp-configmap.yaml | 14 + .../templates/udp-configmap.yaml | 14 + .../charts/nginx-ingress/values.yaml | 305 ++++ .../edge-auth/charts/qlikcommon/.helmignore | 21 + .../edge-auth/charts/qlikcommon/Chart.yaml | 9 + .../edge-auth/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + 
.../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 99 ++ .../qlikcommon/templates/_deployment.yaml | 94 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.tpl | 32 + .../qlikcommon/templates/_networkpolicy.yaml | 89 ++ .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_storageclass.yaml | 38 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../edge-auth/charts/qlikcommon/values.yaml | 4 + .../charts/edge-auth/charts/redis/.helmignore | 3 + .../charts/edge-auth/charts/redis/Chart.yaml | 20 + .../charts/edge-auth/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../edge-auth/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../edge-auth/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../edge-auth/charts/redis/values.schema.json | 168 +++ .../charts/edge-auth/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/edge-auth/requirements.yaml | 26 + .../charts/edge-auth/templates/manifest.yaml | 136 ++ qliksense/charts/edge-auth/values.yaml | 250 ++++ qliksense/charts/elastic-infra/.helmignore | 1 + qliksense/charts/elastic-infra/Chart.yaml | 7 + qliksense/charts/elastic-infra/README.md | 61 + .../elastic-infra/charts/mongodb/.helmignore | 1 + 
.../elastic-infra/charts/mongodb/Chart.yaml | 20 + .../elastic-infra/charts/mongodb/OWNERS | 12 + .../elastic-infra/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../elastic-infra/charts/mongodb/values.yaml | 213 +++ .../charts/nginx-ingress/.helmignore | 21 + .../charts/nginx-ingress/Chart.yaml | 13 + .../charts/nginx-ingress/README.md | 361 +++++ .../charts/nginx-ingress/templates/NOTES.txt | 71 + .../nginx-ingress/templates/_helpers.tpl | 181 +++ .../templates/addheaders-configmap.yaml | 14 + .../job-patch/clusterrole.yaml | 30 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 55 + .../job-patch/job-patchWebhook.yaml | 57 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 23 + .../job-patch/rolebinding.yaml | 23 + .../job-patch/serviceaccount.yaml | 15 + .../validating-webhook.yaml | 31 + .../nginx-ingress/templates/clusterrole.yaml | 71 + .../templates/clusterrolebinding.yaml | 19 + .../templates/controller-configmap.yaml | 22 + .../templates/controller-daemonset.yaml | 257 ++++ .../templates/controller-deployment.yaml | 255 ++++ .../templates/controller-hpa.yaml | 34 + .../templates/controller-metrics-service.yaml | 45 + .../controller-poddisruptionbudget.yaml | 21 + .../templates/controller-prometheusrules.yaml | 24 + .../templates/controller-psp.yaml | 80 + .../templates/controller-role.yaml | 91 ++ .../templates/controller-rolebinding.yaml | 19 + .../templates/controller-service.yaml | 92 ++ .../templates/controller-serviceaccount.yaml | 11 + .../templates/controller-servicemonitor.yaml | 38 + .../templates/controller-webhook-service.yaml | 42 + .../templates/default-backend-deployment.yaml | 110 ++ .../default-backend-poddisruptionbudget.yaml | 19 + .../templates/default-backend-psp.yaml | 35 + .../templates/default-backend-role.yaml | 16 + .../default-backend-rolebinding.yaml | 19 + .../templates/default-backend-service.yaml | 43 + .../default-backend-serviceaccount.yaml | 11 + .../templates/proxyheaders-configmap.yaml | 18 + .../templates/tcp-configmap.yaml | 14 + .../templates/udp-configmap.yaml | 14 + .../charts/nginx-ingress/values.yaml | 576 +++++++ .../elastic-infra/nginx-ingress/.helmignore | 21 + .../elastic-infra/nginx-ingress/Chart.yaml | 14 + .../elastic-infra/nginx-ingress/README.md | 361 +++++ .../nginx-ingress/templates/NOTES.txt | 71 + .../nginx-ingress/templates/_helpers.tpl | 181 +++ .../templates/addheaders-configmap.yaml | 14 + .../job-patch/clusterrole.yaml | 30 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 55 + .../job-patch/job-patchWebhook.yaml | 57 + 
.../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 23 + .../job-patch/rolebinding.yaml | 23 + .../job-patch/serviceaccount.yaml | 15 + .../validating-webhook.yaml | 31 + .../nginx-ingress/templates/clusterrole.yaml | 71 + .../templates/clusterrolebinding.yaml | 19 + .../templates/controller-configmap.yaml | 22 + .../templates/controller-daemonset.yaml | 257 ++++ .../templates/controller-deployment.yaml | 255 ++++ .../templates/controller-hpa.yaml | 34 + .../templates/controller-metrics-service.yaml | 45 + .../controller-poddisruptionbudget.yaml | 21 + .../templates/controller-prometheusrules.yaml | 24 + .../templates/controller-psp.yaml | 80 + .../templates/controller-role.yaml | 91 ++ .../templates/controller-rolebinding.yaml | 19 + .../templates/controller-service.yaml | 92 ++ .../templates/controller-serviceaccount.yaml | 11 + .../templates/controller-servicemonitor.yaml | 38 + .../templates/controller-webhook-service.yaml | 42 + .../templates/default-backend-deployment.yaml | 110 ++ .../default-backend-poddisruptionbudget.yaml | 19 + .../templates/default-backend-psp.yaml | 35 + .../templates/default-backend-role.yaml | 16 + .../default-backend-rolebinding.yaml | 19 + .../templates/default-backend-service.yaml | 43 + .../default-backend-serviceaccount.yaml | 11 + .../templates/proxyheaders-configmap.yaml | 18 + .../templates/tcp-configmap.yaml | 14 + .../templates/udp-configmap.yaml | 14 + .../elastic-infra/nginx-ingress/values.yaml | 576 +++++++ .../charts/elastic-infra/requirements.yaml | 9 + .../elastic-infra/templates/_helper.tpl | 31 + .../elastic-infra/templates/ingress.yaml | 560 +++++++ .../elastic-infra/templates/tls-secret.yaml | 18 + .../elastic-infra/templates/tlscert.yaml | 23 + qliksense/charts/elastic-infra/values.yaml | 177 +++ qliksense/charts/encryption/.helmignore | 1 + qliksense/charts/encryption/Chart.yaml | 7 + qliksense/charts/encryption/README.md | 77 + .../charts/encryption/templates/_helpers.tpl | 50 + .../encryption/templates/deployment.yaml | 117 ++ .../charts/encryption/templates/service.yaml | 24 + .../encryption/templates/serviceaccount.yaml | 10 + .../encryption/templates/token-secret.yaml | 9 + qliksense/charts/encryption/values.yaml | 121 ++ qliksense/charts/engine/.helmignore | 22 + qliksense/charts/engine/Chart.yaml | 8 + qliksense/charts/engine/README.md | 277 ++++ .../charts/engine/charts/messaging/Chart.yaml | 17 + .../charts/engine/charts/messaging/README.md | 284 ++++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 133 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 254 ++++ .../charts/nats-streaming/values.yaml | 290 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 99 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 161 ++ 
.../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 133 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 254 ++++ .../messaging/nats-streaming/values.yaml | 290 ++++ .../engine/charts/messaging/nats/Chart.yaml | 17 + .../engine/charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 99 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 161 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../engine/charts/messaging/nats/values.yaml | 306 ++++ .../engine/charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../engine/charts/messaging/values.yaml | 343 +++++ .../charts/engine/charts/redis-ha/Chart.yaml | 20 + .../charts/engine/charts/redis-ha/OWNERS | 6 + .../charts/engine/charts/redis-ha/README.md | 228 +++ .../redis-ha/ci/haproxy-enabled-values.yaml | 10 + .../charts/redis-ha/templates/NOTES.txt | 25 + .../charts/redis-ha/templates/_configs.tpl | 275 ++++ .../charts/redis-ha/templates/_helpers.tpl | 83 ++ .../redis-ha/templates/redis-auth-secret.yaml | 12 + .../templates/redis-ha-announce-service.yaml | 41 + .../templates/redis-ha-configmap.yaml | 25 + .../redis-ha-exporter-script-configmap.yaml | 11 + .../redis-ha/templates/redis-ha-pdb.yaml | 15 + .../redis-ha/templates/redis-ha-role.yaml | 19 + .../templates/redis-ha-rolebinding.yaml | 19 + .../redis-ha/templates/redis-ha-service.yaml | 35 + .../templates/redis-ha-serviceaccount.yaml | 12 + .../templates/redis-ha-servicemonitor.yaml | 35 + .../templates/redis-ha-statefulset.yaml | 318 ++++ .../templates/redis-haproxy-deployment.yaml | 149 ++ .../templates/redis-haproxy-service.yaml | 42 + .../redis-haproxy-serviceaccount.yaml | 12 + .../redis-haproxy-servicemonitor.yaml | 34 + .../tests/test-redis-ha-configmap.yaml | 27 + .../templates/tests/test-redis-ha-pod.yaml | 20 + .../charts/engine/charts/redis-ha/values.yaml | 359 +++++ .../charts/engine/charts/redis/.helmignore | 3 + .../charts/engine/charts/redis/Chart.yaml | 20 + .../charts/engine/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../engine/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../engine/charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + 
.../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../engine/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../engine/charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../engine/charts/redis/values.schema.json | 168 +++ .../charts/engine/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/engine/requirements.yaml | 15 + qliksense/charts/engine/templates/NOTES.txt | 36 + .../charts/engine/templates/_helpers.tpl | 125 ++ .../engine/templates/deployment-args.yaml | 499 +++++++ .../templates/deployment-stateless.yaml | 163 ++ .../charts/engine/templates/deployments.yaml | 252 ++++ .../engine/templates/engine-reload.yaml | 416 ++++++ .../engine/templates/engine-template.yaml | 21 + .../engine/templates/engine-variants.yaml | 29 + .../charts/engine/templates/engines.yaml | 21 + .../engine/templates/hpa-stateless.yaml | 26 + qliksense/charts/engine/templates/hpa.yaml | 22 + .../charts/engine/templates/ingress.yaml | 57 + .../engine/templates/networkpolicy.yaml | 90 ++ .../engine/templates/pre-stop-hook-cm.yaml | 18 + qliksense/charts/engine/templates/pvc.yaml | 13 + .../charts/engine/templates/rules-cm.yaml | 9 + qliksense/charts/engine/templates/sc.yaml | 7 + .../charts/engine/templates/secret-jwt.yaml | 7 + qliksense/charts/engine/templates/secret.yaml | 43 + .../charts/engine/templates/service.yaml | 37 + qliksense/charts/engine/values.yaml | 807 ++++++++++ qliksense/charts/eventing/.helmignore | 1 + qliksense/charts/eventing/Chart.yaml | 11 + qliksense/charts/eventing/README.md | 73 + .../eventing/charts/messaging/.helmignore | 1 + .../eventing/charts/messaging/Chart.yaml | 17 + .../eventing/charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml 
| 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../eventing/charts/messaging/nats/Chart.yaml | 17 + .../eventing/charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 163 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../eventing/charts/messaging/values.yaml | 489 ++++++ qliksense/charts/eventing/requirements.yaml | 5 + .../charts/eventing/templates/_helpers.tpl | 60 + .../charts/eventing/templates/deployment.yaml | 104 ++ .../charts/eventing/templates/ingress.yaml | 26 + .../charts/eventing/templates/secret.yaml | 7 + .../charts/eventing/templates/service.yaml | 24 + qliksense/charts/eventing/values.yaml | 135 ++ qliksense/charts/feature-flags/.helmignore | 1 + qliksense/charts/feature-flags/Chart.yaml | 7 + qliksense/charts/feature-flags/README.md | 158 ++ .../feature-flags/templates/_helpers.tpl | 60 + .../feature-flags/templates/configmap.yaml | 9 + .../feature-flags/templates/deployment.yaml | 82 + .../charts/feature-flags/templates/hpa.yaml | 32 + .../feature-flags/templates/ingress.yaml | 30 + .../feature-flags/templates/service.yaml | 27 + qliksense/charts/feature-flags/values.yaml | 348 +++++ qliksense/charts/generic-links/Chart.yaml | 8 + qliksense/charts/generic-links/README.md | 63 + .../generic-links/charts/messaging/Chart.yaml | 17 + .../generic-links/charts/messaging/README.md | 241 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 133 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 290 ++++ 
.../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 191 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 87 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 302 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 133 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 290 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 191 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 87 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 302 ++++ .../charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/messaging/values.yaml | 320 ++++ .../generic-links/charts/mongodb/.helmignore | 1 + .../generic-links/charts/mongodb/Chart.yaml | 20 + .../generic-links/charts/mongodb/OWNERS | 12 + .../generic-links/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../generic-links/charts/mongodb/values.yaml | 213 +++ .../charts/generic-links/requirements.yaml | 9 + .../generic-links/templates/_helper.tpl | 53 + .../generic-links/templates/deployment.yaml | 118 ++ .../generic-links/templates/ingress.yml | 28 + .../generic-links/templates/mongo-secret.yaml | 13 + .../generic-links/templates/service.yaml | 24 + 
.../generic-links/templates/token-secret.yaml | 13 + .../generic-links/templates/webrisk.yaml | 9 + qliksense/charts/generic-links/values.yaml | 177 +++ qliksense/charts/geo-operations/.helmignore | 1 + qliksense/charts/geo-operations/Chart.yaml | 7 + qliksense/charts/geo-operations/README.md | 63 + .../geo-operations/templates/_helpers.tpl | 50 + .../geo-operations/templates/deployment.yaml | 77 + .../charts/geo-operations/templates/hpa.yaml | 32 + .../geo-operations/templates/network.yaml | 62 + .../geo-operations/templates/service.yaml | 28 + qliksense/charts/geo-operations/values.yaml | 122 ++ qliksense/charts/groups/.helmignore | 2 + qliksense/charts/groups/Chart.yaml | 9 + qliksense/charts/groups/README.md | 79 + .../charts/groups/charts/mongodb/.helmignore | 1 + .../charts/groups/charts/mongodb/Chart.yaml | 20 + qliksense/charts/groups/charts/mongodb/OWNERS | 12 + .../charts/groups/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../groups/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/groups/charts/mongodb/values.yaml | 213 +++ .../groups/charts/qlikcommon/.helmignore | 21 + .../groups/charts/qlikcommon/Chart.yaml | 9 + .../charts/groups/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../groups/charts/qlikcommon/values.yaml | 4 + .../charts/groups/charts/redis/.helmignore | 3 + 
.../charts/groups/charts/redis/Chart.yaml | 20 + .../charts/groups/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../groups/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../groups/charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../groups/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../groups/charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../groups/charts/redis/values.schema.json | 168 +++ .../charts/groups/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/groups/requirements.yaml | 13 + .../charts/groups/templates/manifest.yaml | 76 + qliksense/charts/groups/values.yaml | 132 ++ qliksense/charts/hub/.helmignore | 1 + qliksense/charts/hub/Chart.yaml | 7 + qliksense/charts/hub/README.md | 57 + qliksense/charts/hub/templates/_helper.tpl | 54 + .../charts/hub/templates/deployment.yaml | 50 + qliksense/charts/hub/templates/ingress.yaml | 35 + qliksense/charts/hub/templates/service.yaml | 24 + qliksense/charts/hub/values.yaml | 78 + .../charts/identity-providers/.helmignore | 1 + .../charts/identity-providers/Chart.yaml | 8 + qliksense/charts/identity-providers/README.md | 166 +++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../identity-providers/charts/mongodb/OWNERS | 12 + .../charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../identity-providers/requirements.yaml | 5 + .../identity-providers/templates/_helpers.tpl | 101 ++ .../templates/deployment.yaml | 120 ++ 
.../templates/ext-deployment.yaml | 113 ++ .../identity-providers/templates/ext-hpa.yaml | 27 + .../templates/ext-ingress.yaml | 61 + .../templates/ext-mongo-secret.yaml | 15 + .../templates/ext-secret.yaml | 11 + .../templates/ext-service.yaml | 29 + .../identity-providers/templates/hpa.yaml | 26 + .../identity-providers/templates/secret.yaml | 10 + .../identity-providers/templates/service.yaml | 27 + .../charts/identity-providers/values.yaml | 303 ++++ qliksense/charts/insights/.helmignore | 1 + qliksense/charts/insights/Chart.yaml | 8 + qliksense/charts/insights/README.md | 98 ++ .../insights/charts/mongodb/.helmignore | 1 + .../charts/insights/charts/mongodb/Chart.yaml | 20 + .../charts/insights/charts/mongodb/OWNERS | 12 + .../charts/insights/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../insights/charts/mongodb/values.yaml | 213 +++ qliksense/charts/insights/requirements.yaml | 5 + qliksense/charts/insights/templates/NOTES.txt | 0 .../charts/insights/templates/_helpers.tpl | 60 + .../charts/insights/templates/deployment.yaml | 96 ++ .../charts/insights/templates/ingress.yaml | 101 ++ .../charts/insights/templates/insights.yaml | 21 + .../insights/templates/mongo-secret.yaml | 18 + .../templates/prune-graph-cronjob.yaml | 43 + qliksense/charts/insights/values.yaml | 154 ++ qliksense/charts/keys/.helmignore | 1 + qliksense/charts/keys/Chart.yaml | 6 + qliksense/charts/keys/README.md | 96 ++ qliksense/charts/keys/templates/_helpers.tpl | 50 + .../charts/keys/templates/configmap.yaml | 12 + .../charts/keys/templates/deployment.yaml | 88 ++ qliksense/charts/keys/templates/hpa.yaml | 26 + qliksense/charts/keys/templates/service.yaml | 27 + qliksense/charts/keys/values.yaml | 183 +++ qliksense/charts/licenses/.helmignore | 1 + qliksense/charts/licenses/Chart.yaml | 7 + qliksense/charts/licenses/README.md | 93 ++ .../licenses/charts/mongodb/.helmignore | 1 + .../charts/licenses/charts/mongodb/Chart.yaml | 20 + .../charts/licenses/charts/mongodb/OWNERS | 12 + .../charts/licenses/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml 
| 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../licenses/charts/mongodb/values.yaml | 213 +++ qliksense/charts/licenses/requirements.yaml | 5 + .../charts/licenses/templates/_helper.tpl | 78 + .../charts/licenses/templates/deployment.yaml | 175 +++ qliksense/charts/licenses/templates/hpa.yaml | 22 + .../charts/licenses/templates/ingress.yaml | 28 + .../licenses/templates/mongo-secret.yaml | 13 + .../templates/proxy-password-secret.yaml | 11 + qliksense/charts/licenses/templates/rbac.yaml | 40 + .../licenses/templates/rollbar-secret.yaml | 11 + .../charts/licenses/templates/service.yaml | 24 + .../licenses/templates/serviceaccount.yaml | 10 + qliksense/charts/licenses/values.yaml | 270 ++++ qliksense/charts/locale/.helmignore | 1 + qliksense/charts/locale/Chart.yaml | 8 + qliksense/charts/locale/README.md | 57 + qliksense/charts/locale/templates/_helper.tpl | 53 + .../charts/locale/templates/deployment.yaml | 35 + .../charts/locale/templates/ingress.yaml | 27 + .../charts/locale/templates/service.yaml | 24 + qliksense/charts/locale/values.yaml | 76 + .../charts/management-console/.helmignore | 1 + .../charts/management-console/Chart.yaml | 7 + qliksense/charts/management-console/README.md | 58 + .../management-console/templates/NOTES.txt | 1 + .../management-console/templates/_helper.tpl | 53 + .../templates/deployment.yaml | 50 + .../management-console/templates/ingress.yaml | 36 + .../management-console/templates/service.yaml | 24 + .../charts/management-console/values.yaml | 88 ++ qliksense/charts/messaging/.helmignore | 1 + qliksense/charts/messaging/Chart.yaml | 17 + qliksense/charts/messaging/README.md | 290 ++++ .../message-delivery-monitor/Chart.yaml | 9 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../message-delivery-monitor-secret.yaml | 10 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 93 ++ .../charts/nats-streaming/.minikube_ignore | 0 .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 182 +++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 110 ++ .../templates/monitoring-svc.yaml | 28 + .../networkpolicy-nats-streaming.yaml | 51 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 326 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 339 +++++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 77 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/nats-secret.yaml | 15 + .../nats/templates/networkpolicy-nats.yaml | 51 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 545 +++++++ qliksense/charts/messaging/requirements.yaml | 16 + 
.../charts/messaging/templates/_helper.tpl | 38 + qliksense/charts/messaging/values.yaml | 21 + qliksense/charts/mongodb/.helmignore | 1 + qliksense/charts/mongodb/Chart.yaml | 20 + qliksense/charts/mongodb/OWNERS | 12 + qliksense/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + qliksense/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ qliksense/charts/mongodb/values.yaml | 213 +++ qliksense/charts/nl-broker/.helmignore | 1 + qliksense/charts/nl-broker/Chart.yaml | 11 + qliksense/charts/nl-broker/README.md | 68 + .../nl-broker/charts/mongodb/.helmignore | 1 + .../nl-broker/charts/mongodb/Chart.yaml | 20 + .../charts/nl-broker/charts/mongodb/OWNERS | 12 + .../charts/nl-broker/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../nl-broker/charts/mongodb/values.yaml | 213 +++ qliksense/charts/nl-broker/requirements.yaml | 5 + .../charts/nl-broker/templates/_helper.tpl | 61 + .../nl-broker/templates/deployment.yaml | 100 ++ .../charts/nl-broker/templates/ingress.yaml | 28 + .../nl-broker/templates/mongo-secret.yaml | 13 + .../charts/nl-broker/templates/service.yaml | 24 + qliksense/charts/nl-broker/values.yaml | 127 ++ qliksense/charts/nl-parser/.helmignore | 1 + qliksense/charts/nl-parser/Chart.yaml | 11 + qliksense/charts/nl-parser/README.md | 65 + .../charts/nl-parser/templates/_helper.tpl | 61 + .../nl-parser/templates/deployment.yaml | 55 + .../charts/nl-parser/templates/ingress.yaml | 28 + .../charts/nl-parser/templates/service.yaml | 24 + qliksense/charts/nl-parser/values.yaml | 93 ++ qliksense/charts/notification-prep/Chart.yaml | 10 + qliksense/charts/notification-prep/README.md | 57 + .../charts/messaging/Chart.yaml | 17 + .../charts/messaging/README.md | 235 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 123 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + 
.../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 287 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 190 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 85 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 297 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 123 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 287 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 190 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 85 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 297 ++++ .../charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/messaging/values.yaml | 215 +++ .../notification-prep/requirements.yaml | 5 + .../notification-prep/templates/_helper.tpl | 53 + .../templates/deployment.yaml | 99 ++ .../notification-prep/templates/hpa.yaml | 26 + .../notification-prep/templates/ingress.yaml | 26 + .../notification-prep/templates/secret.yaml | 10 + .../notification-prep/templates/service.yaml | 24 + .../charts/notification-prep/values.yaml | 132 ++ qliksense/charts/odag/.helmignore | 1 + qliksense/charts/odag/Chart.yaml | 10 + qliksense/charts/odag/README.md | 75 + .../charts/odag/charts/mongodb/.helmignore | 1 + .../charts/odag/charts/mongodb/Chart.yaml | 20 + qliksense/charts/odag/charts/mongodb/OWNERS | 12 + .../charts/odag/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../odag/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + 
.../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/odag/charts/mongodb/values.yaml | 213 +++ qliksense/charts/odag/requirements.yaml | 5 + qliksense/charts/odag/templates/NOTES.txt | 19 + qliksense/charts/odag/templates/_helpers.tpl | 81 + .../charts/odag/templates/deployment.yaml | 131 ++ qliksense/charts/odag/templates/ingress.yaml | 72 + .../charts/odag/templates/mongo-secret.yaml | 13 + qliksense/charts/odag/templates/rbac.yaml | 39 + qliksense/charts/odag/templates/sa.yaml | 9 + qliksense/charts/odag/templates/service.yaml | 23 + qliksense/charts/odag/values.yaml | 138 ++ qliksense/charts/policy-decisions/.helmignore | 1 + qliksense/charts/policy-decisions/Chart.yaml | 7 + qliksense/charts/policy-decisions/README.md | 84 ++ .../charts/messaging/.helmignore | 1 + .../charts/messaging/Chart.yaml | 17 + .../charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + 
.../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 163 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/messaging/values.yaml | 493 ++++++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../policy-decisions/charts/mongodb/OWNERS | 12 + .../policy-decisions/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../policy-decisions/charts/redis/.helmignore | 3 + .../policy-decisions/charts/redis/Chart.yaml | 20 + .../policy-decisions/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../charts/redis/values.schema.json 
| 168 +++ .../policy-decisions/charts/redis/values.yaml | 631 ++++++++ .../charts/policy-decisions/requirements.yaml | 13 + .../policy-decisions/templates/_helper.tpl | 53 + .../policy-decisions/templates/configmap.yaml | 9 + .../templates/deployment.yaml | 159 ++ .../charts/policy-decisions/templates/hpa.yml | 26 + .../policy-decisions/templates/ingress.yaml | 34 + .../templates/mongo-secret.yaml | 13 + .../templates/redis-secret.yaml | 27 + .../policy-decisions/templates/service.yaml | 24 + .../templates/token-secret.yaml | 13 + qliksense/charts/policy-decisions/values.yaml | 203 +++ qliksense/charts/precedents/.helmignore | 1 + qliksense/charts/precedents/Chart.yaml | 7 + qliksense/charts/precedents/README.md | 101 ++ .../precedents/charts/mongodb/.helmignore | 1 + .../precedents/charts/mongodb/Chart.yaml | 20 + .../charts/precedents/charts/mongodb/OWNERS | 12 + .../precedents/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../precedents/charts/mongodb/values.yaml | 213 +++ qliksense/charts/precedents/requirements.yaml | 5 + .../charts/precedents/templates/_helpers.tpl | 60 + .../precedents/templates/deployment.yaml | 138 ++ .../charts/precedents/templates/hpa.yaml | 26 + .../charts/precedents/templates/ingress.yaml | 27 + .../precedents/templates/mongo-secret.yaml | 13 + .../templates/private-key-secret.yaml | 7 + .../templates/prune-graph-cronjob.yaml | 50 + .../precedents/templates/service-cayley.yaml | 24 + .../charts/precedents/templates/service.yaml | 24 + qliksense/charts/precedents/values.yaml | 168 +++ .../charts/qix-data-connection/.helmignore | 1 + .../charts/qix-data-connection/Chart.yaml | 8 + .../charts/qix-data-connection/README.md | 81 + .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../qix-data-connection/charts/mongodb/OWNERS | 12 + .../charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + 
.../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../qix-data-connection/requirements.yaml | 5 + .../qix-data-connection/templates/NOTES.txt | 19 + .../templates/_helpers.tpl | 60 + .../templates/deployment.yaml | 126 ++ .../templates/ingress.yaml | 26 + .../templates/keys-secret.yaml | 10 + .../templates/mongo-secret.yaml | 13 + .../templates/service.yaml | 27 + .../charts/qix-data-connection/values.yaml | 156 ++ qliksense/charts/qix-datafiles/.helmignore | 1 + qliksense/charts/qix-datafiles/Chart.yaml | 10 + qliksense/charts/qix-datafiles/README.md | 91 ++ .../qix-datafiles/charts/mongodb/.helmignore | 1 + .../qix-datafiles/charts/mongodb/Chart.yaml | 20 + .../qix-datafiles/charts/mongodb/OWNERS | 12 + .../qix-datafiles/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../qix-datafiles/charts/mongodb/values.yaml | 213 +++ .../charts/qix-datafiles/requirements.yaml | 5 + .../qix-datafiles/templates/_helpers.tpl | 83 ++ .../qix-datafiles/templates/deployment.yaml | 116 ++ .../charts/qix-datafiles/templates/hpa.yaml | 18 + .../qix-datafiles/templates/ingress.yaml | 30 + .../qix-datafiles/templates/mongo-secret.yaml | 13 + .../charts/qix-datafiles/templates/pvc.yaml | 13 + .../charts/qix-datafiles/templates/sc.yaml | 7 + .../qix-datafiles/templates/service.yaml | 28 + .../qix-datafiles/templates/token-secret.yaml | 13 + qliksense/charts/qix-datafiles/values.yaml | 178 +++ qliksense/charts/qix-sessions/.helmignore | 1 + qliksense/charts/qix-sessions/Chart.yaml | 8 + qliksense/charts/qix-sessions/README.md | 123 ++ .../qix-sessions/charts/mongodb/.helmignore | 1 + .../qix-sessions/charts/mongodb/Chart.yaml | 20 + .../charts/qix-sessions/charts/mongodb/OWNERS | 12 + .../qix-sessions/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + 
.../charts/mongodb/values-production.yaml | 213 +++ .../qix-sessions/charts/mongodb/values.yaml | 213 +++ .../qix-sessions/charts/redis/.helmignore | 3 + .../qix-sessions/charts/redis/Chart.yaml | 20 + .../qix-sessions/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../charts/redis/values.schema.json | 168 +++ .../qix-sessions/charts/redis/values.yaml | 631 ++++++++ .../charts/qix-sessions/requirements.yaml | 9 + .../charts/qix-sessions/templates/_helper.tpl | 52 + .../qix-sessions/templates/app-secrets.yaml | 14 + .../qix-sessions/templates/configmaps.yaml | 17 + .../charts/qix-sessions/templates/crd.yaml | 22 + .../qix-sessions/templates/deployment.yaml | 75 + .../qix-sessions/templates/env-variables.yaml | 133 ++ .../charts/qix-sessions/templates/hpa.yaml | 17 + .../qix-sessions/templates/ingress.yaml | 31 + .../qix-sessions/templates/mongo-secret.yaml | 10 + .../charts/qix-sessions/templates/rbac.yaml | 83 ++ .../qix-sessions/templates/redis-secret.yaml | 27 + .../charts/qix-sessions/templates/sa.yaml | 9 + .../qix-sessions/templates/service.yaml | 86 ++ qliksense/charts/qix-sessions/values.yaml | 358 +++++ qliksense/charts/qlikcommon/.helmignore | 21 + qliksense/charts/qlikcommon/Chart.yaml | 9 + qliksense/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 99 ++ .../qlikcommon/templates/_deployment.yaml | 94 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.tpl | 32 + .../qlikcommon/templates/_networkpolicy.yaml | 89 ++ 
.../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_storageclass.yaml | 38 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + qliksense/charts/qlikcommon/values.yaml | 4 + qliksense/charts/qlikview-client/.helmignore | 1 + qliksense/charts/qlikview-client/Chart.yaml | 7 + qliksense/charts/qlikview-client/README.md | 58 + .../qlikview-client/templates/NOTES.txt | 8 + .../qlikview-client/templates/_helper.tpl | 53 + .../qlikview-client/templates/deployment.yaml | 39 + .../qlikview-client/templates/ingress.yaml | 60 + .../qlikview-client/templates/service.yaml | 24 + qliksense/charts/qlikview-client/values.yaml | 75 + qliksense/charts/quotas/.helmignore | 1 + qliksense/charts/quotas/Chart.yaml | 7 + qliksense/charts/quotas/README.md | 71 + qliksense/charts/quotas/templates/_helper.tpl | 53 + .../charts/quotas/templates/deployment.yaml | 83 ++ .../charts/quotas/templates/ingress.yaml | 27 + .../charts/quotas/templates/service.yaml | 24 + qliksense/charts/quotas/values.yaml | 117 ++ qliksense/charts/redis/.helmignore | 3 + qliksense/charts/redis/Chart.yaml | 20 + qliksense/charts/redis/README.md | 497 ++++++ qliksense/charts/redis/ci/default-values.yaml | 1 + qliksense/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + qliksense/charts/redis/templates/NOTES.txt | 104 ++ qliksense/charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + qliksense/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + qliksense/charts/redis/templates/secret.yaml | 14 + qliksense/charts/redis/values-production.yaml | 630 ++++++++ qliksense/charts/redis/values.schema.json | 168 +++ qliksense/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/reload-tasks/.helmignore | 1 + qliksense/charts/reload-tasks/Chart.yaml | 7 + qliksense/charts/reload-tasks/README.md | 90 ++ .../reload-tasks/charts/mongodb/.helmignore | 1 + .../reload-tasks/charts/mongodb/Chart.yaml | 20 + .../charts/reload-tasks/charts/mongodb/OWNERS | 12 + .../reload-tasks/charts/mongodb/README.md | 
158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../reload-tasks/charts/mongodb/values.yaml | 213 +++ .../charts/reload-tasks/requirements.yaml | 5 + .../charts/reload-tasks/templates/_helper.tpl | 60 + .../reload-tasks/templates/deployment.yaml | 121 ++ .../charts/reload-tasks/templates/hpa.yaml | 32 + .../reload-tasks/templates/ingress.yaml | 27 + .../reload-tasks/templates/mongo-secret.yaml | 13 + .../templates/private-key-secret.yaml | 7 + .../reload-tasks/templates/service.yaml | 24 + qliksense/charts/reload-tasks/values.yaml | 156 ++ qliksense/charts/reloads/.helmignore | 1 + qliksense/charts/reloads/Chart.yaml | 7 + qliksense/charts/reloads/README.md | 107 ++ .../reloads/charts/messaging/Chart.yaml | 17 + .../charts/reloads/charts/messaging/README.md | 100 ++ .../charts/nats-operator/.helmignore | 24 + .../messaging/charts/nats-operator/Chart.yaml | 19 + .../nats-operator/config/client-auth.json | 25 + .../charts/nats-operator/templates/NOTES.txt | 17 + .../nats-operator/templates/_helpers.tpl | 21 + .../nats-operator/templates/clusterrole.yaml | 32 + .../templates/clusterrolebinding.yaml | 14 + .../templates/customresourcedefinition.yaml | 35 + .../nats-operator/templates/deployment.yaml | 116 ++ .../nats-operator/templates/natscluster.yaml | 29 + .../nats-operator/templates/secret.yaml | 15 + .../templates/serviceaccount.yaml | 6 + .../charts/nats-operator/values.yaml | 140 ++ .../charts/nats-streaming-operator/Chart.yaml | 17 + .../charts/nats-streaming-operator/README.md | 1 + .../templates/_helpers.tpl | 21 + .../templates/clusterrole.yaml | 41 + .../templates/clusterrolebinding.yaml | 14 + .../templates/customresourcedefinition.yaml | 17 + .../templates/deployment.yaml | 50 + .../templates/natsstreamingcluster.yaml | 9 + .../templates/serviceaccount.yaml | 6 + .../nats-streaming-operator/values.yaml | 59 + .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 123 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 56 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 193 +++ .../charts/nats-streaming/values.yaml | 246 +++ .../charts/messaging/charts/nats/.helmignore | 21 + .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/OWNERS | 12 + .../charts/messaging/charts/nats/README.md | 165 ++ .../messaging/charts/nats/templates/NOTES.txt | 78 + .../charts/nats/templates/_helpers.tpl | 40 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 84 ++ 
.../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 129 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/nats/values-production.yaml | 251 ++++ .../charts/messaging/charts/nats/values.yaml | 255 ++++ .../charts/messaging/config/client-auth.json | 25 + .../messaging/nats-operator/.helmignore | 24 + .../charts/messaging/nats-operator/Chart.yaml | 18 + .../charts/messaging/nats-operator/README.md | 138 ++ .../nats-operator/config/client-auth.json | 25 + .../nats-operator/templates/NOTES.txt | 17 + .../nats-operator/templates/_helpers.tpl | 21 + .../nats-operator/templates/clusterrole.yaml | 32 + .../templates/clusterrolebinding.yaml | 14 + .../templates/customresourcedefinition.yaml | 35 + .../nats-operator/templates/deployment.yaml | 116 ++ .../nats-operator/templates/natscluster.yaml | 29 + .../nats-operator/templates/secret.yaml | 15 + .../templates/serviceaccount.yaml | 6 + .../messaging/nats-operator/values.yaml | 140 ++ .../nats-streaming-operator/Chart.yaml | 15 + .../nats-streaming-operator/README.md | 1 + .../templates/_helpers.tpl | 21 + .../templates/clusterrole.yaml | 41 + .../templates/clusterrolebinding.yaml | 14 + .../templates/customresourcedefinition.yaml | 17 + .../templates/deployment.yaml | 50 + .../templates/natsstreamingcluster.yaml | 9 + .../templates/serviceaccount.yaml | 6 + .../nats-streaming-operator/values.yaml | 59 + .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 123 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 56 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 193 +++ .../messaging/nats-streaming/values.yaml | 246 +++ .../charts/messaging/requirements.yaml | 17 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 21 + .../templates/natscluster-secret.yaml | 15 + .../messaging/templates/natscluster.yaml | 15 + .../templates/natsstreamingcluster.yaml | 14 + .../networkpolicy-nats-streaming.yaml | 45 + .../templates/networkpolicy-nats.yaml | 47 + .../reloads/charts/messaging/values.yaml | 162 ++ .../charts/reloads/charts/mongodb/.helmignore | 1 + .../charts/reloads/charts/mongodb/Chart.yaml | 20 + .../charts/reloads/charts/mongodb/OWNERS | 12 + .../charts/reloads/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/reloads/charts/mongodb/values.yaml | 213 +++ 
qliksense/charts/reloads/requirements.yaml | 9 + .../charts/reloads/templates/_helper.tpl | 59 + .../charts/reloads/templates/deployment.yaml | 170 +++ qliksense/charts/reloads/templates/hpa.yaml | 26 + .../charts/reloads/templates/ingress.yaml | 27 + .../reloads/templates/mongo-secret.yaml | 13 + .../reloads/templates/pre-stop-hook.yaml | 18 + .../reloads/templates/private-key-secret.yaml | 7 + .../charts/reloads/templates/service.yaml | 24 + qliksense/charts/reloads/values.yaml | 193 +++ qliksense/charts/reporting/.helmignore | 1 + qliksense/charts/reporting/Chart.yaml | 11 + qliksense/charts/reporting/README.md | 155 ++ .../charts/reporting/charts/redis/.helmignore | 3 + .../charts/reporting/charts/redis/Chart.yaml | 20 + .../charts/reporting/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../reporting/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ .../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../reporting/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../reporting/charts/redis/values.schema.json | 168 +++ .../charts/reporting/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/reporting/requirements.yaml | 5 + .../charts/reporting/templates/NOTES.txt | 0 .../charts/reporting/templates/_helpers.tpl | 115 ++ .../reporting/templates/deployment.yaml | 174 +++ qliksense/charts/reporting/templates/hpa.yaml | 22 + .../charts/reporting/templates/ingress.yaml | 40 + .../templates/redis-network-policy.yaml | 41 + .../reporting/templates/redis-secret.yaml | 21 + .../reporting/templates/service-cmp.yaml | 18 + .../reporting/templates/service-rpr.yaml | 18 + .../reporting/templates/service-rwr.yaml | 13 + .../charts/reporting/templates/service.yaml | 29 + qliksense/charts/reporting/values.yaml | 340 +++++ qliksense/charts/resource-library/.helmignore | 1 + qliksense/charts/resource-library/Chart.yaml | 8 + qliksense/charts/resource-library/README.md | 84 ++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../resource-library/charts/mongodb/OWNERS | 12 + .../resource-library/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + 
.../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../charts/resource-library/requirements.yaml | 5 + .../resource-library/templates/NOTES.txt | 19 + .../resource-library/templates/_helper.tpl | 83 ++ .../templates/deployment.yaml | 131 ++ .../resource-library/templates/ingress.yaml | 39 + .../templates/mongo-secret.yaml | 13 + .../resource-library/templates/pvc.yaml | 13 + .../charts/resource-library/templates/sc.yaml | 7 + .../resource-library/templates/secret.yaml | 7 + .../resource-library/templates/service.yaml | 27 + qliksense/charts/resource-library/values.yaml | 221 +++ qliksense/charts/sense-client/.helmignore | 1 + qliksense/charts/sense-client/Chart.yaml | 8 + qliksense/charts/sense-client/README.md | 59 + .../charts/sense-client/templates/NOTES.txt | 8 + .../charts/sense-client/templates/_helper.tpl | 60 + .../sense-client/templates/configmap.yaml | 17 + .../sense-client/templates/deployment.yaml | 69 + .../sense-client/templates/ingress.yaml | 64 + .../sense-client/templates/service.yaml | 24 + qliksense/charts/sense-client/values.yaml | 105 ++ qliksense/charts/sharing/.helmignore | 1 + qliksense/charts/sharing/Chart.yaml | 9 + qliksense/charts/sharing/README.md | 98 ++ .../charts/sharing/charts/mongodb/.helmignore | 1 + .../charts/sharing/charts/mongodb/Chart.yaml | 20 + .../charts/sharing/charts/mongodb/OWNERS | 12 + .../charts/sharing/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/sharing/charts/mongodb/values.yaml | 213 +++ .../charts/sharing/charts/redis/.helmignore | 3 + .../charts/sharing/charts/redis/Chart.yaml | 20 + .../charts/sharing/charts/redis/README.md | 497 ++++++ .../charts/redis/ci/default-values.yaml | 1 + .../sharing/charts/redis/ci/dev-values.yaml | 9 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/insecure-sentinel-values.yaml | 524 +++++++ .../redis/ci/production-sentinel-values.yaml | 524 +++++++ .../charts/redis/ci/production-values.yaml | 525 +++++++ 
.../charts/redis/ci/redis-lib-values.yaml | 13 + .../redis/ci/redisgraph-module-values.yaml | 10 + .../sharing/charts/redis/templates/NOTES.txt | 104 ++ .../charts/redis/templates/_helpers.tpl | 355 +++++ .../charts/redis/templates/configmap.yaml | 52 + .../charts/redis/templates/headless-svc.yaml | 24 + .../redis/templates/health-configmap.yaml | 134 ++ .../redis/templates/metrics-prometheus.yaml | 30 + .../charts/redis/templates/metrics-svc.yaml | 30 + .../charts/redis/templates/networkpolicy.yaml | 73 + .../redis/templates/prometheusrule.yaml | 23 + .../sharing/charts/redis/templates/psp.yaml | 42 + .../templates/redis-master-statefulset.yaml | 419 ++++++ .../redis/templates/redis-master-svc.yaml | 39 + .../charts/redis/templates/redis-role.yaml | 21 + .../redis/templates/redis-rolebinding.yaml | 18 + .../redis/templates/redis-serviceaccount.yaml | 11 + .../templates/redis-slave-statefulset.yaml | 437 ++++++ .../redis/templates/redis-slave-svc.yaml | 40 + .../templates/redis-with-sentinel-svc.yaml | 40 + .../charts/redis/templates/secret.yaml | 14 + .../charts/redis/values-production.yaml | 630 ++++++++ .../sharing/charts/redis/values.schema.json | 168 +++ .../charts/sharing/charts/redis/values.yaml | 631 ++++++++ qliksense/charts/sharing/requirements.yaml | 9 + .../charts/sharing/templates/_helpers.tpl | 85 ++ .../charts/sharing/templates/deployment.yaml | 149 ++ qliksense/charts/sharing/templates/hpa.yaml | 26 + .../charts/sharing/templates/ingress.yaml | 48 + .../sharing/templates/mongo-secret.yaml | 20 + .../templates/prune-graph-cronjob.yaml | 45 + qliksense/charts/sharing/templates/pvc.yaml | 13 + .../sharing/templates/redis-secret.yaml | 21 + qliksense/charts/sharing/templates/sc.yaml | 7 + .../charts/sharing/templates/service.yaml | 24 + .../sharing/templates/token-secret.yaml | 8 + qliksense/charts/sharing/values.yaml | 316 ++++ qliksense/charts/spaces/.helmignore | 1 + qliksense/charts/spaces/Chart.yaml | 9 + qliksense/charts/spaces/README.md | 87 ++ .../charts/spaces/charts/messaging/Chart.yaml | 17 + .../charts/spaces/charts/messaging/README.md | 241 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 133 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 290 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 191 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 87 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 302 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 133 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + 
.../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 290 ++++ .../spaces/charts/messaging/nats/Chart.yaml | 17 + .../spaces/charts/messaging/nats/README.md | 191 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 87 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../spaces/charts/messaging/nats/values.yaml | 302 ++++ .../spaces/charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../spaces/charts/messaging/values.yaml | 320 ++++ .../charts/spaces/charts/mongodb/.helmignore | 1 + .../charts/spaces/charts/mongodb/Chart.yaml | 20 + qliksense/charts/spaces/charts/mongodb/OWNERS | 12 + .../charts/spaces/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../spaces/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/spaces/charts/mongodb/values.yaml | 213 +++ qliksense/charts/spaces/requirements.yaml | 10 + qliksense/charts/spaces/templates/_helper.tpl | 53 + .../charts/spaces/templates/deployment.yaml | 142 ++ qliksense/charts/spaces/templates/hpa.yaml | 26 + .../charts/spaces/templates/ingress.yaml | 60 + .../charts/spaces/templates/mongo-secret.yaml | 13 + .../charts/spaces/templates/service.yaml | 24 + .../charts/spaces/templates/token-secret.yaml | 13 + qliksense/charts/spaces/values.yaml | 237 +++ qliksense/charts/subscriptions/Chart.yaml | 11 + qliksense/charts/subscriptions/README.md | 69 + .../subscriptions/charts/mongodb/.helmignore | 1 + .../subscriptions/charts/mongodb/Chart.yaml | 20 + .../subscriptions/charts/mongodb/OWNERS | 12 + .../subscriptions/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + 
.../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../subscriptions/charts/mongodb/values.yaml | 213 +++ .../charts/subscriptions/requirements.yaml | 5 + .../subscriptions/templates/_helpers.tpl | 60 + .../subscriptions/templates/deployment.yaml | 117 ++ .../subscriptions/templates/ingress.yaml | 26 + .../subscriptions/templates/mongo-secret.yml | 13 + .../subscriptions/templates/secret.yaml | 7 + .../subscriptions/templates/service.yaml | 24 + qliksense/charts/subscriptions/values.yaml | 148 ++ .../charts/temporary-contents/.helmignore | 1 + .../charts/temporary-contents/Chart.yaml | 9 + qliksense/charts/temporary-contents/README.md | 131 ++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../temporary-contents/charts/mongodb/OWNERS | 12 + .../charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../temporary-contents/requirements.yaml | 5 + .../temporary-contents/templates/_helper.tpl | 76 + .../templates/deployment.yaml | 125 ++ .../temporary-contents/templates/ingress.yaml | 64 + .../templates/mongo-secret.yaml | 13 + .../temporary-contents/templates/pvc.yaml | 13 + .../temporary-contents/templates/sc.yaml | 7 + .../temporary-contents/templates/service.yaml | 24 + .../charts/temporary-contents/values.yaml | 202 +++ qliksense/charts/tenants/.helmignore | 2 + qliksense/charts/tenants/Chart.yaml | 9 + qliksense/charts/tenants/README.md | 68 + .../tenants/charts/messaging/.helmignore | 1 + .../tenants/charts/messaging/Chart.yaml | 17 + .../charts/tenants/charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ 
.../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 163 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 256 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../tenants/charts/messaging/nats/Chart.yaml | 17 + .../tenants/charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 163 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../tenants/charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../tenants/charts/messaging/values.yaml | 474 ++++++ .../charts/tenants/charts/mongodb/.helmignore | 1 + .../charts/tenants/charts/mongodb/Chart.yaml | 20 + .../charts/tenants/charts/mongodb/OWNERS | 12 + .../charts/tenants/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + 
.../charts/mongodb/values-production.yaml | 213 +++ .../charts/tenants/charts/mongodb/values.yaml | 213 +++ .../tenants/charts/qlikcommon/.helmignore | 21 + .../tenants/charts/qlikcommon/Chart.yaml | 9 + .../tenants/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml | 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../tenants/charts/qlikcommon/values.yaml | 4 + qliksense/charts/tenants/requirements.yaml | 13 + .../charts/tenants/templates/manifest.yaml | 73 + qliksense/charts/tenants/values.yaml | 130 ++ qliksense/charts/transport/Chart.yaml | 9 + qliksense/charts/transport/README.md | 69 + .../transport/charts/messaging/Chart.yaml | 17 + .../transport/charts/messaging/README.md | 235 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 123 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 287 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 190 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 85 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 297 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 123 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 287 ++++ 
.../charts/messaging/nats/Chart.yaml | 17 + .../transport/charts/messaging/nats/README.md | 190 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 85 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 297 ++++ .../charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../transport/charts/messaging/values.yaml | 215 +++ .../transport/charts/mongodb/.helmignore | 1 + .../transport/charts/mongodb/Chart.yaml | 20 + .../charts/transport/charts/mongodb/OWNERS | 12 + .../charts/transport/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../transport/charts/mongodb/values.yaml | 213 +++ qliksense/charts/transport/requirements.yaml | 9 + .../charts/transport/templates/_helper.tpl | 53 + .../transport/templates/deployment.yaml | 109 ++ qliksense/charts/transport/templates/hpa.yaml | 26 + .../charts/transport/templates/ingress.yaml | 26 + .../transport/templates/mongo-secret.yaml | 13 + .../charts/transport/templates/network.yaml | 52 + .../charts/transport/templates/service.yaml | 24 + qliksense/charts/transport/values.yaml | 200 +++ qliksense/charts/users/.helmignore | 4 + qliksense/charts/users/Chart.yaml | 9 + qliksense/charts/users/README.md | 60 + .../charts/users/charts/messaging/Chart.yaml | 17 + .../charts/users/charts/messaging/README.md | 241 +++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 133 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../charts/nats-streaming/values.yaml | 290 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 191 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + 
.../charts/nats/templates/configmap.yaml | 87 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 160 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 302 ++++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 133 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 77 + .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 247 +++ .../messaging/nats-streaming/values.yaml | 290 ++++ .../users/charts/messaging/nats/Chart.yaml | 17 + .../users/charts/messaging/nats/README.md | 191 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 87 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 160 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../users/charts/messaging/nats/values.yaml | 302 ++++ .../users/charts/messaging/requirements.yaml | 13 + .../charts/messaging/templates/_helper.tpl | 38 + .../messaging/templates/nats-secret.yaml | 19 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/users/charts/messaging/values.yaml | 320 ++++ .../charts/users/charts/mongodb/.helmignore | 1 + .../charts/users/charts/mongodb/Chart.yaml | 20 + qliksense/charts/users/charts/mongodb/OWNERS | 12 + .../charts/users/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../users/charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/users/charts/mongodb/values.yaml | 213 +++ .../users/charts/qlikcommon/.helmignore | 21 + .../charts/users/charts/qlikcommon/Chart.yaml | 9 + .../charts/users/charts/qlikcommon/README.md | 837 +++++++++++ .../qlikcommon/templates/_certificates.tpl | 32 + .../charts/qlikcommon/templates/_chartref.tpl | 14 + .../qlikcommon/templates/_configmap.yaml | 32 + .../qlikcommon/templates/_container.yaml | 98 ++ .../qlikcommon/templates/_deployment.yaml | 93 ++ .../charts/qlikcommon/templates/_envvar.tpl | 32 + .../charts/qlikcommon/templates/_fullname.tpl | 42 + .../charts/qlikcommon/templates/_hpa.yaml 
| 31 + .../charts/qlikcommon/templates/_image.tpl | 21 + .../charts/qlikcommon/templates/_ingress.yaml | 49 + .../qlikcommon/templates/_initContainer.yaml | 74 + .../qlikcommon/templates/_metadata.yaml | 35 + .../templates/_metadata_annotations.tpl | 23 + .../qlikcommon/templates/_metadata_labels.tpl | 28 + .../charts/qlikcommon/templates/_name.tpl | 29 + .../qlikcommon/templates/_networkpolicy.yaml | 52 + .../templates/_persistentvolumeclaim.yaml | 47 + .../templates/_persistentvolumeclaims.yaml | 27 + .../templates/_podSecurityPolicy.yaml | 55 + .../charts/qlikcommon/templates/_role.yaml | 23 + .../qlikcommon/templates/_rolebinding.yaml | 19 + .../charts/qlikcommon/templates/_secret.yaml | 45 + .../charts/qlikcommon/templates/_service.yaml | 25 + .../qlikcommon/templates/_serviceaccount.yaml | 11 + .../qlikcommon/templates/_statefulset.yaml | 44 + .../qlikcommon/templates/_transformers.tpl | 41 + .../charts/qlikcommon/templates/_util.tpl | 15 + .../charts/qlikcommon/templates/_volume.tpl | 62 + .../users/charts/qlikcommon/values.yaml | 4 + qliksense/charts/users/requirements.yaml | 13 + .../charts/users/templates/manifest.yaml | 70 + qliksense/charts/users/values.yaml | 134 ++ .../charts/web-notifications/.helmignore | 1 + qliksense/charts/web-notifications/Chart.yaml | 9 + qliksense/charts/web-notifications/README.md | 68 + .../charts/messaging/.helmignore | 1 + .../charts/messaging/Chart.yaml | 17 + .../charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 254 ++++ .../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 161 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + 
.../nats-streaming/templates/statefulset.yaml | 254 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 161 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../charts/messaging/values.yaml | 429 ++++++ .../charts/mongodb/.helmignore | 1 + .../charts/mongodb/Chart.yaml | 20 + .../web-notifications/charts/mongodb/OWNERS | 12 + .../charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 27 + .../charts/mongodb/values-production.yaml | 213 +++ .../charts/mongodb/values.yaml | 213 +++ .../web-notifications/requirements.yaml | 9 + .../web-notifications/templates/_helpers.tpl | 60 + .../templates/deployment.yaml | 115 ++ .../web-notifications/templates/ingress.yaml | 26 + .../templates/mongo-secret.yaml | 13 + .../web-notifications/templates/secret.yaml | 7 + .../web-notifications/templates/service.yaml | 24 + .../charts/web-notifications/values.yaml | 144 ++ qliksense/charts/web-security/Chart.yaml | 8 + qliksense/charts/web-security/README.md | 86 ++ .../web-security/charts/messaging/.helmignore | 1 + .../web-security/charts/messaging/Chart.yaml | 17 + .../web-security/charts/messaging/README.md | 288 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../charts/message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../charts/nats-streaming/Chart.yaml | 14 + .../messaging/charts/nats-streaming/README.md | 137 ++ .../charts/nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../charts/nats-streaming/templates/pvc.yaml | 13 + .../charts/nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 254 ++++ 
.../charts/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/charts/nats/Chart.yaml | 17 + .../charts/messaging/charts/nats/README.md | 194 +++ .../messaging/charts/nats/templates/NOTES.txt | 88 ++ .../charts/nats/templates/_helpers.tpl | 73 + .../charts/nats/templates/client-svc.yaml | 28 + .../charts/nats/templates/cluster-svc.yaml | 28 + .../charts/nats/templates/configmap.yaml | 114 ++ .../charts/nats/templates/headless-svc.yaml | 22 + .../charts/nats/templates/ingress.yaml | 36 + .../charts/nats/templates/monitoring-svc.yaml | 28 + .../charts/nats/templates/networkpolicy.yaml | 30 + .../charts/nats/templates/statefulset.yaml | 161 ++ .../charts/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/charts/nats/values.yaml | 306 ++++ .../message-delivery-monitor/Chart.yaml | 8 + .../message-delivery-monitor/README.md | 70 + .../templates/_helper.tpl | 47 + .../templates/deployment.yaml | 73 + .../templates/service.yaml | 24 + .../message-delivery-monitor/values.yaml | 90 ++ .../messaging/nats-streaming/Chart.yaml | 14 + .../charts/messaging/nats-streaming/README.md | 137 ++ .../nats-streaming/templates/NOTES.txt | 25 + .../nats-streaming/templates/_helpers.tpl | 101 ++ .../templates/monitoring-svc.yaml | 28 + .../templates/networkpolicy.yaml | 20 + .../nats-streaming/templates/pvc.yaml | 13 + .../nats-streaming/templates/sc.yaml | 7 + .../nats-streaming/templates/statefulset.yaml | 254 ++++ .../messaging/nats-streaming/values.yaml | 328 ++++ .../charts/messaging/nats/Chart.yaml | 17 + .../charts/messaging/nats/README.md | 194 +++ .../charts/messaging/nats/templates/NOTES.txt | 88 ++ .../messaging/nats/templates/_helpers.tpl | 73 + .../messaging/nats/templates/client-svc.yaml | 28 + .../messaging/nats/templates/cluster-svc.yaml | 28 + .../messaging/nats/templates/configmap.yaml | 114 ++ .../nats/templates/headless-svc.yaml | 22 + .../messaging/nats/templates/ingress.yaml | 36 + .../nats/templates/monitoring-svc.yaml | 28 + .../nats/templates/networkpolicy.yaml | 30 + .../messaging/nats/templates/statefulset.yaml | 161 ++ .../messaging/nats/templates/tls-secret.yaml | 18 + .../charts/messaging/nats/values.yaml | 306 ++++ .../charts/messaging/requirements.yaml | 19 + .../charts/messaging/templates/_helper.tpl | 38 + .../message-delivery-monitor-secret.yaml | 10 + .../messaging/templates/nats-secret.yaml | 15 + .../networkpolicy-nats-streaming.yaml | 51 + .../templates/networkpolicy-nats.yaml | 51 + .../web-security/charts/messaging/values.yaml | 470 ++++++ .../web-security/charts/mongodb/.helmignore | 1 + .../web-security/charts/mongodb/Chart.yaml | 20 + .../charts/web-security/charts/mongodb/OWNERS | 12 + .../web-security/charts/mongodb/README.md | 158 ++ .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/mongodb/templates/NOTES.txt | 66 + .../charts/mongodb/templates/_helpers.tpl | 77 + .../charts/mongodb/templates/configmap.yaml | 14 + .../templates/deployment-standalone.yaml | 143 ++ .../mongodb/templates/headless-svc-rs.yaml | 24 + .../templates/initialization-configmap.yaml | 13 + .../poddisruptionbudget-arbiter-rs.yaml | 18 + .../poddisruptionbudget-primary-rs.yaml | 18 + .../poddisruptionbudget-secondary-rs.yaml | 18 + .../mongodb/templates/pvc-standalone.yaml | 26 + .../charts/mongodb/templates/secrets.yaml | 34 + .../templates/statefulset-arbiter-rs.yaml | 121 ++ .../templates/statefulset-primary-rs.yaml | 174 +++ .../templates/statefulset-secondary-rs.yaml | 157 ++ .../mongodb/templates/svc-primary-rs.yaml | 28 + .../mongodb/templates/svc-standalone.yaml | 
27 + .../charts/mongodb/values-production.yaml | 213 +++ .../web-security/charts/mongodb/values.yaml | 213 +++ .../charts/sense-client/.helmignore | 1 + .../charts/sense-client/Chart.yaml | 8 + .../charts/sense-client/README.md | 59 + .../charts/sense-client/templates/NOTES.txt | 8 + .../charts/sense-client/templates/_helper.tpl | 60 + .../sense-client/templates/configmap.yaml | 17 + .../sense-client/templates/deployment.yaml | 69 + .../sense-client/templates/ingress.yaml | 64 + .../sense-client/templates/service.yaml | 24 + .../charts/sense-client/values.yaml | 105 ++ .../charts/web-security/requirements.yaml | 13 + .../charts/web-security/templates/_helper.tpl | 53 + .../web-security/templates/deployment.yaml | 126 ++ .../charts/web-security/templates/hpa.yaml | 26 + .../charts/web-security/templates/ingress.yml | 31 + .../web-security/templates/mongo-secret.yaml | 13 + .../templates/rollbar-secret.yaml | 11 + .../web-security/templates/service.yaml | 24 + .../web-security/templates/token-secret.yaml | 13 + qliksense/charts/web-security/values.yaml | 194 +++ qliksense/requirements.yaml | 247 +++ qliksense/templates/NOTES.txt | 304 ++++ qliksense/templates/_helpers.tpl | 41 + qliksense/templates/ca-cert-configmap.yaml | 24 + qliksense/templates/certs-job.yaml | 42 + qliksense/templates/certs-pvc.yaml | 16 + qliksense/templates/mongo-secret.yaml | 13 + qliksense/templates/qliksense-secrets.yaml | 30 + qliksense/templates/redis-secret.yaml | 26 + qliksense/templates/sc.yaml | 13 + qliksense/values.yaml | 1327 +++++++++++++++++ qseok-values.yaml | 146 ++ 3423 files changed, 286809 insertions(+) create mode 100644 keycloak-values.yaml create mode 100644 mongodb-consolo.yaml create mode 100644 mongodb-consolo.zip create mode 100755 mongodb/.helmignore create mode 100755 mongodb/Chart.yaml create mode 100755 mongodb/README.md create mode 100755 mongodb/charts/common/.helmignore create mode 100755 mongodb/charts/common/Chart.yaml create mode 100755 mongodb/charts/common/README.md create mode 100755 mongodb/charts/common/templates/_affinities.tpl create mode 100755 mongodb/charts/common/templates/_capabilities.tpl create mode 100755 mongodb/charts/common/templates/_errors.tpl create mode 100755 mongodb/charts/common/templates/_images.tpl create mode 100755 mongodb/charts/common/templates/_labels.tpl create mode 100755 mongodb/charts/common/templates/_names.tpl create mode 100755 mongodb/charts/common/templates/_secrets.tpl create mode 100755 mongodb/charts/common/templates/_storage.tpl create mode 100755 mongodb/charts/common/templates/_tplvalues.tpl create mode 100755 mongodb/charts/common/templates/_utils.tpl create mode 100755 mongodb/charts/common/templates/_validations.tpl create mode 100755 mongodb/charts/common/templates/_warnings.tpl create mode 100755 mongodb/charts/common/values.yaml create mode 100644 mongodb/mongodb-values.yaml create mode 100755 mongodb/mongodb/.helmignore create mode 100755 mongodb/mongodb/Chart.yaml create mode 100755 mongodb/mongodb/README.md create mode 100755 mongodb/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100755 mongodb/mongodb/templates/NOTES.txt create mode 100755 mongodb/mongodb/templates/_helpers.tpl create mode 100755 mongodb/mongodb/templates/configmap.yaml create mode 100755 mongodb/mongodb/templates/deployment-standalone.yaml create mode 100755 mongodb/mongodb/templates/ingress.yaml create mode 100755 mongodb/mongodb/templates/initialization-configmap.yaml create mode 100755 
mongodb/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100755 mongodb/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100755 mongodb/mongodb/templates/prometheus-alerting-rule.yaml create mode 100755 mongodb/mongodb/templates/prometheus-service-monitor.yaml create mode 100755 mongodb/mongodb/templates/pvc-standalone.yaml create mode 100755 mongodb/mongodb/templates/secrets.yaml create mode 100755 mongodb/mongodb/templates/serviceaccount.yml create mode 100755 mongodb/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100755 mongodb/mongodb/templates/statefulset-primary-rs.yaml create mode 100755 mongodb/mongodb/templates/statefulset-secondary-rs.yaml create mode 100755 mongodb/mongodb/templates/svc-headless-rs.yaml create mode 100755 mongodb/mongodb/templates/svc-primary-rs.yaml create mode 100755 mongodb/mongodb/templates/svc-standalone.yaml create mode 100755 mongodb/mongodb/values-production.yaml create mode 100755 mongodb/mongodb/values.schema.json create mode 100755 mongodb/mongodb/values.yaml create mode 100755 mongodb/requirements.lock create mode 100755 mongodb/requirements.yaml create mode 100755 mongodb/templates/NOTES.txt create mode 100755 mongodb/templates/_helpers.tpl create mode 100755 mongodb/templates/arbiter/configmap.yaml create mode 100755 mongodb/templates/arbiter/headless-svc.yaml create mode 100755 mongodb/templates/arbiter/pdb.yaml create mode 100755 mongodb/templates/arbiter/statefulset.yaml create mode 100755 mongodb/templates/configmap.yaml create mode 100755 mongodb/templates/initialization-configmap.yaml create mode 100755 mongodb/templates/metrics-svc.yaml create mode 100755 mongodb/templates/prometheusrule.yaml create mode 100755 mongodb/templates/replicaset/external-access-svc.yaml create mode 100755 mongodb/templates/replicaset/headless-svc.yaml create mode 100755 mongodb/templates/replicaset/pdb.yaml create mode 100755 mongodb/templates/replicaset/scripts-configmap.yaml create mode 100755 mongodb/templates/replicaset/statefulset.yaml create mode 100755 mongodb/templates/role.yaml create mode 100755 mongodb/templates/rolebinding.yaml create mode 100755 mongodb/templates/secrets.yaml create mode 100755 mongodb/templates/serviceaccount.yaml create mode 100755 mongodb/templates/servicemonitor.yaml create mode 100755 mongodb/templates/standalone/dep-sts.yaml create mode 100755 mongodb/templates/standalone/pvc.yaml create mode 100755 mongodb/templates/standalone/svc.yaml create mode 100755 mongodb/values-production.yaml create mode 100755 mongodb/values.schema.json create mode 100755 mongodb/values.yaml create mode 100644 qliksense/.helmignore create mode 100644 qliksense/Chart.yaml create mode 100644 qliksense/README.md create mode 100644 qliksense/charts/api-keys/.helmignore create mode 100644 qliksense/charts/api-keys/Chart.yaml create mode 100644 qliksense/charts/api-keys/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/.helmignore create mode 100644 qliksense/charts/api-keys/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 
qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/README.md create mode 100644 
qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/README.md create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/api-keys/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/api-keys/charts/messaging/values.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/.helmignore create mode 100644 qliksense/charts/api-keys/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/OWNERS create mode 100644 qliksense/charts/api-keys/charts/mongodb/README.md create mode 100644 qliksense/charts/api-keys/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 
qliksense/charts/api-keys/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/api-keys/charts/mongodb/values.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/README.md create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_service.yaml create mode 100644 
qliksense/charts/api-keys/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/api-keys/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/.helmignore create mode 100644 qliksense/charts/api-keys/charts/redis/Chart.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/README.md create mode 100644 qliksense/charts/api-keys/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/api-keys/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/api-keys/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/values-production.yaml create mode 100644 qliksense/charts/api-keys/charts/redis/values.schema.json create mode 100644 qliksense/charts/api-keys/charts/redis/values.yaml create mode 100644 qliksense/charts/api-keys/requirements.yaml create mode 100644 qliksense/charts/api-keys/templates/manifest.yaml create mode 100644 qliksense/charts/api-keys/values.yaml create mode 100644 qliksense/charts/audit/.helmignore create mode 100644 
qliksense/charts/audit/Chart.yaml create mode 100644 qliksense/charts/audit/README.md create mode 100644 qliksense/charts/audit/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/audit/charts/messaging/README.md create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/audit/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/README.md create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/NOTES.txt create mode 100644 
qliksense/charts/audit/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/audit/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/audit/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/audit/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/audit/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/audit/charts/messaging/values.yaml create mode 100644 qliksense/charts/audit/charts/minio/.helmignore create mode 100644 qliksense/charts/audit/charts/minio/Chart.yaml create mode 100644 qliksense/charts/audit/charts/minio/README.md create mode 100644 qliksense/charts/audit/charts/minio/templates/NOTES.txt create mode 100644 qliksense/charts/audit/charts/minio/templates/_helper_create_bucket.txt create mode 100644 qliksense/charts/audit/charts/minio/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/minio/templates/configmap.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/deployment.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/ingress.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/post-install-create-bucket-job.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/pvc.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/secrets.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/service.yaml create mode 100644 qliksense/charts/audit/charts/minio/templates/statefulset.yaml create mode 100644 qliksense/charts/audit/charts/minio/values.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/.helmignore create mode 100644 qliksense/charts/audit/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/OWNERS create mode 100644 qliksense/charts/audit/charts/mongodb/README.md create mode 100644 qliksense/charts/audit/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/audit/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/audit/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/audit/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 
qliksense/charts/audit/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/audit/charts/mongodb/values.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/audit/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/README.md create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 
qliksense/charts/audit/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/audit/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/audit/requirements.yaml create mode 100644 qliksense/charts/audit/templates/manifest.yaml create mode 100644 qliksense/charts/audit/values.yaml create mode 100644 qliksense/charts/chronos-worker/.helmignore create mode 100644 qliksense/charts/chronos-worker/Chart.yaml create mode 100644 qliksense/charts/chronos-worker/README.md create mode 100644 qliksense/charts/chronos-worker/charts/redis/.helmignore create mode 100644 qliksense/charts/chronos-worker/charts/redis/Chart.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/README.md create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/chronos-worker/charts/redis/values-production.yaml create mode 100644 
qliksense/charts/chronos-worker/charts/redis/values.schema.json create mode 100644 qliksense/charts/chronos-worker/charts/redis/values.yaml create mode 100644 qliksense/charts/chronos-worker/requirements.yaml create mode 100644 qliksense/charts/chronos-worker/templates/NOTES.txt create mode 100644 qliksense/charts/chronos-worker/templates/_helpers.tpl create mode 100644 qliksense/charts/chronos-worker/templates/deny-external-egress-traffic.yaml create mode 100644 qliksense/charts/chronos-worker/templates/deployment.yaml create mode 100644 qliksense/charts/chronos-worker/templates/redis-secret.yaml create mode 100644 qliksense/charts/chronos-worker/templates/svc.yaml create mode 100644 qliksense/charts/chronos-worker/values.yaml create mode 100644 qliksense/charts/chronos/.helmignore create mode 100644 qliksense/charts/chronos/Chart.yaml create mode 100644 qliksense/charts/chronos/README.md create mode 100644 qliksense/charts/chronos/charts/mongodb/.helmignore create mode 100644 qliksense/charts/chronos/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/OWNERS create mode 100644 qliksense/charts/chronos/charts/mongodb/README.md create mode 100644 qliksense/charts/chronos/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/chronos/charts/mongodb/values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/.helmignore create mode 100644 qliksense/charts/chronos/charts/redis/Chart.yaml create mode 100644 qliksense/charts/chronos/charts/redis/README.md create mode 100644 qliksense/charts/chronos/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/production-sentinel-values.yaml 
create mode 100644 qliksense/charts/chronos/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/chronos/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/chronos/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/chronos/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/chronos/charts/redis/values-production.yaml create mode 100644 qliksense/charts/chronos/charts/redis/values.schema.json create mode 100644 qliksense/charts/chronos/charts/redis/values.yaml create mode 100644 qliksense/charts/chronos/requirements.yaml create mode 100644 qliksense/charts/chronos/templates/NOTES.txt create mode 100644 qliksense/charts/chronos/templates/_helpers.tpl create mode 100644 qliksense/charts/chronos/templates/deployment.yaml create mode 100644 qliksense/charts/chronos/templates/mongo-secret.yaml create mode 100644 qliksense/charts/chronos/templates/rbac.yaml create mode 100644 qliksense/charts/chronos/templates/redis-secret.yaml create mode 100644 qliksense/charts/chronos/templates/sa.yaml create mode 100644 qliksense/charts/chronos/templates/svc.yaml create mode 100644 qliksense/charts/chronos/values.yaml create mode 100644 qliksense/charts/collections/.helmignore create mode 100644 qliksense/charts/collections/Chart.yaml create mode 100644 qliksense/charts/collections/README.md create mode 100644 qliksense/charts/collections/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/collections/charts/messaging/README.md create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 
qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/collections/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/README.md create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/monitoring-svc.yaml create 
mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/collections/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/collections/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/collections/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/collections/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/collections/charts/messaging/values.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/.helmignore create mode 100644 qliksense/charts/collections/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/OWNERS create mode 100644 qliksense/charts/collections/charts/mongodb/README.md create mode 100644 qliksense/charts/collections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/collections/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/collections/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/collections/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/collections/charts/mongodb/values.yaml create mode 100644 qliksense/charts/collections/requirements.yaml create mode 100644 qliksense/charts/collections/templates/_helper.tpl create mode 100644 qliksense/charts/collections/templates/deployment.yaml create mode 100644 qliksense/charts/collections/templates/hpa.yml create mode 100644 qliksense/charts/collections/templates/ingress.yaml create mode 100644 qliksense/charts/collections/templates/mongo-secret.yaml create mode 100644 qliksense/charts/collections/templates/service.yaml create mode 100644 
qliksense/charts/collections/templates/token-secret.yaml create mode 100644 qliksense/charts/collections/values.yaml create mode 100644 qliksense/charts/data-connections/.helmignore create mode 100644 qliksense/charts/data-connections/Chart.yaml create mode 100644 qliksense/charts/data-connections/README.md create mode 100644 qliksense/charts/data-connections/charts/mongodb/.helmignore create mode 100644 qliksense/charts/data-connections/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/OWNERS create mode 100644 qliksense/charts/data-connections/charts/mongodb/README.md create mode 100644 qliksense/charts/data-connections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/data-connections/charts/mongodb/values.yaml create mode 100644 qliksense/charts/data-connections/requirements.yaml create mode 100644 qliksense/charts/data-connections/templates/NOTES.txt create mode 100644 qliksense/charts/data-connections/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connections/templates/deployment.yaml create mode 100644 qliksense/charts/data-connections/templates/encryption-secret.yaml create mode 100644 qliksense/charts/data-connections/templates/ingress.yaml create mode 100644 qliksense/charts/data-connections/templates/keys-secret.yaml create mode 100644 qliksense/charts/data-connections/templates/mongo-secret.yaml create mode 100644 qliksense/charts/data-connections/templates/service.yaml create mode 100644 qliksense/charts/data-connections/values.yaml create mode 100644 qliksense/charts/data-connector-common/Chart.yaml create mode 100644 qliksense/charts/data-connector-common/README.md create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/.helmignore create mode 100644 
qliksense/charts/data-connector-common/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/README.md create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/data-connector-common/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/data-connector-common/requirements.yaml create mode 100644 qliksense/charts/data-connector-common/templates/manifest.yaml create mode 100644 qliksense/charts/data-connector-common/values.yaml create mode 100644 qliksense/charts/data-connector-nfs/.helmignore create mode 100644 qliksense/charts/data-connector-nfs/Chart.yaml create mode 100644 qliksense/charts/data-connector-nfs/README.md create 
mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/README.md create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/data-connector-nfs/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/data-connector-nfs/requirements.yaml create mode 100644 qliksense/charts/data-connector-nfs/templates/manifest.yaml create mode 100644 qliksense/charts/data-connector-nfs/values.yaml create mode 100644 qliksense/charts/data-connector-odbc/.helmignore create mode 100644 qliksense/charts/data-connector-odbc/Chart.yaml create mode 100644 qliksense/charts/data-connector-odbc/README.md create 
mode 100644 qliksense/charts/data-connector-odbc/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-odbc/templates/deployment-cmd.yaml create mode 100644 qliksense/charts/data-connector-odbc/templates/deployment-rld.yaml create mode 100644 qliksense/charts/data-connector-odbc/templates/hpa-rld.yaml create mode 100644 qliksense/charts/data-connector-odbc/templates/network.yaml create mode 100644 qliksense/charts/data-connector-odbc/templates/service-cmd.yaml create mode 100644 qliksense/charts/data-connector-odbc/templates/service-rld.yaml create mode 100644 qliksense/charts/data-connector-odbc/values.yaml create mode 100644 qliksense/charts/data-connector-qwc/.helmignore create mode 100644 qliksense/charts/data-connector-qwc/Chart.yaml create mode 100644 qliksense/charts/data-connector-qwc/README.md create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/.helmignore create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/Chart.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/README.md create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-svc.yaml create mode 100644 
qliksense/charts/data-connector-qwc/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/values-production.yaml create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/values.schema.json create mode 100644 qliksense/charts/data-connector-qwc/charts/redis/values.yaml create mode 100644 qliksense/charts/data-connector-qwc/requirements.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-qwc/templates/deployment-cmd.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/deployment-rld.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/deployment-web.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/hpa-rld.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/ingress.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/network.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/secrets-connector-cfg.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/secrets-keys.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/service-cmd.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/service-rld.yaml create mode 100644 qliksense/charts/data-connector-qwc/templates/service-web.yaml create mode 100644 qliksense/charts/data-connector-qwc/values.yaml create mode 100644 qliksense/charts/data-connector-rest/.helmignore create mode 100644 qliksense/charts/data-connector-rest/Chart.yaml create mode 100644 qliksense/charts/data-connector-rest/README.md create mode 100644 qliksense/charts/data-connector-rest/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-rest/templates/deployment-cmd.yaml create mode 100644 qliksense/charts/data-connector-rest/templates/deployment-rld.yaml create mode 100644 qliksense/charts/data-connector-rest/templates/hpa-rld.yaml create mode 100644 qliksense/charts/data-connector-rest/templates/network.yaml create mode 100644 qliksense/charts/data-connector-rest/templates/service-cmd.yaml create mode 100644 qliksense/charts/data-connector-rest/templates/service-rld.yaml create mode 100644 qliksense/charts/data-connector-rest/values.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/.helmignore create mode 100644 qliksense/charts/data-connector-sap-sql/Chart.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/README.md create mode 100644 qliksense/charts/data-connector-sap-sql/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-sap-sql/templates/deployment-cmd.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/templates/deployment-rld.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/templates/hpa-rld.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/templates/network.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/templates/service-cmd.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/templates/service-rld.yaml create mode 100644 qliksense/charts/data-connector-sap-sql/values.yaml create mode 100644 qliksense/charts/data-connector-sfdc/.helmignore create mode 100644 qliksense/charts/data-connector-sfdc/Chart.yaml create mode 100644 qliksense/charts/data-connector-sfdc/README.md create mode 100644 
qliksense/charts/data-connector-sfdc/templates/_helpers.tpl create mode 100644 qliksense/charts/data-connector-sfdc/templates/deployment-cmd.yaml create mode 100644 qliksense/charts/data-connector-sfdc/templates/deployment-rld.yaml create mode 100644 qliksense/charts/data-connector-sfdc/templates/hpa-rld.yaml create mode 100644 qliksense/charts/data-connector-sfdc/templates/network.yaml create mode 100644 qliksense/charts/data-connector-sfdc/templates/service-cmd.yaml create mode 100644 qliksense/charts/data-connector-sfdc/templates/service-rld.yaml create mode 100644 qliksense/charts/data-connector-sfdc/values.yaml create mode 100644 qliksense/charts/data-prep/.helmignore create mode 100644 qliksense/charts/data-prep/Chart.yaml create mode 100644 qliksense/charts/data-prep/README.md create mode 100644 qliksense/charts/data-prep/charts/redis/.helmignore create mode 100644 qliksense/charts/data-prep/charts/redis/Chart.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/README.md create mode 100644 qliksense/charts/data-prep/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/data-prep/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/data-prep/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/values-production.yaml create mode 100644 qliksense/charts/data-prep/charts/redis/values.schema.json create 
mode 100644 qliksense/charts/data-prep/charts/redis/values.yaml create mode 100644 qliksense/charts/data-prep/requirements.yaml create mode 100644 qliksense/charts/data-prep/templates/_helpers.tpl create mode 100644 qliksense/charts/data-prep/templates/deployment.yaml create mode 100644 qliksense/charts/data-prep/templates/hpa.yaml create mode 100644 qliksense/charts/data-prep/templates/ingress.yaml create mode 100644 qliksense/charts/data-prep/templates/pvc.yaml create mode 100644 qliksense/charts/data-prep/templates/redis-secret.yaml create mode 100644 qliksense/charts/data-prep/templates/service.yaml create mode 100644 qliksense/charts/data-prep/values.yaml create mode 100644 qliksense/charts/data-rest-source/.helmignore create mode 100644 qliksense/charts/data-rest-source/Chart.yaml create mode 100644 qliksense/charts/data-rest-source/README.md create mode 100644 qliksense/charts/data-rest-source/templates/_helpers.tpl create mode 100644 qliksense/charts/data-rest-source/templates/deployment.yaml create mode 100644 qliksense/charts/data-rest-source/templates/hpa.yaml create mode 100644 qliksense/charts/data-rest-source/templates/pre-stop-hook.yaml create mode 100644 qliksense/charts/data-rest-source/templates/service.yaml create mode 100644 qliksense/charts/data-rest-source/values.yaml create mode 100644 qliksense/charts/dcaas-web/.helmignore create mode 100644 qliksense/charts/dcaas-web/Chart.yaml create mode 100644 qliksense/charts/dcaas-web/README.md create mode 100644 qliksense/charts/dcaas-web/templates/_helpers.tpl create mode 100644 qliksense/charts/dcaas-web/templates/deployment.yaml create mode 100644 qliksense/charts/dcaas-web/templates/ingress.yaml create mode 100644 qliksense/charts/dcaas-web/templates/service.yaml create mode 100644 qliksense/charts/dcaas-web/values.yaml create mode 100644 qliksense/charts/dcaas/.helmignore create mode 100644 qliksense/charts/dcaas/Chart.yaml create mode 100644 qliksense/charts/dcaas/README.md create mode 100644 qliksense/charts/dcaas/charts/redis/.helmignore create mode 100644 qliksense/charts/dcaas/charts/redis/Chart.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/README.md create mode 100644 qliksense/charts/dcaas/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/dcaas/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/dcaas/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/networkpolicy.yaml create mode 100644 
qliksense/charts/dcaas/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/values-production.yaml create mode 100644 qliksense/charts/dcaas/charts/redis/values.schema.json create mode 100644 qliksense/charts/dcaas/charts/redis/values.yaml create mode 100644 qliksense/charts/dcaas/requirements.yaml create mode 100644 qliksense/charts/dcaas/templates/_helpers.tpl create mode 100644 qliksense/charts/dcaas/templates/deployment.yaml create mode 100644 qliksense/charts/dcaas/templates/ingress.yaml create mode 100644 qliksense/charts/dcaas/templates/redis-secret.yaml create mode 100644 qliksense/charts/dcaas/templates/service.yaml create mode 100644 qliksense/charts/dcaas/values.yaml create mode 100644 qliksense/charts/edge-auth/.helmignore create mode 100644 qliksense/charts/edge-auth/Chart.yaml create mode 100644 qliksense/charts/edge-auth/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/.helmignore create mode 100644 qliksense/charts/edge-auth/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 
qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/README.md create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/_helpers.tpl create mode 100644 
qliksense/charts/edge-auth/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/edge-auth/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/edge-auth/charts/messaging/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/.helmignore create mode 100644 qliksense/charts/edge-auth/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/OWNERS create mode 100644 qliksense/charts/edge-auth/charts/mongodb/README.md create mode 100644 qliksense/charts/edge-auth/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/edge-auth/charts/mongodb/values-production.yaml 
create mode 100644 qliksense/charts/edge-auth/charts/mongodb/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/.helmignore create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/README.md create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrole.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-daemonset.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-deployment.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-hpa.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-metrics-service.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-service.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-stats-service.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-deployment.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-service.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/headers-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/role.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/rolebinding.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/serviceaccount.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/tcp-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/templates/udp-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/nginx-ingress/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/README.md create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_image.tpl create mode 100644 
qliksense/charts/edge-auth/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_storageclass.yaml create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/edge-auth/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/.helmignore create mode 100644 qliksense/charts/edge-auth/charts/redis/Chart.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/README.md create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/metrics-svc.yaml create mode 100644 
qliksense/charts/edge-auth/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/values-production.yaml create mode 100644 qliksense/charts/edge-auth/charts/redis/values.schema.json create mode 100644 qliksense/charts/edge-auth/charts/redis/values.yaml create mode 100644 qliksense/charts/edge-auth/requirements.yaml create mode 100644 qliksense/charts/edge-auth/templates/manifest.yaml create mode 100644 qliksense/charts/edge-auth/values.yaml create mode 100644 qliksense/charts/elastic-infra/.helmignore create mode 100644 qliksense/charts/elastic-infra/Chart.yaml create mode 100644 qliksense/charts/elastic-infra/README.md create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/.helmignore create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/OWNERS create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/README.md create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 
qliksense/charts/elastic-infra/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/elastic-infra/charts/mongodb/values.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/.helmignore create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/Chart.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/README.md create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/NOTES.txt create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/_helpers.tpl create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/addheaders-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrole.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-daemonset.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-deployment.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-hpa.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-metrics-service.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-prometheusrules.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-psp.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-role.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-service.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-serviceaccount.yaml create mode 100644 
qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-servicemonitor.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-webhook-service.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-deployment.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-psp.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-role.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-service.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-serviceaccount.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/proxyheaders-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/tcp-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/templates/udp-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/charts/nginx-ingress/values.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/.helmignore create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/Chart.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/README.md create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/NOTES.txt create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/_helpers.tpl create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/addheaders-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrole.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-daemonset.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-deployment.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-hpa.yaml 
create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-metrics-service.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-poddisruptionbudget.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-prometheusrules.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-psp.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-role.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-service.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-serviceaccount.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-servicemonitor.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/controller-webhook-service.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-deployment.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-psp.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-role.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-rolebinding.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-service.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-serviceaccount.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/proxyheaders-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/tcp-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/templates/udp-configmap.yaml create mode 100644 qliksense/charts/elastic-infra/nginx-ingress/values.yaml create mode 100644 qliksense/charts/elastic-infra/requirements.yaml create mode 100644 qliksense/charts/elastic-infra/templates/_helper.tpl create mode 100644 qliksense/charts/elastic-infra/templates/ingress.yaml create mode 100644 qliksense/charts/elastic-infra/templates/tls-secret.yaml create mode 100644 qliksense/charts/elastic-infra/templates/tlscert.yaml create mode 100644 qliksense/charts/elastic-infra/values.yaml create mode 100644 qliksense/charts/encryption/.helmignore create mode 100644 qliksense/charts/encryption/Chart.yaml create mode 100644 qliksense/charts/encryption/README.md create mode 100644 qliksense/charts/encryption/templates/_helpers.tpl create mode 100644 qliksense/charts/encryption/templates/deployment.yaml create mode 100644 qliksense/charts/encryption/templates/service.yaml create mode 100644 qliksense/charts/encryption/templates/serviceaccount.yaml create mode 100644 qliksense/charts/encryption/templates/token-secret.yaml create mode 100644 qliksense/charts/encryption/values.yaml create mode 100644 qliksense/charts/engine/.helmignore create mode 100644 qliksense/charts/engine/Chart.yaml create mode 100644 qliksense/charts/engine/README.md create mode 100644 qliksense/charts/engine/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/engine/charts/messaging/README.md create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/Chart.yaml 
create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/engine/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/README.md create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 
qliksense/charts/engine/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/engine/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/engine/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/engine/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/engine/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/engine/charts/messaging/values.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/Chart.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/OWNERS create mode 100644 qliksense/charts/engine/charts/redis-ha/README.md create mode 100644 qliksense/charts/engine/charts/redis-ha/ci/haproxy-enabled-values.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/_configs.tpl create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-auth-secret.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-announce-service.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-configmap.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-exporter-script-configmap.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-pdb.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-role.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-rolebinding.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-service.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-serviceaccount.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-servicemonitor.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-ha-statefulset.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-haproxy-deployment.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-haproxy-service.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-haproxy-serviceaccount.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/redis-haproxy-servicemonitor.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/tests/test-redis-ha-configmap.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/templates/tests/test-redis-ha-pod.yaml create mode 100644 qliksense/charts/engine/charts/redis-ha/values.yaml create mode 100644 qliksense/charts/engine/charts/redis/.helmignore create 
mode 100644 qliksense/charts/engine/charts/redis/Chart.yaml create mode 100644 qliksense/charts/engine/charts/redis/README.md create mode 100644 qliksense/charts/engine/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/engine/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/engine/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/engine/charts/redis/values-production.yaml create mode 100644 qliksense/charts/engine/charts/redis/values.schema.json create mode 100644 qliksense/charts/engine/charts/redis/values.yaml create mode 100644 qliksense/charts/engine/requirements.yaml create mode 100644 qliksense/charts/engine/templates/NOTES.txt create mode 100644 qliksense/charts/engine/templates/_helpers.tpl create mode 100644 qliksense/charts/engine/templates/deployment-args.yaml create mode 100644 qliksense/charts/engine/templates/deployment-stateless.yaml create mode 100644 qliksense/charts/engine/templates/deployments.yaml create mode 100644 qliksense/charts/engine/templates/engine-reload.yaml create mode 100644 qliksense/charts/engine/templates/engine-template.yaml create mode 100644 qliksense/charts/engine/templates/engine-variants.yaml create mode 100644 qliksense/charts/engine/templates/engines.yaml create mode 100644 qliksense/charts/engine/templates/hpa-stateless.yaml create mode 100644 qliksense/charts/engine/templates/hpa.yaml create mode 100644 qliksense/charts/engine/templates/ingress.yaml create 
mode 100644 qliksense/charts/engine/templates/networkpolicy.yaml create mode 100644 qliksense/charts/engine/templates/pre-stop-hook-cm.yaml create mode 100644 qliksense/charts/engine/templates/pvc.yaml create mode 100644 qliksense/charts/engine/templates/rules-cm.yaml create mode 100644 qliksense/charts/engine/templates/sc.yaml create mode 100644 qliksense/charts/engine/templates/secret-jwt.yaml create mode 100644 qliksense/charts/engine/templates/secret.yaml create mode 100644 qliksense/charts/engine/templates/service.yaml create mode 100644 qliksense/charts/engine/values.yaml create mode 100644 qliksense/charts/eventing/.helmignore create mode 100644 qliksense/charts/eventing/Chart.yaml create mode 100644 qliksense/charts/eventing/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/.helmignore create mode 100644 qliksense/charts/eventing/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/ingress.yaml create mode 
100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/README.md create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/templates/_helper.tpl create mode 100644 
qliksense/charts/eventing/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/eventing/charts/messaging/values.yaml create mode 100644 qliksense/charts/eventing/requirements.yaml create mode 100644 qliksense/charts/eventing/templates/_helpers.tpl create mode 100644 qliksense/charts/eventing/templates/deployment.yaml create mode 100644 qliksense/charts/eventing/templates/ingress.yaml create mode 100644 qliksense/charts/eventing/templates/secret.yaml create mode 100644 qliksense/charts/eventing/templates/service.yaml create mode 100644 qliksense/charts/eventing/values.yaml create mode 100644 qliksense/charts/feature-flags/.helmignore create mode 100644 qliksense/charts/feature-flags/Chart.yaml create mode 100644 qliksense/charts/feature-flags/README.md create mode 100644 qliksense/charts/feature-flags/templates/_helpers.tpl create mode 100644 qliksense/charts/feature-flags/templates/configmap.yaml create mode 100644 qliksense/charts/feature-flags/templates/deployment.yaml create mode 100644 qliksense/charts/feature-flags/templates/hpa.yaml create mode 100644 qliksense/charts/feature-flags/templates/ingress.yaml create mode 100644 qliksense/charts/feature-flags/templates/service.yaml create mode 100644 qliksense/charts/feature-flags/values.yaml create mode 100644 qliksense/charts/generic-links/Chart.yaml create mode 100644 qliksense/charts/generic-links/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/configmap.yaml 
create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/README.md create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/generic-links/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/generic-links/charts/messaging/values.yaml create mode 100644 
qliksense/charts/generic-links/charts/mongodb/.helmignore create mode 100644 qliksense/charts/generic-links/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/OWNERS create mode 100644 qliksense/charts/generic-links/charts/mongodb/README.md create mode 100644 qliksense/charts/generic-links/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/generic-links/charts/mongodb/values.yaml create mode 100644 qliksense/charts/generic-links/requirements.yaml create mode 100644 qliksense/charts/generic-links/templates/_helper.tpl create mode 100644 qliksense/charts/generic-links/templates/deployment.yaml create mode 100644 qliksense/charts/generic-links/templates/ingress.yml create mode 100644 qliksense/charts/generic-links/templates/mongo-secret.yaml create mode 100644 qliksense/charts/generic-links/templates/service.yaml create mode 100644 qliksense/charts/generic-links/templates/token-secret.yaml create mode 100644 qliksense/charts/generic-links/templates/webrisk.yaml create mode 100644 qliksense/charts/generic-links/values.yaml create mode 100644 qliksense/charts/geo-operations/.helmignore create mode 100644 qliksense/charts/geo-operations/Chart.yaml create mode 100644 qliksense/charts/geo-operations/README.md create mode 100644 qliksense/charts/geo-operations/templates/_helpers.tpl create mode 100644 qliksense/charts/geo-operations/templates/deployment.yaml create mode 100644 qliksense/charts/geo-operations/templates/hpa.yaml create mode 100644 qliksense/charts/geo-operations/templates/network.yaml create mode 100644 qliksense/charts/geo-operations/templates/service.yaml create mode 100644 qliksense/charts/geo-operations/values.yaml create mode 100644 qliksense/charts/groups/.helmignore create mode 100644 qliksense/charts/groups/Chart.yaml create mode 100644 
qliksense/charts/groups/README.md create mode 100644 qliksense/charts/groups/charts/mongodb/.helmignore create mode 100644 qliksense/charts/groups/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/OWNERS create mode 100644 qliksense/charts/groups/charts/mongodb/README.md create mode 100644 qliksense/charts/groups/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/groups/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/groups/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/groups/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/groups/charts/mongodb/values.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/groups/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/README.md create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_name.tpl create 
mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/groups/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/groups/charts/redis/.helmignore create mode 100644 qliksense/charts/groups/charts/redis/Chart.yaml create mode 100644 qliksense/charts/groups/charts/redis/README.md create mode 100644 qliksense/charts/groups/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/groups/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/groups/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-slave-svc.yaml 
create mode 100644 qliksense/charts/groups/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/groups/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/groups/charts/redis/values-production.yaml create mode 100644 qliksense/charts/groups/charts/redis/values.schema.json create mode 100644 qliksense/charts/groups/charts/redis/values.yaml create mode 100644 qliksense/charts/groups/requirements.yaml create mode 100644 qliksense/charts/groups/templates/manifest.yaml create mode 100644 qliksense/charts/groups/values.yaml create mode 100644 qliksense/charts/hub/.helmignore create mode 100644 qliksense/charts/hub/Chart.yaml create mode 100644 qliksense/charts/hub/README.md create mode 100644 qliksense/charts/hub/templates/_helper.tpl create mode 100644 qliksense/charts/hub/templates/deployment.yaml create mode 100644 qliksense/charts/hub/templates/ingress.yaml create mode 100644 qliksense/charts/hub/templates/service.yaml create mode 100644 qliksense/charts/hub/values.yaml create mode 100644 qliksense/charts/identity-providers/.helmignore create mode 100644 qliksense/charts/identity-providers/Chart.yaml create mode 100644 qliksense/charts/identity-providers/README.md create mode 100644 qliksense/charts/identity-providers/charts/mongodb/.helmignore create mode 100644 qliksense/charts/identity-providers/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/OWNERS create mode 100644 qliksense/charts/identity-providers/charts/mongodb/README.md create mode 100644 qliksense/charts/identity-providers/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/identity-providers/charts/mongodb/values.yaml create mode 100644 
qliksense/charts/identity-providers/requirements.yaml create mode 100644 qliksense/charts/identity-providers/templates/_helpers.tpl create mode 100644 qliksense/charts/identity-providers/templates/deployment.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-deployment.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-hpa.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-ingress.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-mongo-secret.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-secret.yaml create mode 100644 qliksense/charts/identity-providers/templates/ext-service.yaml create mode 100644 qliksense/charts/identity-providers/templates/hpa.yaml create mode 100644 qliksense/charts/identity-providers/templates/secret.yaml create mode 100644 qliksense/charts/identity-providers/templates/service.yaml create mode 100644 qliksense/charts/identity-providers/values.yaml create mode 100644 qliksense/charts/insights/.helmignore create mode 100644 qliksense/charts/insights/Chart.yaml create mode 100644 qliksense/charts/insights/README.md create mode 100644 qliksense/charts/insights/charts/mongodb/.helmignore create mode 100644 qliksense/charts/insights/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/OWNERS create mode 100644 qliksense/charts/insights/charts/mongodb/README.md create mode 100644 qliksense/charts/insights/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/insights/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/insights/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/insights/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/insights/charts/mongodb/values.yaml create mode 100644 qliksense/charts/insights/requirements.yaml create mode 100644 qliksense/charts/insights/templates/NOTES.txt create mode 100644 qliksense/charts/insights/templates/_helpers.tpl create mode 100644 qliksense/charts/insights/templates/deployment.yaml create mode 100644 qliksense/charts/insights/templates/ingress.yaml 
create mode 100644 qliksense/charts/insights/templates/insights.yaml create mode 100644 qliksense/charts/insights/templates/mongo-secret.yaml create mode 100644 qliksense/charts/insights/templates/prune-graph-cronjob.yaml create mode 100644 qliksense/charts/insights/values.yaml create mode 100644 qliksense/charts/keys/.helmignore create mode 100644 qliksense/charts/keys/Chart.yaml create mode 100644 qliksense/charts/keys/README.md create mode 100644 qliksense/charts/keys/templates/_helpers.tpl create mode 100644 qliksense/charts/keys/templates/configmap.yaml create mode 100644 qliksense/charts/keys/templates/deployment.yaml create mode 100644 qliksense/charts/keys/templates/hpa.yaml create mode 100644 qliksense/charts/keys/templates/service.yaml create mode 100644 qliksense/charts/keys/values.yaml create mode 100644 qliksense/charts/licenses/.helmignore create mode 100644 qliksense/charts/licenses/Chart.yaml create mode 100644 qliksense/charts/licenses/README.md create mode 100644 qliksense/charts/licenses/charts/mongodb/.helmignore create mode 100644 qliksense/charts/licenses/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/OWNERS create mode 100644 qliksense/charts/licenses/charts/mongodb/README.md create mode 100644 qliksense/charts/licenses/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/licenses/charts/mongodb/values.yaml create mode 100644 qliksense/charts/licenses/requirements.yaml create mode 100644 qliksense/charts/licenses/templates/_helper.tpl create mode 100644 qliksense/charts/licenses/templates/deployment.yaml create mode 100644 qliksense/charts/licenses/templates/hpa.yaml create mode 100644 qliksense/charts/licenses/templates/ingress.yaml create mode 100644 qliksense/charts/licenses/templates/mongo-secret.yaml create mode 100644 qliksense/charts/licenses/templates/proxy-password-secret.yaml create mode 100644 
qliksense/charts/licenses/templates/rbac.yaml create mode 100644 qliksense/charts/licenses/templates/rollbar-secret.yaml create mode 100644 qliksense/charts/licenses/templates/service.yaml create mode 100644 qliksense/charts/licenses/templates/serviceaccount.yaml create mode 100644 qliksense/charts/licenses/values.yaml create mode 100644 qliksense/charts/locale/.helmignore create mode 100644 qliksense/charts/locale/Chart.yaml create mode 100644 qliksense/charts/locale/README.md create mode 100644 qliksense/charts/locale/templates/_helper.tpl create mode 100644 qliksense/charts/locale/templates/deployment.yaml create mode 100644 qliksense/charts/locale/templates/ingress.yaml create mode 100644 qliksense/charts/locale/templates/service.yaml create mode 100644 qliksense/charts/locale/values.yaml create mode 100644 qliksense/charts/management-console/.helmignore create mode 100644 qliksense/charts/management-console/Chart.yaml create mode 100644 qliksense/charts/management-console/README.md create mode 100644 qliksense/charts/management-console/templates/NOTES.txt create mode 100644 qliksense/charts/management-console/templates/_helper.tpl create mode 100644 qliksense/charts/management-console/templates/deployment.yaml create mode 100644 qliksense/charts/management-console/templates/ingress.yaml create mode 100644 qliksense/charts/management-console/templates/service.yaml create mode 100644 qliksense/charts/management-console/values.yaml create mode 100644 qliksense/charts/messaging/.helmignore create mode 100644 qliksense/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/messaging/README.md create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/.minikube_ignore create mode 100644 qliksense/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 
qliksense/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/nats-secret.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/messaging/values.yaml create mode 100644 qliksense/charts/mongodb/.helmignore create mode 100644 qliksense/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/mongodb/OWNERS create mode 100644 qliksense/charts/mongodb/README.md create mode 100644 qliksense/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/mongodb/values.yaml create mode 100644 qliksense/charts/nl-broker/.helmignore create mode 100644 qliksense/charts/nl-broker/Chart.yaml create mode 100644 qliksense/charts/nl-broker/README.md create mode 100644 qliksense/charts/nl-broker/charts/mongodb/.helmignore create mode 100644 qliksense/charts/nl-broker/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/OWNERS create mode 100644 qliksense/charts/nl-broker/charts/mongodb/README.md create mode 100644 qliksense/charts/nl-broker/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/NOTES.txt create mode 100644 
qliksense/charts/nl-broker/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/nl-broker/charts/mongodb/values.yaml create mode 100644 qliksense/charts/nl-broker/requirements.yaml create mode 100644 qliksense/charts/nl-broker/templates/_helper.tpl create mode 100644 qliksense/charts/nl-broker/templates/deployment.yaml create mode 100644 qliksense/charts/nl-broker/templates/ingress.yaml create mode 100644 qliksense/charts/nl-broker/templates/mongo-secret.yaml create mode 100644 qliksense/charts/nl-broker/templates/service.yaml create mode 100644 qliksense/charts/nl-broker/values.yaml create mode 100644 qliksense/charts/nl-parser/.helmignore create mode 100644 qliksense/charts/nl-parser/Chart.yaml create mode 100644 qliksense/charts/nl-parser/README.md create mode 100644 qliksense/charts/nl-parser/templates/_helper.tpl create mode 100644 qliksense/charts/nl-parser/templates/deployment.yaml create mode 100644 qliksense/charts/nl-parser/templates/ingress.yaml create mode 100644 qliksense/charts/nl-parser/templates/service.yaml create mode 100644 qliksense/charts/nl-parser/values.yaml create mode 100644 qliksense/charts/notification-prep/Chart.yaml create mode 100644 qliksense/charts/notification-prep/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 
qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/README.md create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 
qliksense/charts/notification-prep/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/notification-prep/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/notification-prep/charts/messaging/values.yaml create mode 100644 qliksense/charts/notification-prep/requirements.yaml create mode 100644 qliksense/charts/notification-prep/templates/_helper.tpl create mode 100644 qliksense/charts/notification-prep/templates/deployment.yaml create mode 100644 qliksense/charts/notification-prep/templates/hpa.yaml create mode 100644 qliksense/charts/notification-prep/templates/ingress.yaml create mode 100644 qliksense/charts/notification-prep/templates/secret.yaml create mode 100644 qliksense/charts/notification-prep/templates/service.yaml create mode 100644 qliksense/charts/notification-prep/values.yaml create mode 100644 qliksense/charts/odag/.helmignore create mode 100644 qliksense/charts/odag/Chart.yaml create mode 100644 qliksense/charts/odag/README.md create mode 100644 qliksense/charts/odag/charts/mongodb/.helmignore create mode 100644 qliksense/charts/odag/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/OWNERS create mode 100644 qliksense/charts/odag/charts/mongodb/README.md create mode 100644 qliksense/charts/odag/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/odag/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/odag/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/odag/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/templates/svc-standalone.yaml create mode 100644 
qliksense/charts/odag/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/odag/charts/mongodb/values.yaml create mode 100644 qliksense/charts/odag/requirements.yaml create mode 100644 qliksense/charts/odag/templates/NOTES.txt create mode 100644 qliksense/charts/odag/templates/_helpers.tpl create mode 100644 qliksense/charts/odag/templates/deployment.yaml create mode 100644 qliksense/charts/odag/templates/ingress.yaml create mode 100644 qliksense/charts/odag/templates/mongo-secret.yaml create mode 100644 qliksense/charts/odag/templates/rbac.yaml create mode 100644 qliksense/charts/odag/templates/sa.yaml create mode 100644 qliksense/charts/odag/templates/service.yaml create mode 100644 qliksense/charts/odag/values.yaml create mode 100644 qliksense/charts/policy-decisions/.helmignore create mode 100644 qliksense/charts/policy-decisions/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/.helmignore create mode 100644 qliksense/charts/policy-decisions/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 
qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/README.md create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/ingress.yaml create mode 
100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/policy-decisions/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/policy-decisions/charts/messaging/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/.helmignore create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/OWNERS create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/README.md create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/policy-decisions/charts/mongodb/values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/.helmignore create mode 100644 
qliksense/charts/policy-decisions/charts/redis/Chart.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/README.md create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/values-production.yaml create mode 100644 qliksense/charts/policy-decisions/charts/redis/values.schema.json create mode 100644 qliksense/charts/policy-decisions/charts/redis/values.yaml create mode 100644 qliksense/charts/policy-decisions/requirements.yaml create mode 100644 qliksense/charts/policy-decisions/templates/_helper.tpl create mode 100644 qliksense/charts/policy-decisions/templates/configmap.yaml create mode 100644 qliksense/charts/policy-decisions/templates/deployment.yaml create mode 100644 qliksense/charts/policy-decisions/templates/hpa.yml create mode 100644 qliksense/charts/policy-decisions/templates/ingress.yaml create mode 100644 qliksense/charts/policy-decisions/templates/mongo-secret.yaml create mode 100644 
qliksense/charts/policy-decisions/templates/redis-secret.yaml create mode 100644 qliksense/charts/policy-decisions/templates/service.yaml create mode 100644 qliksense/charts/policy-decisions/templates/token-secret.yaml create mode 100644 qliksense/charts/policy-decisions/values.yaml create mode 100644 qliksense/charts/precedents/.helmignore create mode 100644 qliksense/charts/precedents/Chart.yaml create mode 100644 qliksense/charts/precedents/README.md create mode 100644 qliksense/charts/precedents/charts/mongodb/.helmignore create mode 100644 qliksense/charts/precedents/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/OWNERS create mode 100644 qliksense/charts/precedents/charts/mongodb/README.md create mode 100644 qliksense/charts/precedents/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/precedents/charts/mongodb/values.yaml create mode 100644 qliksense/charts/precedents/requirements.yaml create mode 100644 qliksense/charts/precedents/templates/_helpers.tpl create mode 100644 qliksense/charts/precedents/templates/deployment.yaml create mode 100644 qliksense/charts/precedents/templates/hpa.yaml create mode 100644 qliksense/charts/precedents/templates/ingress.yaml create mode 100644 qliksense/charts/precedents/templates/mongo-secret.yaml create mode 100644 qliksense/charts/precedents/templates/private-key-secret.yaml create mode 100644 qliksense/charts/precedents/templates/prune-graph-cronjob.yaml create mode 100644 qliksense/charts/precedents/templates/service-cayley.yaml create mode 100644 qliksense/charts/precedents/templates/service.yaml create mode 100644 qliksense/charts/precedents/values.yaml create mode 100644 qliksense/charts/qix-data-connection/.helmignore create mode 100644 qliksense/charts/qix-data-connection/Chart.yaml create mode 100644 qliksense/charts/qix-data-connection/README.md create mode 100644 
qliksense/charts/qix-data-connection/charts/mongodb/.helmignore create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/OWNERS create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/README.md create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/qix-data-connection/charts/mongodb/values.yaml create mode 100644 qliksense/charts/qix-data-connection/requirements.yaml create mode 100644 qliksense/charts/qix-data-connection/templates/NOTES.txt create mode 100644 qliksense/charts/qix-data-connection/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-data-connection/templates/deployment.yaml create mode 100644 qliksense/charts/qix-data-connection/templates/ingress.yaml create mode 100644 qliksense/charts/qix-data-connection/templates/keys-secret.yaml create mode 100644 qliksense/charts/qix-data-connection/templates/mongo-secret.yaml create mode 100644 qliksense/charts/qix-data-connection/templates/service.yaml create mode 100644 qliksense/charts/qix-data-connection/values.yaml create mode 100644 qliksense/charts/qix-datafiles/.helmignore create mode 100644 qliksense/charts/qix-datafiles/Chart.yaml create mode 100644 qliksense/charts/qix-datafiles/README.md create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/.helmignore create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/OWNERS create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/README.md create mode 100644 
qliksense/charts/qix-datafiles/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/qix-datafiles/charts/mongodb/values.yaml create mode 100644 qliksense/charts/qix-datafiles/requirements.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-datafiles/templates/deployment.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/hpa.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/ingress.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/mongo-secret.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/pvc.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/sc.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/service.yaml create mode 100644 qliksense/charts/qix-datafiles/templates/token-secret.yaml create mode 100644 qliksense/charts/qix-datafiles/values.yaml create mode 100644 qliksense/charts/qix-sessions/.helmignore create mode 100644 qliksense/charts/qix-sessions/Chart.yaml create mode 100644 qliksense/charts/qix-sessions/README.md create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/.helmignore create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/OWNERS create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/README.md create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/configmap.yaml create mode 100644 
qliksense/charts/qix-sessions/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/qix-sessions/charts/mongodb/values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/.helmignore create mode 100644 qliksense/charts/qix-sessions/charts/redis/Chart.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/README.md create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-master-svc.yaml create mode 100644 
qliksense/charts/qix-sessions/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/values-production.yaml create mode 100644 qliksense/charts/qix-sessions/charts/redis/values.schema.json create mode 100644 qliksense/charts/qix-sessions/charts/redis/values.yaml create mode 100644 qliksense/charts/qix-sessions/requirements.yaml create mode 100644 qliksense/charts/qix-sessions/templates/_helper.tpl create mode 100644 qliksense/charts/qix-sessions/templates/app-secrets.yaml create mode 100644 qliksense/charts/qix-sessions/templates/configmaps.yaml create mode 100644 qliksense/charts/qix-sessions/templates/crd.yaml create mode 100644 qliksense/charts/qix-sessions/templates/deployment.yaml create mode 100644 qliksense/charts/qix-sessions/templates/env-variables.yaml create mode 100644 qliksense/charts/qix-sessions/templates/hpa.yaml create mode 100644 qliksense/charts/qix-sessions/templates/ingress.yaml create mode 100644 qliksense/charts/qix-sessions/templates/mongo-secret.yaml create mode 100644 qliksense/charts/qix-sessions/templates/rbac.yaml create mode 100644 qliksense/charts/qix-sessions/templates/redis-secret.yaml create mode 100644 qliksense/charts/qix-sessions/templates/sa.yaml create mode 100644 qliksense/charts/qix-sessions/templates/service.yaml create mode 100644 qliksense/charts/qix-sessions/values.yaml create mode 100644 qliksense/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/qlikcommon/README.md create mode 100644 qliksense/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_networkpolicy.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 
100644 qliksense/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_storageclass.yaml create mode 100644 qliksense/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/qlikview-client/.helmignore create mode 100644 qliksense/charts/qlikview-client/Chart.yaml create mode 100644 qliksense/charts/qlikview-client/README.md create mode 100644 qliksense/charts/qlikview-client/templates/NOTES.txt create mode 100644 qliksense/charts/qlikview-client/templates/_helper.tpl create mode 100644 qliksense/charts/qlikview-client/templates/deployment.yaml create mode 100644 qliksense/charts/qlikview-client/templates/ingress.yaml create mode 100644 qliksense/charts/qlikview-client/templates/service.yaml create mode 100644 qliksense/charts/qlikview-client/values.yaml create mode 100644 qliksense/charts/quotas/.helmignore create mode 100644 qliksense/charts/quotas/Chart.yaml create mode 100644 qliksense/charts/quotas/README.md create mode 100644 qliksense/charts/quotas/templates/_helper.tpl create mode 100644 qliksense/charts/quotas/templates/deployment.yaml create mode 100644 qliksense/charts/quotas/templates/ingress.yaml create mode 100644 qliksense/charts/quotas/templates/service.yaml create mode 100644 qliksense/charts/quotas/values.yaml create mode 100644 qliksense/charts/redis/.helmignore create mode 100644 qliksense/charts/redis/Chart.yaml create mode 100644 qliksense/charts/redis/README.md create mode 100644 qliksense/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/redis/templates/redis-master-svc.yaml create mode 100644 
qliksense/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/redis/values-production.yaml create mode 100644 qliksense/charts/redis/values.schema.json create mode 100644 qliksense/charts/redis/values.yaml create mode 100644 qliksense/charts/reload-tasks/.helmignore create mode 100644 qliksense/charts/reload-tasks/Chart.yaml create mode 100644 qliksense/charts/reload-tasks/README.md create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/.helmignore create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/OWNERS create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/README.md create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/reload-tasks/charts/mongodb/values.yaml create mode 100644 qliksense/charts/reload-tasks/requirements.yaml create mode 100644 qliksense/charts/reload-tasks/templates/_helper.tpl create mode 100644 qliksense/charts/reload-tasks/templates/deployment.yaml create mode 100644 qliksense/charts/reload-tasks/templates/hpa.yaml create mode 100644 qliksense/charts/reload-tasks/templates/ingress.yaml create mode 100644 qliksense/charts/reload-tasks/templates/mongo-secret.yaml create mode 100644 qliksense/charts/reload-tasks/templates/private-key-secret.yaml create mode 100644 
qliksense/charts/reload-tasks/templates/service.yaml create mode 100644 qliksense/charts/reload-tasks/values.yaml create mode 100644 qliksense/charts/reloads/.helmignore create mode 100644 qliksense/charts/reloads/Chart.yaml create mode 100644 qliksense/charts/reloads/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/.helmignore create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/config/client-auth.json create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/clusterrole.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/customresourcedefinition.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/deployment.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/natscluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/secret.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/templates/serviceaccount.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-operator/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/clusterrole.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/customresourcedefinition.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/deployment.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/natsstreamingcluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/templates/serviceaccount.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming-operator/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 
qliksense/charts/reloads/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/.helmignore create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/OWNERS create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/values-production.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/config/client-auth.json create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/.helmignore create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/config/client-auth.json create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/clusterrole.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/customresourcedefinition.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/deployment.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/natscluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/secret.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/templates/serviceaccount.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-operator/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/README.md create mode 
100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/clusterrole.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/clusterrolebinding.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/customresourcedefinition.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/deployment.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/natsstreamingcluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/templates/serviceaccount.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming-operator/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/reloads/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/natscluster-secret.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/natscluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/natsstreamingcluster.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/reloads/charts/messaging/values.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/.helmignore create mode 100644 qliksense/charts/reloads/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/OWNERS create mode 100644 qliksense/charts/reloads/charts/mongodb/README.md create mode 100644 qliksense/charts/reloads/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 
qliksense/charts/reloads/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/reloads/charts/mongodb/values.yaml create mode 100644 qliksense/charts/reloads/requirements.yaml create mode 100644 qliksense/charts/reloads/templates/_helper.tpl create mode 100644 qliksense/charts/reloads/templates/deployment.yaml create mode 100644 qliksense/charts/reloads/templates/hpa.yaml create mode 100644 qliksense/charts/reloads/templates/ingress.yaml create mode 100644 qliksense/charts/reloads/templates/mongo-secret.yaml create mode 100644 qliksense/charts/reloads/templates/pre-stop-hook.yaml create mode 100644 qliksense/charts/reloads/templates/private-key-secret.yaml create mode 100644 qliksense/charts/reloads/templates/service.yaml create mode 100644 qliksense/charts/reloads/values.yaml create mode 100644 qliksense/charts/reporting/.helmignore create mode 100644 qliksense/charts/reporting/Chart.yaml create mode 100644 qliksense/charts/reporting/README.md create mode 100644 qliksense/charts/reporting/charts/redis/.helmignore create mode 100644 qliksense/charts/reporting/charts/redis/Chart.yaml create mode 100644 qliksense/charts/reporting/charts/redis/README.md create mode 100644 qliksense/charts/reporting/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/reporting/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/reporting/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/prometheusrule.yaml create mode 100644 
qliksense/charts/reporting/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 qliksense/charts/reporting/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/reporting/charts/redis/values-production.yaml create mode 100644 qliksense/charts/reporting/charts/redis/values.schema.json create mode 100644 qliksense/charts/reporting/charts/redis/values.yaml create mode 100644 qliksense/charts/reporting/requirements.yaml create mode 100644 qliksense/charts/reporting/templates/NOTES.txt create mode 100644 qliksense/charts/reporting/templates/_helpers.tpl create mode 100644 qliksense/charts/reporting/templates/deployment.yaml create mode 100644 qliksense/charts/reporting/templates/hpa.yaml create mode 100644 qliksense/charts/reporting/templates/ingress.yaml create mode 100644 qliksense/charts/reporting/templates/redis-network-policy.yaml create mode 100644 qliksense/charts/reporting/templates/redis-secret.yaml create mode 100644 qliksense/charts/reporting/templates/service-cmp.yaml create mode 100644 qliksense/charts/reporting/templates/service-rpr.yaml create mode 100644 qliksense/charts/reporting/templates/service-rwr.yaml create mode 100644 qliksense/charts/reporting/templates/service.yaml create mode 100644 qliksense/charts/reporting/values.yaml create mode 100644 qliksense/charts/resource-library/.helmignore create mode 100644 qliksense/charts/resource-library/Chart.yaml create mode 100644 qliksense/charts/resource-library/README.md create mode 100644 qliksense/charts/resource-library/charts/mongodb/.helmignore create mode 100644 qliksense/charts/resource-library/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/OWNERS create mode 100644 qliksense/charts/resource-library/charts/mongodb/README.md create mode 100644 qliksense/charts/resource-library/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 
qliksense/charts/resource-library/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/resource-library/charts/mongodb/values.yaml create mode 100644 qliksense/charts/resource-library/requirements.yaml create mode 100644 qliksense/charts/resource-library/templates/NOTES.txt create mode 100644 qliksense/charts/resource-library/templates/_helper.tpl create mode 100644 qliksense/charts/resource-library/templates/deployment.yaml create mode 100644 qliksense/charts/resource-library/templates/ingress.yaml create mode 100644 qliksense/charts/resource-library/templates/mongo-secret.yaml create mode 100644 qliksense/charts/resource-library/templates/pvc.yaml create mode 100644 qliksense/charts/resource-library/templates/sc.yaml create mode 100644 qliksense/charts/resource-library/templates/secret.yaml create mode 100644 qliksense/charts/resource-library/templates/service.yaml create mode 100644 qliksense/charts/resource-library/values.yaml create mode 100644 qliksense/charts/sense-client/.helmignore create mode 100644 qliksense/charts/sense-client/Chart.yaml create mode 100644 qliksense/charts/sense-client/README.md create mode 100644 qliksense/charts/sense-client/templates/NOTES.txt create mode 100644 qliksense/charts/sense-client/templates/_helper.tpl create mode 100644 qliksense/charts/sense-client/templates/configmap.yaml create mode 100644 qliksense/charts/sense-client/templates/deployment.yaml create mode 100644 qliksense/charts/sense-client/templates/ingress.yaml create mode 100644 qliksense/charts/sense-client/templates/service.yaml create mode 100644 qliksense/charts/sense-client/values.yaml create mode 100644 qliksense/charts/sharing/.helmignore create mode 100644 qliksense/charts/sharing/Chart.yaml create mode 100644 qliksense/charts/sharing/README.md create mode 100644 qliksense/charts/sharing/charts/mongodb/.helmignore create mode 100644 qliksense/charts/sharing/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/OWNERS create mode 100644 qliksense/charts/sharing/charts/mongodb/README.md create mode 100644 qliksense/charts/sharing/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 
qliksense/charts/sharing/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/sharing/charts/mongodb/values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/.helmignore create mode 100644 qliksense/charts/sharing/charts/redis/Chart.yaml create mode 100644 qliksense/charts/sharing/charts/redis/README.md create mode 100644 qliksense/charts/sharing/charts/redis/ci/default-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/dev-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/extra-flags-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/insecure-sentinel-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/production-sentinel-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/production-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/redis-lib-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/ci/redisgraph-module-values.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/NOTES.txt create mode 100644 qliksense/charts/sharing/charts/redis/templates/_helpers.tpl create mode 100644 qliksense/charts/sharing/charts/redis/templates/configmap.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/headless-svc.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/health-configmap.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/metrics-prometheus.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/metrics-svc.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/networkpolicy.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/prometheusrule.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/psp.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-master-svc.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-role.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-rolebinding.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-slave-svc.yaml create mode 100644 qliksense/charts/sharing/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 
100644 qliksense/charts/sharing/charts/redis/templates/secret.yaml create mode 100644 qliksense/charts/sharing/charts/redis/values-production.yaml create mode 100644 qliksense/charts/sharing/charts/redis/values.schema.json create mode 100644 qliksense/charts/sharing/charts/redis/values.yaml create mode 100644 qliksense/charts/sharing/requirements.yaml create mode 100644 qliksense/charts/sharing/templates/_helpers.tpl create mode 100644 qliksense/charts/sharing/templates/deployment.yaml create mode 100644 qliksense/charts/sharing/templates/hpa.yaml create mode 100644 qliksense/charts/sharing/templates/ingress.yaml create mode 100644 qliksense/charts/sharing/templates/mongo-secret.yaml create mode 100644 qliksense/charts/sharing/templates/prune-graph-cronjob.yaml create mode 100644 qliksense/charts/sharing/templates/pvc.yaml create mode 100644 qliksense/charts/sharing/templates/redis-secret.yaml create mode 100644 qliksense/charts/sharing/templates/sc.yaml create mode 100644 qliksense/charts/sharing/templates/service.yaml create mode 100644 qliksense/charts/sharing/templates/token-secret.yaml create mode 100644 qliksense/charts/sharing/values.yaml create mode 100644 qliksense/charts/spaces/.helmignore create mode 100644 qliksense/charts/spaces/Chart.yaml create mode 100644 qliksense/charts/spaces/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 
qliksense/charts/spaces/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/README.md create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/spaces/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/spaces/charts/messaging/values.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/.helmignore create mode 100644 qliksense/charts/spaces/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/OWNERS create mode 100644 qliksense/charts/spaces/charts/mongodb/README.md create mode 100644 qliksense/charts/spaces/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 
qliksense/charts/spaces/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/spaces/charts/mongodb/values.yaml create mode 100644 qliksense/charts/spaces/requirements.yaml create mode 100644 qliksense/charts/spaces/templates/_helper.tpl create mode 100644 qliksense/charts/spaces/templates/deployment.yaml create mode 100644 qliksense/charts/spaces/templates/hpa.yaml create mode 100644 qliksense/charts/spaces/templates/ingress.yaml create mode 100644 qliksense/charts/spaces/templates/mongo-secret.yaml create mode 100644 qliksense/charts/spaces/templates/service.yaml create mode 100644 qliksense/charts/spaces/templates/token-secret.yaml create mode 100644 qliksense/charts/spaces/values.yaml create mode 100644 qliksense/charts/subscriptions/Chart.yaml create mode 100644 qliksense/charts/subscriptions/README.md create mode 100644 qliksense/charts/subscriptions/charts/mongodb/.helmignore create mode 100644 qliksense/charts/subscriptions/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/OWNERS create mode 100644 qliksense/charts/subscriptions/charts/mongodb/README.md create mode 100644 qliksense/charts/subscriptions/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/secrets.yaml create mode 100644 
qliksense/charts/subscriptions/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/subscriptions/charts/mongodb/values.yaml create mode 100644 qliksense/charts/subscriptions/requirements.yaml create mode 100644 qliksense/charts/subscriptions/templates/_helpers.tpl create mode 100644 qliksense/charts/subscriptions/templates/deployment.yaml create mode 100644 qliksense/charts/subscriptions/templates/ingress.yaml create mode 100644 qliksense/charts/subscriptions/templates/mongo-secret.yml create mode 100644 qliksense/charts/subscriptions/templates/secret.yaml create mode 100644 qliksense/charts/subscriptions/templates/service.yaml create mode 100644 qliksense/charts/subscriptions/values.yaml create mode 100644 qliksense/charts/temporary-contents/.helmignore create mode 100644 qliksense/charts/temporary-contents/Chart.yaml create mode 100644 qliksense/charts/temporary-contents/README.md create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/.helmignore create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/OWNERS create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/README.md create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/templates/svc-standalone.yaml create mode 100644 
qliksense/charts/temporary-contents/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/temporary-contents/charts/mongodb/values.yaml create mode 100644 qliksense/charts/temporary-contents/requirements.yaml create mode 100644 qliksense/charts/temporary-contents/templates/_helper.tpl create mode 100644 qliksense/charts/temporary-contents/templates/deployment.yaml create mode 100644 qliksense/charts/temporary-contents/templates/ingress.yaml create mode 100644 qliksense/charts/temporary-contents/templates/mongo-secret.yaml create mode 100644 qliksense/charts/temporary-contents/templates/pvc.yaml create mode 100644 qliksense/charts/temporary-contents/templates/sc.yaml create mode 100644 qliksense/charts/temporary-contents/templates/service.yaml create mode 100644 qliksense/charts/temporary-contents/values.yaml create mode 100644 qliksense/charts/tenants/.helmignore create mode 100644 qliksense/charts/tenants/Chart.yaml create mode 100644 qliksense/charts/tenants/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/.helmignore create mode 100644 qliksense/charts/tenants/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 
qliksense/charts/tenants/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/README.md create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 
qliksense/charts/tenants/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/tenants/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/tenants/charts/messaging/values.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/.helmignore create mode 100644 qliksense/charts/tenants/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/OWNERS create mode 100644 qliksense/charts/tenants/charts/mongodb/README.md create mode 100644 qliksense/charts/tenants/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/tenants/charts/mongodb/values.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/tenants/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/README.md create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_envvar.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_fullname.tpl create mode 
100644 qliksense/charts/tenants/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/tenants/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/tenants/requirements.yaml create mode 100644 qliksense/charts/tenants/templates/manifest.yaml create mode 100644 qliksense/charts/tenants/values.yaml create mode 100644 qliksense/charts/transport/Chart.yaml create mode 100644 qliksense/charts/transport/README.md create mode 100644 qliksense/charts/transport/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/transport/charts/messaging/README.md create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 
qliksense/charts/transport/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/transport/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/README.md create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/transport/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/transport/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/transport/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/transport/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/transport/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 
qliksense/charts/transport/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/transport/charts/messaging/values.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/.helmignore create mode 100644 qliksense/charts/transport/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/OWNERS create mode 100644 qliksense/charts/transport/charts/mongodb/README.md create mode 100644 qliksense/charts/transport/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/transport/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/transport/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/transport/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/transport/charts/mongodb/values.yaml create mode 100644 qliksense/charts/transport/requirements.yaml create mode 100644 qliksense/charts/transport/templates/_helper.tpl create mode 100644 qliksense/charts/transport/templates/deployment.yaml create mode 100644 qliksense/charts/transport/templates/hpa.yaml create mode 100644 qliksense/charts/transport/templates/ingress.yaml create mode 100644 qliksense/charts/transport/templates/mongo-secret.yaml create mode 100644 qliksense/charts/transport/templates/network.yaml create mode 100644 qliksense/charts/transport/templates/service.yaml create mode 100644 qliksense/charts/transport/values.yaml create mode 100644 qliksense/charts/users/.helmignore create mode 100644 qliksense/charts/users/Chart.yaml create mode 100644 qliksense/charts/users/README.md create mode 100644 qliksense/charts/users/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/users/charts/messaging/README.md create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 
qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/users/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/README.md create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 
qliksense/charts/users/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/users/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/users/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/users/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/users/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/users/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/users/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/users/charts/messaging/values.yaml create mode 100644 qliksense/charts/users/charts/mongodb/.helmignore create mode 100644 qliksense/charts/users/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/users/charts/mongodb/OWNERS create mode 100644 qliksense/charts/users/charts/mongodb/README.md create mode 100644 qliksense/charts/users/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/users/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/users/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/users/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/users/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/users/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/users/charts/mongodb/values.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/.helmignore create mode 100644 qliksense/charts/users/charts/qlikcommon/Chart.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/README.md create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_certificates.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_chartref.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_configmap.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_container.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_deployment.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_envvar.tpl create mode 
100644 qliksense/charts/users/charts/qlikcommon/templates/_fullname.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_hpa.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_image.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_ingress.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_initContainer.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_metadata.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_metadata_annotations.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_metadata_labels.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_name.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_networkpolicy.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_persistentvolumeclaim.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_persistentvolumeclaims.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_podSecurityPolicy.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_role.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_rolebinding.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_secret.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_service.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_serviceaccount.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_statefulset.yaml create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_transformers.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_util.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/templates/_volume.tpl create mode 100644 qliksense/charts/users/charts/qlikcommon/values.yaml create mode 100644 qliksense/charts/users/requirements.yaml create mode 100644 qliksense/charts/users/templates/manifest.yaml create mode 100644 qliksense/charts/users/values.yaml create mode 100644 qliksense/charts/web-notifications/.helmignore create mode 100644 qliksense/charts/web-notifications/Chart.yaml create mode 100644 qliksense/charts/web-notifications/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/.helmignore create mode 100644 qliksense/charts/web-notifications/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/README.md create mode 100644 
qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 
qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/README.md create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/web-notifications/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/web-notifications/charts/messaging/values.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/.helmignore create mode 100644 qliksense/charts/web-notifications/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/OWNERS create mode 100644 qliksense/charts/web-notifications/charts/mongodb/README.md create mode 100644 qliksense/charts/web-notifications/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 
qliksense/charts/web-notifications/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/pvc-standalone.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/web-notifications/charts/mongodb/values.yaml create mode 100644 qliksense/charts/web-notifications/requirements.yaml create mode 100644 qliksense/charts/web-notifications/templates/_helpers.tpl create mode 100644 qliksense/charts/web-notifications/templates/deployment.yaml create mode 100644 qliksense/charts/web-notifications/templates/ingress.yaml create mode 100644 qliksense/charts/web-notifications/templates/mongo-secret.yaml create mode 100644 qliksense/charts/web-notifications/templates/secret.yaml create mode 100644 qliksense/charts/web-notifications/templates/service.yaml create mode 100644 qliksense/charts/web-notifications/values.yaml create mode 100644 qliksense/charts/web-security/Chart.yaml create mode 100644 qliksense/charts/web-security/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/.helmignore create mode 100644 qliksense/charts/web-security/charts/messaging/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/_helpers.tpl create mode 100644 
qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/sc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats-streaming/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/configmap.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/ingress.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/charts/nats/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/templates/_helper.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/templates/deployment.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/templates/service.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/message-delivery-monitor/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/_helpers.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/pvc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/templates/sc.yaml create mode 100644 
qliksense/charts/web-security/charts/messaging/nats-streaming/templates/statefulset.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats-streaming/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/README.md create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/_helpers.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/client-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/cluster-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/configmap.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/headless-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/ingress.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/monitoring-svc.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/networkpolicy.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/statefulset.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/templates/tls-secret.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/nats/values.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/requirements.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/templates/_helper.tpl create mode 100644 qliksense/charts/web-security/charts/messaging/templates/message-delivery-monitor-secret.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/templates/nats-secret.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/templates/networkpolicy-nats-streaming.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/templates/networkpolicy-nats.yaml create mode 100644 qliksense/charts/web-security/charts/messaging/values.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/.helmignore create mode 100644 qliksense/charts/web-security/charts/mongodb/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/OWNERS create mode 100644 qliksense/charts/web-security/charts/mongodb/README.md create mode 100644 qliksense/charts/web-security/charts/mongodb/files/docker-entrypoint-initdb.d/README.md create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/_helpers.tpl create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/configmap.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/deployment-standalone.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/headless-svc-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/initialization-configmap.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/pvc-standalone.yaml create 
mode 100644 qliksense/charts/web-security/charts/mongodb/templates/secrets.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/statefulset-arbiter-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/statefulset-primary-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/statefulset-secondary-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/svc-primary-rs.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/templates/svc-standalone.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/values-production.yaml create mode 100644 qliksense/charts/web-security/charts/mongodb/values.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/.helmignore create mode 100644 qliksense/charts/web-security/charts/sense-client/Chart.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/README.md create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/NOTES.txt create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/_helper.tpl create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/configmap.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/deployment.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/ingress.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/templates/service.yaml create mode 100644 qliksense/charts/web-security/charts/sense-client/values.yaml create mode 100644 qliksense/charts/web-security/requirements.yaml create mode 100644 qliksense/charts/web-security/templates/_helper.tpl create mode 100644 qliksense/charts/web-security/templates/deployment.yaml create mode 100644 qliksense/charts/web-security/templates/hpa.yaml create mode 100644 qliksense/charts/web-security/templates/ingress.yml create mode 100644 qliksense/charts/web-security/templates/mongo-secret.yaml create mode 100644 qliksense/charts/web-security/templates/rollbar-secret.yaml create mode 100644 qliksense/charts/web-security/templates/service.yaml create mode 100644 qliksense/charts/web-security/templates/token-secret.yaml create mode 100644 qliksense/charts/web-security/values.yaml create mode 100644 qliksense/requirements.yaml create mode 100644 qliksense/templates/NOTES.txt create mode 100644 qliksense/templates/_helpers.tpl create mode 100644 qliksense/templates/ca-cert-configmap.yaml create mode 100644 qliksense/templates/certs-job.yaml create mode 100644 qliksense/templates/certs-pvc.yaml create mode 100644 qliksense/templates/mongo-secret.yaml create mode 100644 qliksense/templates/qliksense-secrets.yaml create mode 100644 qliksense/templates/redis-secret.yaml create mode 100644 qliksense/templates/sc.yaml create mode 100644 qliksense/values.yaml create mode 100644 qseok-values.yaml diff --git a/keycloak-values.yaml b/keycloak-values.yaml new file mode 100644 index 0000000..53d655e --- /dev/null +++ b/keycloak-values.yaml @@ -0,0 +1,398 @@ +init: + image: + repository: busybox + tag: 1.31 + pullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: "10m" + # memory: "32Mi" + # requests: + # cpu: "10m" + # memory: "32Mi" + +clusterDomain: cluster.local + +## Optionally override the fully qualified name +# fullnameOverride: keycloak + +## Optionally override the name +# nameOverride: keycloak + +keycloak: + replicas: 1 + + image: + repository: jboss/keycloak + tag: 9.0.2 + 
pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + # - myRegistrKeySecretName + + hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + + proxyAddressForwarding: true + + enableServiceLinks: false + + podManagementPolicy: Parallel + + restartPolicy: Always + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + + securityContext: + fsGroup: 1001 + + containerSecurityContext: + runAsUser: 1001 + runAsNonRoot: true + + ## The path keycloak will be served from. To serve keycloak from the root path, use two quotes (e.g. ""). + basepath: auth + + ## Additional init containers, e. g. for providing custom themes + extraInitContainers: | + + ## Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy + extraContainers: | + + ## lifecycleHooks defines the container lifecycle hooks + lifecycleHooks: | + # postStart: + # exec: + # command: ["/bin/sh", "-c", "ls"] + + ## Override the default for the Keycloak container, e.g. for clusters with large cache that requires rebalancing. + terminationGracePeriodSeconds: 60 + + ## Additional arguments to start command e.g. -Dkeycloak.import= to load a realm + extraArgs: "-Dkeycloak.profile.feature.upload_scripts=enabled" + + ## Username for the initial Keycloak admin user + username: keycloak + + ## Password for the initial Keycloak admin user. Applicable only if existingSecret is not set. + ## If not set, a random 10 characters password will be used + password: "keycloak" + + # Specifies an existing secret to be used for the admin password + existingSecret: "" + + # The key in the existing secret that stores the password + existingSecretKey: password + + ## jGroups configuration (only for HA deployment) + jgroups: + discoveryProtocol: dns.DNS_PING + discoveryProperties: > + "dns_query={{ template "keycloak.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + javaToolOptions: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + + ## Allows the specification of additional environment variables for Keycloak + extraEnv: | + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS + # value: "2" + # - name: DB_QUERY_TIMEOUT + # value: "60" + # - name: DB_VALIDATE_ON_MATCH + # value: true + # - name: DB_USE_CAST_FAIL + # value: false + + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: role + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 12 }} + matchExpressions: + - key: role + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + + nodeSelector: {} + priorityClassName: "" + tolerations: [] + + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + + ## Extra Annotations to be added to pod + podAnnotations: {} + + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + + resources: {} + # limits: + # cpu: "100m" + # memory: "1024Mi" + # requests: + # cpu: "100m" + # memory: "1024Mi" + + ## WildFly CLI configurations. They all end up in the file 'keycloak.cli' configured in the configmap which is + ## executed on server startup. + cli: + enabled: false + nodeIdentifier: | + {{ .Files.Get "scripts/node-identifier.cli" }} + + logging: | + {{ .Files.Get "scripts/logging.cli" }} + + ha: | + {{ .Files.Get "scripts/ha.cli" }} + + datasource: | + {{ .Files.Get "scripts/datasource.cli" }} + + # Custom CLI script + custom: | + + ## Custom startup scripts to run before Keycloak starts up + startupScripts: {} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + + ## Add additional volumes and mounts, e. g. for custom themes + extraVolumes: | + extraVolumeMounts: | + + ## Add additional ports, eg. for custom admin console + extraPorts: | + + podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 + + service: + annotations: {} + # service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0" + + labels: {} + # key: value + + ## ServiceType + ## ref: https://kubernetes.io/docs/user-guide/services/#publishing-services---service-types + type: ClusterIP + + ## Optional static port assignment for service type NodePort. + # nodePort: 30000 + + httpPort: 80 + httpNodePort: "" + + httpsPort: 8443 + httpsNodePort: "" + + # Optional: jGroups port for high availability clustering + jgroupsPort: 7600 + + ## Ingress configuration. + ## ref: https://kubernetes.io/docs/user-guide/ingress/ + ingress: + enabled: true + path: / + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # ingress.kubernetes.io/affinity: cookie + + labels: {} + # key: value + + ## List of hosts for the ingress + hosts: + - keycloak.example.com + + ## TLS configuration + tls: [] + # - hosts: + # - keycloak.example.com + # secretName: tls-keycloak + + ## OpenShift route configuration. + ## ref: https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html + route: + enabled: false + path: / + + annotations: {} + # kubernetes.io/tls-acme: "true" + # haproxy.router.openshift.io/disable_cookies: "true" + # haproxy.router.openshift.io/balance: roundrobin + + labels: {} + # key: value + + # Host name for the route + host: + + # TLS configuration + tls: + enabled: false + insecureEdgeTerminationPolicy: Redirect + termination: edge + + ## Persistence configuration + persistence: + # If true, the Postgres chart is deployed + deployPostgres: false + + # The database vendor. 
Can be either "postgres", "mysql", "mariadb", or "h2" + dbVendor: postgres + + ## The following values only apply if "deployPostgres" is set to "false" + dbName: keycloak + dbHost: psql-postgresql.psql.svc.cluster.local + dbPort: 5432 + + ## Database Credentials are loaded from a Secret residing in the same Namespace as keycloak. + ## The Chart can read credentials from an existing Secret OR it can provision its own Secret. + + ## Specify existing Secret + # If set, specifies the Name of an existing Secret to read db credentials from. + #existingSecret: "" + #existingSecretPasswordKey: "" # read keycloak db password from existingSecret under this Key + #existingSecretUsernameKey: "" # read keycloak db user from existingSecret under this Key + + ## Provision new Secret + # Only used if existingSecret is not specified. In this case a new secret is created + # populated by the variables below. + dbUser: keycloak + dbPassword: "keycloak" + +postgresql: + ### PostgreSQL User to create. + ## + postgresqlUsername: keycloak + + ## PostgreSQL Password for the new user. + ## If not set, a random 10 characters password will be used. + ## + postgresqlPassword: "keycloak" + + ## PostgreSQL Database to create. + ## + postgresqlDatabase: keycloak + + ## Persistent Volume Storage configuration. + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes + ## + persistence: + ## Enable PostgreSQL persistence using Persistent Volume Claims. + ## + enabled: true + +test: + enabled: false + image: + repository: unguiculus/docker-python3-phantomjs-selenium + tag: v1 + pullPolicy: IfNotPresent + securityContext: + fsGroup: 1000 + containerSecurityContext: + runAsUser: 1000 + runAsNonRoot: true + +prometheus: + operator: + ## Are you using Prometheus Operator? + enabled: false + + serviceMonitor: + ## Optionally set a target namespace in which to deploy serviceMonitor + namespace: "" + + ## Additional labels to add to the ServiceMonitor so it is picked up by the operator. + ## If using the [Helm Chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator) this is the name of the Helm release. + selector: + release: prometheus + + ## Interval at which Prometheus scrapes metrics + interval: 10s + + ## Timeout at which Prometheus timeouts scrape run + scrapeTimeout: 10s + + ## The path to scrape + path: /auth/realms/master/metrics + + prometheusRules: + ## Add Prometheus Rules? + enabled: false + + ## Additional labels to add to the PrometheusRule so it is picked up by the operator. + ## If using the [Helm Chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator) this is the name of the Helm release and 'app: prometheus-operator' + selector: + app: prometheus-operator + release: prometheus + + ## Some example rules. + rules: {} + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. + # expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m]))/sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m])))*100 > 1 + # for: 5m + # labels: + # severity: warning + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 5%. 
+      #    expr: (sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m]))/sum(rate(nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m])))*100 > 5
+      #    for: 5m
+      #    labels:
+      #      severity: critical
+
diff --git a/mongodb-consolo.yaml b/mongodb-consolo.yaml
new file mode 100644
index 0000000..e34961a
--- /dev/null
+++ b/mongodb-consolo.yaml
@@ -0,0 +1,521 @@
+---
+# Source: mongodb/templates/poddisruptionbudget-arbiter-rs.yaml
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    heritage: Tiller
+    release: mongodb-qlik
+  name: mongodb-qlik-arbiter
+spec:
+  minAvailable: 1
+  selector:
+    matchLabels:
+      app: mongodb
+      release: mongodb-qlik
+      component: arbiter
+---
+# Source: mongodb/templates/poddisruptionbudget-secondary-rs.yaml
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    heritage: Tiller
+    release: mongodb-qlik
+  name: mongodb-qlik-secondary
+spec:
+  minAvailable: 1
+  selector:
+    matchLabels:
+      app: mongodb
+      release: mongodb-qlik
+      component: secondary
+
+---
+# Source: mongodb/templates/secrets.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mongodb-qlik
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    release: "mongodb-qlik"
+    heritage: "Tiller"
+type: Opaque
+data:
+  mongodb-root-password: "OFh2Z0p6Sk5GcQ=="
+  mongodb-replica-set-key: "RWRyQjNaWWY2dg=="
+
+---
+# Source: mongodb/templates/serviceaccount.yml
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: mongodb-qlik
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    release: "mongodb-qlik"
+    heritage: "Tiller"
+secrets:
+  - name: mongodb-qlik
+
+---
+# Source: mongodb/templates/svc-headless-rs.yaml
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: mongodb-qlik-headless
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    release: "mongodb-qlik"
+    heritage: "Tiller"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  ports:
+    - name: mongodb
+      port: 27017
+  selector:
+    app: mongodb
+    release: mongodb-qlik
+
+---
+# Source: mongodb/templates/svc-primary-rs.yaml
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: svc-mongo
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    release: "mongodb-qlik"
+    heritage: "Tiller"
+spec:
+  type: ClusterIP
+  ports:
+    - name: mongodb
+      port: 27017
+      targetPort: mongodb
+    - name: metrics
+      port: 9216
+      targetPort: metrics
+  selector:
+    app: mongodb
+    release: "mongodb-qlik"
+    component: primary
+
+---
+# Source: mongodb/templates/statefulset-arbiter-rs.yaml
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: mongodb-qlik-arbiter
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    heritage: Tiller
+    release: mongodb-qlik
+spec:
+  selector:
+    matchLabels:
+      app: mongodb
+      release: mongodb-qlik
+      component: arbiter
+  serviceName: mongodb-qlik-headless
+  replicas: 1
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: mongodb
+        chart: mongodb-7.10.11
+        release: mongodb-qlik
+        component: arbiter
+    spec:
+      serviceAccountName: mongodb-qlik
+      securityContext:
+        fsGroup: 1001
+      containers:
+        - name: mongodb-arbiter
+          image: docker.io/bitnami/mongodb:4.2.5-debian-10-r44
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 1001
+          ports:
+            - containerPort: 27017
+              name: mongodb
+          env:
+            - name: MONGODB_SYSTEM_LOG_VERBOSITY
+              value: "0"
+            - name: MONGODB_DISABLE_SYSTEM_LOG
+              value: "yes"
+            - name: MONGODB_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: MONGODB_REPLICA_SET_MODE
+              value: "arbiter"
+            - name: MONGODB_PRIMARY_HOST
+              value: svc-mongo
+            - name: MONGODB_REPLICA_SET_NAME
+              value: "rs0"
+            - name: MONGODB_ADVERTISED_HOSTNAME
+              value: "$(MONGODB_POD_NAME).mongodb-qlik-headless.qlik.svc.cluster.local"
+            - name: MONGODB_PRIMARY_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-root-password
+            - name: MONGODB_REPLICA_SET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-replica-set-key
+            - name: MONGODB_ENABLE_IPV6
+              value: "no"
+            - name: MONGODB_ENABLE_DIRECTORY_PER_DB
+              value: "no"
+          livenessProbe:
+            tcpSocket:
+              port: mongodb
+            initialDelaySeconds: 30
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          readinessProbe:
+            tcpSocket:
+              port: mongodb
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          volumeMounts:
+          resources:
+            {}
+
+      volumes:
+
+---
+# Source: mongodb/templates/statefulset-primary-rs.yaml
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: mongodb-qlik-primary
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    heritage: Tiller
+    release: mongodb-qlik
+spec:
+  serviceName: mongodb-qlik-headless
+  replicas: 1
+  updateStrategy:
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      app: mongodb
+      release: mongodb-qlik
+      component: primary
+  template:
+    metadata:
+      labels:
+        app: mongodb
+        chart: mongodb-7.10.11
+        release: mongodb-qlik
+        component: primary
+      annotations:
+        prometheus.io/port: "9216"
+        prometheus.io/scrape: "true"
+
+    spec:
+      serviceAccountName: mongodb-qlik
+      securityContext:
+        fsGroup: 1001
+      initContainers:
+      containers:
+        - name: mongodb-primary
+          image: docker.io/bitnami/mongodb:4.2.5-debian-10-r44
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 1001
+          ports:
+            - containerPort: 27017
+              name: mongodb
+          env:
+            - name: MONGODB_SYSTEM_LOG_VERBOSITY
+              value: "0"
+            - name: MONGODB_DISABLE_SYSTEM_LOG
+              value: "yes"
+            - name: MONGODB_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: MONGODB_REPLICA_SET_MODE
+              value: "primary"
+            - name: MONGODB_REPLICA_SET_NAME
+              value: "rs0"
+            - name: MONGODB_ADVERTISED_HOSTNAME
+              value: "$(MONGODB_POD_NAME).mongodb-qlik-headless.qlik.svc.cluster.local"
+            - name: MONGODB_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-root-password
+            - name: MONGODB_REPLICA_SET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-replica-set-key
+            - name: MONGODB_ENABLE_IPV6
+              value: "no"
+            - name: MONGODB_ENABLE_DIRECTORY_PER_DB
+              value: "no"
+          livenessProbe:
+            exec:
+              command:
+                - pgrep
+                - mongod
+            initialDelaySeconds: 30
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          readinessProbe:
+            exec:
+              command:
+                - mongo
+                - --eval
+                - "db.adminCommand('ping')"
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          volumeMounts:
+            - name: datadir
+              mountPath: /bitnami/mongodb
+              subPath:
+          resources:
+            {}
+
+        - name: metrics
+          image: docker.io/bitnami/mongodb-exporter:0.10.0-debian-9-r24
+          imagePullPolicy: "Always"
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 1001
+          env:
+            - name: MONGODB_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-root-password
+          command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:27017/admin ' ]
+          ports:
+            - name: metrics
+              containerPort: 9216
+          resources:
+            null
+
+      volumes:
+  volumeClaimTemplates:
+    - metadata:
+        name: datadir
+        annotations:
+      spec:
+        accessModes:
+          - "ReadWriteOnce"
+        resources:
+          requests:
+            storage: "8Gi"
+
+
+---
+# Source: mongodb/templates/statefulset-secondary-rs.yaml
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: mongodb-qlik-secondary
+  labels:
+    app: mongodb
+    chart: mongodb-7.10.11
+    heritage: Tiller
+    release: mongodb-qlik
+spec:
+  selector:
+    matchLabels:
+      app: mongodb
+      release: mongodb-qlik
+      component: secondary
+  podManagementPolicy: "Parallel"
+  serviceName: mongodb-qlik-headless
+  replicas: 1
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: mongodb
+        chart: mongodb-7.10.11
+        release: mongodb-qlik
+        component: secondary
+      annotations:
+        prometheus.io/port: "9216"
+        prometheus.io/scrape: "true"
+
+    spec:
+      serviceAccountName: mongodb-qlik
+      securityContext:
+        fsGroup: 1001
+      initContainers:
+      containers:
+        - name: mongodb-secondary
+          image: docker.io/bitnami/mongodb:4.2.5-debian-10-r44
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 1001
+          ports:
+            - containerPort: 27017
+              name: mongodb
+          env:
+            - name: MONGODB_SYSTEM_LOG_VERBOSITY
+              value: "0"
+            - name: MONGODB_DISABLE_SYSTEM_LOG
+              value: "yes"
+            - name: MONGODB_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: MONGODB_REPLICA_SET_MODE
+              value: "secondary"
+            - name: MONGODB_PRIMARY_HOST
+              value: mongodb-qlik
+            - name: MONGODB_REPLICA_SET_NAME
+              value: "rs0"
+            - name: MONGODB_ADVERTISED_HOSTNAME
+              value: "$(MONGODB_POD_NAME).mongodb-qlik-headless.qlik.svc.cluster.local"
+            - name: MONGODB_PRIMARY_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-root-password
+            - name: MONGODB_REPLICA_SET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-replica-set-key
+            - name: MONGODB_ENABLE_IPV6
+              value: "no"
+            - name: MONGODB_ENABLE_DIRECTORY_PER_DB
+              value: "no"
+          livenessProbe:
+            exec:
+              command:
+                - pgrep
+                - mongod
+            initialDelaySeconds: 30
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          readinessProbe:
+            exec:
+              command:
+                - mongo
+                - --eval
+                - "db.adminCommand('ping')"
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          volumeMounts:
+            - name: datadir
+              mountPath: /bitnami/mongodb
+              subPath:
+          resources:
+            {}
+
+        - name: metrics
+          image: docker.io/bitnami/mongodb-exporter:0.10.0-debian-9-r24
+          imagePullPolicy: "Always"
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 1001
+          env:
+            - name: MONGODB_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-qlik
+                  key: mongodb-root-password
+          command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:27017/admin ' ]
+          ports:
+            - name: metrics
+              containerPort: 9216
+          resources:
+            null
+
+      volumes:
+  volumeClaimTemplates:
+    - metadata:
+        name: datadir
+        annotations:
+      spec:
+        accessModes:
+          - "ReadWriteOnce"
+        resources:
+          requests:
+            storage: "8Gi"
+
+
+---
+# Source: mongodb/templates/configmap.yaml
+
+---
+# Source: mongodb/templates/deployment-standalone.yaml
+
+---
+# Source: mongodb/templates/ingress.yaml
+
+
+---
+# Source: mongodb/templates/initialization-configmap.yaml
+
+
+--- +# Source: mongodb/templates/prometheus-alerting-rule.yaml + + +--- +# Source: mongodb/templates/prometheus-service-monitor.yaml + + +--- +# Source: mongodb/templates/pvc-standalone.yaml + + +--- +# Source: mongodb/templates/svc-standalone.yaml + + diff --git a/mongodb-consolo.zip b/mongodb-consolo.zip new file mode 100644 index 0000000000000000000000000000000000000000..e66cd06d964b25a89aad8fcadfa3bc2f6602e1f5 GIT binary patch literal 110544 zcmaHT1CS`elI_^GZQHhO+qP}no;$W}&mG&g%{w#yzPB6yz1_cmJEA%=8X28c-I-Zg z=d^+}FbEXDKejk$UB!P2|G9$zzyYwew==glF``#hfdl~l4VkTv29>Sv>H!S^2yzAt z0PxS9!e0$2fA4_+h!@sUMDXydZvK1f5eWbQ>)$lE8QQp-I@38DTbSA!(pfp%+x=7L zO@Erc6VU&ob6Z{7ev<*F_f#EWJ#MVCkQNf>c-Ss zO3V0(#)OeeVk)L&aZj*v$rSIX1G!4v4Tv2MBrdcm%T7RpgE6Br6B69H8IBLli_{b+ zsjh}~16xC0BcPR$`L|Ov>8k@`^p3`Bj7Nb=zQyk80g zHIWdskZI(cXOx9m~ zQ}pBKF5axI!sVEKR-l(&H`tX;9_BgVw?qkb4+bq>AtD?H)6xab6&D&9xmKy)WOniW9IV{Zh5UiGYh%#5W{ZUk6-m%ded!=_#Mxh8?ra$ z-|n0U(t2cMt4pjHC1g3Pd3vYiRTO>Wixu=um2YX?U8%uw2J9eQ7$(d)Cq8~NN=%d& zwvAr}ZRN&h5xxj~bChjmD>H^=FdH|mZYXSZ4faopOd`_k8G=?H4$q2DdQbtJ&I^ba z<<61U({sm@H#Mz;e65?CM!ds?rLDCyUyH9bRBA!_Vvh2`kw{Fg)OnY=n-gWH*N~U1 zI-cs>y$oe0TY0IWS2`aI@m+Gm!Tg+D7aUzDBZHq`MEdq9D-=5nh}^qZx(26CEkibs zPT?PR(3fl+==LAK`ka(}h=~YUjd6TdR=bIzanVHdD?%gjxBSrS-?-D+Xdt7Kw_7ki zWD%eyJ%3C>AS&?F4U0ZUm@TG%p+UV_rJ?&82qP&UX+^Y3ZSPI>G>7SuW^hyJGC{#9 zEt}o&zw5gop*fzldDHozqRM^$KW* zz&o<_Z;IWp-M{A2qrf27W|gn{JSna>Ke8az757yRr(FAdjXqO*Q8DH`H}^WQ&3!{@ z{a#TLy0G&9HfOx}T-n4C|8{5mfc~$3K;9`^z2dJ|*!bHp|IH6LnL4^!I+@y<+POH> z+1MLf|8H)9Hk#XRm;eR9TQ0Cwe`?HN52s>**JIWiDj1YP$qqkCLRp3}_(?&uvSTjd zYMOcr#nbL}o9eK8aZxO@=10tL`aP!I_C1I%T*9=rJ!0AJr#B4$qPQ8W14WM_>{!@X zjE`<^MO1(kmm@SoJ|B)g)^Rj1+;!y!QjmDPpnJlAB$tr0&6+__`9aJa_)0jHOJ;Cf zU(4!U4f>1JRA-G~&@TkR?e)vZ|G(Pat)1e2@YilxKmY*bf3uy=!qmpr(%jD8$@G7- zS~h+_4ww-}_rZQ znmqvqg+o1rlxT=S`C%9^3R|Aepver_J`thV*6!MVmm9Uc5X?4cn=!ZZD>}>B&k2yY)Y??Bt+XQC&94;6Q~VY@oe~~) zk9FS5;F1$~oDq$Ov(74c(#QYys$f1S)aR1{0Q}ws06_e=?otvJ6pMKiBLdth77JPh3W!H+gkD@DXB-VM%uyvt3M<7d_qTJ=(mdP@$2_^ zyowdR5k8@OGWoi*oPOzP7^Xp3V+xZLX;N^n?{?X2jeP!lNKeYX5}z**R?l*uuY0}J zA_XTWUvF=3UiEz;d-l}v$6LZX~0kWB5|7Zz1EX$&90XXd{#1kGDaJy@E< zlfmPCBjF(?guj6UPd%-cC4U}fO1zi7eXXy;X!QiKnLzwbG*Ki0#75$XA%x~d1jp7P z;@278vZI&P6bUf{wCoHR{8b|7Q+X0~0`kMM1O-j8#zMY(E@|rRQgXk#7Ohj~$hKTW zmKHA&I+Tea?15ECj>ZG6WI>?w_FLxKUB^r#fEjOy!g0Hg`;Da0AE!}d&dw+>|5udw z@$O(g4=anMmMC7vh!qxO%7q!r2X2@9Z43T(m;0cefHBOyRej_RwsIECmDKeqd*Lq2 zZRY#MS}>cEpng~K6@ss8uWN3%hpU%yLz+4RQ&am*_oOQbNk}!SMt9C959=o*IdF>z zpsGW;VB(`Ck3Jx&^~WSTmMgg+hK-3?)$I*U{Jcj1^3ekzo1#eO8}kKKB*+WwYjuRmGgPkpIo zIfbnLud98_KC6aumCOaF{Zj(Ibt*?fH4CjoS2AAH#`bvx=3EcBvY(1g-c6XWjA-X^ z5K!)P-$YGI^JoUMK+Q-3cd;?xW6qe-#@j;}ofyulj~J47c%NpZDExG+ zF;Jxtisz4UIDAHJKz2UiABOq)o&MIEIvY@sc3GtGzEzBSZjna*(}%ExzN~%$GJ1o2 zA>@TmB2FG6yLVQPH+qZZxR|UcgtjI3AJXCys3O{eE zI|1K@XGQJM0l^XYdyE@E!(#Ogui;a71Vcevt*^#Bw;fuK9M)B;??riGzW8i+SAE|# zr3R7FkkXBX!Xix|J0buy$@=rl=TpVLfNY7!xJd4Z248SZOW>P zL8)?4^mv#>52f2I#VCB&rrGX%Pc)5?swI&NPPqDa{}(yY4{V^C?1`W3ZuXyn5x4~#)&x-%49Lwh~a^%RuS0Jgh zVZ7N?Y&1RsM1wJBmD-}6`ntsJhiYbB&ooCv#~xfF2%JIDm4?G;!KU~MNp(`8R9539 z5Y|^^mH}yWThQhnnizm8zC5oW>-$EZXc7n_^$9)#>`KVAVobP(&xC1i?M6C3BuEvz zl9t1!i#n=NS)dRAiX5@2q|GPiYOHW=NWi*Zya4e5`R14OM;K{+Xku?))e>hT$*cQ_T(!-_jL~U~ z;g!fQDoiOq6ay8=7^1Cl_#;9^yYYX#%MnT9`j$Jb_d70q zmXU2nQ@wHbEAX}LZ&Be3^wA2(wBMcLYknR;#vUL)c@hL1gQ0&NDmMPMID0A%^sQ_E3 zsGc|QJ?F{_^Oxg_mtB~em>0b2M|i?$*|p3wF-21rV#!?VP7@zyyaGW3dw*`H;Cdi! 
zh|GkD=UCAZv&gCB|H`K(#lSEl={c&8qLDO(c|KcekE#riK?hX>HC+bG=i6#I)gG7h z@&(gj&bp779Z@ep8#Yi=)pAWh911^A>a;l~iPid$OhlaZAzHQGbK`%U3S3{a%O~_8 zLsBI+MJu}>Od_xDQVXFe%t{7$lA24jubV~qoQ?T~ni~y6y^Te+tm$v^Ve(m_N zQ3CZzIDwDPVGbn*G?~wv@T|v-6rJpmt!Bj4L|3)BBqd|6jC#O)f1Jk1s^*?3`uF2v zVK=5G#x@gXbz`F)NnnYqYW0evIsV;RX=n&D-u@X>tF?X8lea*(gHqx=lRS=eira~j=a!1mU=19yG{-?Vo9<7>c z*Z533Y;o7l(~%SPeGHll>VS zbptTW(NTF~A&f#z=G{9bR~BW21H8f~)evWSxoK*a;P!$SY=hGK)(ybqj(L0NECO{& z2ZKXBy=Zcp$Iu=(YHIvWJ5tx&{;HgS0P*k3!G~}S%6ni6ue*{3giA%h*Ts?;J_&Oo zw|Ap`LJ5Q6t34%~BxJT_x6w7FSH*7_Pvw1Rvpz>5(n3O!9q0WAR_lhMXt8Ea_KO}7 z*<9Jfqi=wMUIuZ6XIssN=cnaRXB|~iVb{i`bS&>kTGFumeBH0k5>N$(fIlbHOSCE_ z#7Qd|VSXon>T$DpAxad7qk_fmY}FzTT+!Di@ea$cW47`?mzoRmLe=#jJ-BI9Hs&iR2dVkf2st%sJ+7(>id41- z!thtN+s6zO7S|05^18PcX4KMZArbMRfN=?Lp{*ev77JQ6)m;lz-f1J??)=y`CKad= z3D1!StfYE#d0G~k6y^G5k!rm&>CR`?M~*4&XVWIJ9DyyHN?~_OVrOri>;lGs8GWG zZO`ZOer36>s&G&fbL&yT#719&BfT+SF2ei8n>l%+@nx#KGQPOV7Z^WHv0bg;T^0`z z7c;q~i8ghTTlNhC&h@_ZK(C9U(4Ug=>bj}~U`)ehr&5J~x)A_5Ut58KQwKkRy_mO| zYV!(|FeBQtbG&lVec=lzPE7p80I@v9Hlb9U=0>pVw^IbROiS?=Q9q3xxx5>Z9;Mk- z(YeX6d%;HXblhxNTbvcvuWjd`W&~A9J6H{P1xJVyHF?6L0{dRt+86B3oa$+3RPKRr zPb@xzFOKi01T(9IVQn!LR3ef_Fg@A=s>sU$U{(}JCNycxX-|jgT@<^Q_KwmQ=q?0R z`*)7Uc>AC$vEA&VcK9lLV}iEa={HuZMmaH1((MG8t}aJO6;7`&Re`DKXty28y@H|` zz7S$+ReRXMhhEO2lFdGs;FP z&8>5J_BoLHKMgq323@riijyCfNvWYgVnIQFvisld%5)g&!MlK|f)4n*Aw8jK07m@R zK$o)Y6^!+r;h8ZvPP&3Fymual7k<*r#>Mx!HXBO2TqiN9-CEhB3O*>BGro*>PHgHx zORRUG{`U8H_MaA*#uksr-agvIeSJ|Vxq8D$I3)F63uoL7L7(GhjRC8&LPW>gU_som8=+9;f{qzNen!eELP^^qkgqRm-8V)gDt_0Ihk?V-MygbT(BR_S0#wGoYXFTjsRU(mk6B-5Yek@?Ka6?r1-yU4$95u^{Feku2$KvJrY5L_AgQH*75ZZ ze?GFTO|z{3fVsLW+)Q8C6-c1nf=dK5rI)#4>lV6#s&=bv!=mg|?%*SD{_MD@D~x})__hp1aX8>~u`=W!nEAjEZU6H-o5uSkKrLYCKt zGHgMVR8^P0;A`;{kkfuCKM84%M?_0DoFd>dvsMeNieldv2FeDQf}XKnE_SKY4MX}- zKM*0;FnZWpN|dJqt3yHx(tB2y#q+SG`!)b!yK6xVp^mi|#zS#R>;*4kuux}Bs#UAV zg={>R+WcX$#s^+nxTAAkBnRNJvVq=3{$V7zBj3$mZ+af{Ai8D8r$zkxio|O%M_E@} z-&~*nyhCP5vo5;=RN>@7v;WJrV*uNyZXGobCHTbv_@XqeQQ4KF@bV__x(Fsp?Ti`~q8D@%+CB3O_BR8m&qe7Ue9XwLtAV;vh zk;ZHH4i<{;dx~(tH}?gZfl9r zk;**8+gvy}#DzznjOB9bq?v4j1d%zjH~Ut|o|OjLg}~bf$A%& zOJNG$((S5WQo{AUB15uK+fRmOGWca0ga#yod11BP!Gd4n84D0jK>dfBkt_L&Eb*&q+J2!By^cYgEI2mUOynGdM?2T*)NHE6>c|3mh&@*<*a= zDt3TJe|=vp%ldU$)!M6DPN*Yg4EeqEkry#0^_uIEz9~|8{Jv8-QmsfU;Ycc=Y*6{i+U@oq>2xYaFPF~5wtHFEk9rx;EBhqh5gVDj4 zJgK2-Tw(bueEdVu(Rt_lEO2(HxIi`8L~?SK9X35akx!10R<##W%TB!{o)~Sx)wH#$ zjVB>hN7b~xDPGAD4}wwExe>2S5v*}2<4SyEh<9&aUIn@IGX zi*&V0Xfa)1C+~`Dn!Kg%lKZQmGFK@occFkk1BLsqf_Jw{Pd7Wnt%Px{n*xQ z-sEeQ#1^xi4obpz0p^>(a_KImMjpQ1M(*F|#o^I*x>X^36=%zl-Oh?I_Xf%f9Dt3i zX2q5dr{Mw9`tL)(aC*<4{E5Y{n@?uhPS=3e8#JsZTN>+WDb5UN8Y&ucn z0L#HjHE);6go#yLr1le7%U3*B?Z&J|E*r4z5p! 
z=1MOlO08Q`xN*WRK!DO(0`P!it3|>zk?yS40kDn{h_(oTIG#&)WUB2oe{Jb6lx2}YU1q~1rEke(n!cNGDJBzq;v?7ckp)vB4uuU719+x zYZ?p+m|bJ9iWHFUE(EMXz}zU}=ZY9kd45eO35j_r4(D>OT;Sy8 zHC(3h>vtSJBcquUf9uCNC7F0YmW$cJj2*SQQM@gQ?F7g zP|#}qys2QUmA$?$`D!OrAojkj3YkeconFvOU8wjDy#c)@4MxtY^6Cg9dVLb(4CZeH z(720bzU5)Rh@B=yGWQ7oQuSA$Cf9j+3z!P-~mDG6Fnzs+hbKLsXBf zu0Cpk2c!3~f7q_5IZLntZp_Ow8oB`i-!ao2xxsz7$M{-KH+qa%IlfN(LOn?t`yC_X zez$Y^w;Z2fHq23q{NgS3C=%_?f+btFh$*jDtk2Mfp>03f009D4_k9X6_N((yvomX0 zsnoo`y4LiHSE)BZ;>S}2)PT--i!i)wXHTDduIC?o-8@B;>7?bN5eUWxacN(dHvqGRhcuE z6pP_i^y1v@ic!NjbeK?aDr7{n^vCle)|%$RviGR5^P6EtJqB^?8vWYc2JctB&-os2 z@+Ty{ZLRyuDuUxeM?Ecz5{u6 ztC~`AuPOA&w{U8fSfu1*V|gHHg=10a;&INx7apW_V--9z0>KfM&tnZ%4l%Ydxk(JI zr<7LMl32hy0Lq4cR|<}S)i4x4LJ13$CLBq-ZRCXAYXM~XzloPY^{*4^VIdH%^@?+C zavd+&cue1vBpL^)D@=X_v?T7FrxjY*>jxN%T9R#i19RRCC*4P zqnaK~Ago==(Cw$1p*&Bd01G~Iry>TYi8~KzrLW~uac_#ut>?*IJ4-LAdr)Vo>fr{9 zMeZovsWbed&WHBxuutGKgfEOfNY>9xd(PCbWsoi$;So6rNlXK`?VMF;qha0HFQ}Ol zJ*S|*Eqf5%S+_o)Bh2FmCcPjfW#VZJF9!*_kCfg+T93&nG(7k?)yU_t z$nz_g2cl4?Y@i&vuyFXW%H%2}<2+@Dw*n##gFUI}W<3CBoK-TE;jGS=H%!{}w^oN8 zT;~&dp#D-H-S>PkQRz2;Y(RC2HH?vb+ELi?t8JefT`u42%R6qvJ3(FOhU8BylI+t}3s#eeHXHD8{Xn!Eh6d0~ zr9kXftO_;OJ zf_uI)XwoOVY^=PFK=6W4HmAkcXI$@rff|H#pGsBj{pe6LJWOQU{?`BXtd^4NrT9L$ zOV^r@V%f*zHGaf&P!=!eL1GFG{OIvthF?Buy2%O<-?R}*+ZfH{_e)E>Tm{{k!E^P@oS+s z&Cfd2X&wqXCL|z`Mzqp?)zWfr6UK0<_n>mo6Aj!ZhoSh-TOvN)(Qx|RW{Rm{fKVMR zU}wHYves|iC*twTy0L@~aW`{66pdt4nkGae860X-MD^(a|V6 zsU4^`;~NW2h=Hw~R=H%YHG*f-Gfe>s@#mp2>@%%z)V%D^O{^Sy6Rxf~!hRAwN^=Fg z3mEFA0>NaB)2Qa<5bk-(6_7C1=%xe&fNkwX?DNFakA=Vt8gi7xWOgGslY(;ry7VZ6 z6w!lf^nEjRKJ~kYJ<}Fcoz#axeewR}exu<6%#x5(OacLpQIXUQ3RgYv#B?FQlZ1ft zXRb5P>_dF zLo2^T;}~e0CaSCgEW~R5Fj=Fj%&c$n0C&!SRf54A_cen={lVaM9Xm6|rDrafJmi@@ z)DU<62NFgW)JZOw#Cw25{sr0lsYqgQbFcpN#QR|1LTSJ~E#4n~Dy8Fn2tbW=+fnJnV`GC!G)#3z4m;Ck?o7Cxt#GJM zNCwYdf~iBvmaT9ys2OY-J$E|D>G$Lmq_VK3YZZqWYiU5J@Wd4MsPnkq$h5{VWSU7d zgUN^*uKu;WF_8@{2_ZLW6cqDbv&~Kd%%u=%%rf=d7A+) zMmT{&mWHs}5R;fZZ!5;P$vrc>&Xkm`JxaT`*I*HjTs1hl6!B2EhOOc&m`gJu1b&b7 z2q_zi&>nD9;G||S%_vGn>a*pQ_waJ}w7;EHJ(le@0Jp_-xy0|}@pF6M|Cu-+X=p+eZjflVMg+ z*z)7<;O~GZQ+`+7>-jYRUOvNY)@Zp-|eDf;^bDWpqotSQLXMK zGZzoouffhY!+SLrzE=4%ZYUy>Z^jBspooLNQSmJ!`<#)>wduA=9hG!o*LMOb>jZF> z4%dK&gKSi=d!5?NXT%Jtj5kv6(C|Q$io2-O6SXC_kVJL0RhiYlmN)0O3Vz%Cwsn}O z=hGsm@(R7h+gyRP=1380j1lWJ6`OiGn!^R~_A}jEnCdWD2(|;z=|J+~WXOeN{s8&RW*w>WjrXBtc?i=|Kdh zM6Dpn<+?ToBm#9sg1+Uxb~Er~y15+#;Pu%Ij`RRGG>b^Q5>MUv_pYbsjW-}t??q7b z-eFV`JHOru3Br3`Z+F`}A4F#1M3$90!Q_Iw$JBG^&jkPF^ukM%&nLS?=-KYrJR1Uc zS)rYm(a2GC;#0N7QXwq<;p>S)?R>z6R8G?fM|IPQ6jLuQ%&K4q2;eXn7n>-r{CYWB zVO?Of@LducFjXUa!foye+Je(ZDChmIX*A0b$Iv+BDpN!i+k-)USzfN0btiY&_!|}r z2UvYa0k>QY^PN}~p%MXCDP?1cViVchLYfiN8b$W3D0Y=?2MGPd_I>3@1$*ywRxZES zr!`#o$OCOW6S2QCbz*~i>ii>r`5-}27zdbYG2^y#Sh@W zfPqDhx{9k41g48%001J)0Dp0A|AJTlC(N4G*xt_B-o~EJ)6mxDpP)B-uxx#?zwN(4 zuCKLioi{iTzGv$0-Zmt;oktVMu2ZvTQb4kdv4Wy4nLDzvscb-(#?z=Lnzw%Y_dQd$ zGVcA7rAt15r=+gG4aCu^B8>Ox*9j1jzQ-V45z1#L*C;Z(j%s(4X3W@@Y{3cDvVxo1 zlqgb@Riwx=pbCCBdmTi}>v#;jL)Tb}?SE&hV zSqNu|0>Mg_j^~Z*Zq6Kk6)89=HHWdnQzC*sCiwhfZ`$VPHSBe(n~SGmr^l-52qAx-q9v|^z98%w2BRTqmi8EC1}(-TCVdd~ z0**|3@;h4WabyxuzoG9BjXKcQ;jpGQ1@#{883gMtcuZX&1&uh4RrdgsnyI?cL?kH_ zwf+rzejT06NM%_0@R&?T?2sa%8`5{jQ^ndSM?X>_JuoIcb6eW}(@S4+6@BD6E<%(+ zGDG$(8(K_zY4lmxGp_(tcGA%7IS~`tWFKL1O8eq#p+G{aV}ubr69zc^G^g^#jF?2* zd)KJ(9VZyapPViMHA9IJ8hoI34zt=}{o{o*X~)4#_%L?tSTmNC!D}fIo$;O$mwTph zEmYow2f?JZsm{S}xePd<`mnS6q!mHmKELvH521uAR%|SO4hlMUAF0@c)s~c83*rtdjiJlLt7Gomv0xN`iIjId5Y=`!=yc)2c2+Y&A`4Vwic z7_P%6x4y2<7g%#SJneCepbFmM^pSgyQ30nFsaYxBHOrxup<2fcZoByD8;dP`7bzKl 
z$3vRUv%&gRdwBZMKG-S6%($y%wUIO@j(#*cHn3mYffjLIW^Kq>jcU`y1R}q~+eMH>xHF1EF|H*b(}3#+EXu?BKSx|&HUl1n9`W9q5}>^ z#pPi;sa#_t>N%k5Re`x$Q96bBuvG#NQRTElk5BF)T(33ko~|@*+>JXN(`=pg3mKMh znZE=xD8DftJ`ZRcTl=%G2`n!2tZ!4GEll!Omr3*LW)g`rhheJi{S+uWL)$~Oh&BLK1s z`sWnDi&NlL0*6H2q}76NXyZ4q8c!B0rKkG$<7>~i7eRfse7_YG%KM{wN&Ek)u907k zuN%U47!9!rRgr?9Ep9CP(~%igBTZ2vF}LuF?nv;Pc{OxteT|k3Kc13khTLZ8Q>Mn+xX0)8ytmnI5SP(UB8N9p4@#N;E& znsre7dvtHK=I$Z%Q`qx9>3Q7P4Zp)63s~7Qk_kcFzQ)MM50>9dnjtI$W zlXZH&$vUe~dokx^y{#KZYls`q>b1w4zxi^|UH7dPde?Qoi;m~@s?))AHFVb&9WFvI zsUo<${d$MYeu#GpO>n3}5EygHzV{v3DH4$=ki0=5dPEv&XWFmY8oa=ba?%A>TcE{)%#4R;B-b|a2)6QkVRqb~n! zewHo~E=o97j%TkcZh0;ZIu|e@*|11xzykEBt_ZqDt6r^T1PQgE4bH3*J=Dpj6=*s3 zvWP3U-=ojLn-}jL;_TQ;`i-pZ6BTgbOw}|6mTbw%zc<3;8P=8u&+F`HFJ=*AiV58* zS;l3czjS=Y)}uj0nFi&-rpAn-+w9Sva^_WPPq%k|tOl-B?t3E9xgEe`T%YH>OehsW z=m<)EqH*)vE%K+8UI|ISodVJ!!o}M=!`JK`5nGcy=E>9=3O0W{_|6|@dk(XmYiA<` z$I|e*xH|FqqG?*yTMcqVzs%dQDzxCSW#Q=iD{nuH{Eweg9Al*`bJhIW*fbU_w#Hf{F?qPz&!hI8O{&NvG@VKc%JK!?2pLs39A6UqP1PaP8b+Uw zkR@^y^QGfZz?9=SEtD%cH=w1;cxIYBX>(sp#U2#M5)J z;pW=49aT?zaiI#;xSO64@KpYYulEZ6p13)0!Euq;7ahYP&CvkC>V!x;?%fT;zDk0K zsBALvEMt;Yo)YTQcrcNIFcy!%YaAjasV*V|krfnEg9Vbyj@UR!M6zsy zN_3a1F^%bAHF1~`gQ@4mC~sVf2?+j>QQePIC_&~E^;)i5EEg>%AB2Jvm`;Pwh3H6= z7DgHJw{iyXwsq;U19pUbym2?QY`d8|aDg*C_e3orLS`d*!ZX359A^=?0~C7}^nkIw z4mxTvTepr;L`0?x=BFJp{2JX)Mo&+Iajb!0({Ip2<|o6GRSJmo8ze;$5ItRD4IFD) zK)BLcBXMYWgbh{9y%?>;?u#vKqdjx|WSDG~vp(S^#=gcg6M zd}##&0xOA3SxjyXlyg6Vzhg-L0w8AdHe8*3c2)gVJ=ndi__QnBJRwI65vezn*Z20w zCSJ3R5Uo0D7P?xm&iBvMeLT^fkKlKqNRkroWqvP+0efx*6bu-G3sTVIV>UgY{A!tQ zokBt&C<5&_N&-B#u*wyK@qKr_QZV;;hIK-m{=o(}35}ZH#nJuwXF%mJXRl3$d+7NRhQ!zA{ol4g5AL?lJ1O(+m0!E+%3_ZB3n(e3% zNNruKUlUOT=Kb{b@h3!t&R~QY4y^D_k>6l9N;Fd>tJFk{f*SU%JpzGQtU@!^iZrE2Ll_{UFb9Va9t9Fs*}O0VW@aaX15srp0!+7l zZDGbg##jMFI!yUfc;yieOrDjd@l&1b*u4N=mplg{D>;6InNT>tQO`BfkfIS0u?coO4WkGM9d^*BQ=Z({MmsA z=ZCSc%@-fqUwj-|K0GYjKP%P$-9(deSCwX!U6rVGy3k#*fv(C__lhK zU|JRVGgR%}ZpdBQDTjoB*+5fQSMyE>ArVhB2UIe&1^};Mux=sLqk|Ca`%OCe@uIVv z?))YXJpPHp4_E|z_bd+%(>bB+F?i~-;Z&>XH2PRbl6UC(;L9=x^NE?Jg8^F^Va}db zxkDTPD>?P9@GRgCST@ao?iG$cYYTdFIj|)P=!mxt%SYhH3cuSf(6nL7N%Bs~X$g|A zm8lN4{ZC7#1r%xd@M6qUyfATi3iO~ToD3IuP(;Y zPYBpjqoe7>QUf_8ZS%N=Nj7d9k=?3|GQIz&Xv@m@m9hyzw&T2=J+Yy}3qEVcX3ux_ z#v+;FcdBu%Qtuiue#*6~G9{WpZn*;5GYAi~x$0!J(^u2aQ4LXnCd?wK(7>_-0h+4D zLR2KNK48_Oe5kz&zaX!;aWbpMjrKB9sNNRWT^U zTgQjbH)+3S9%bRQ1YyH`JzuCvH7VzaKR+pl+1eHvpQEJe^2TprV~A=V48HA`@$KFY zG|ESEG{8yc+Jd_^^2&X+qeIr5>&+vCCe1kU*#8{L`3j`YU^XgWY>Lb7aY*%rw%Y=t$sN>~U57P8>| z>FdXB;Trsg^TRc$o)xPjI&bB67%8|XB<9(x@RrqWq*Frt=ZV*CBkjd zHWie3ecs}fRc_>gn>l#miSWgu-j~Jt@np2#)!M(B?5?Ar5vpvCcZI_vA8Vy|Pis1EHmV}{qR?Ll|WfNkEZ z0m4&El(lf|wqHzKJG^^~zlC*{>?(HPh`${-q3!!VrSHJLd%oHYY_rEUiy>3NF{G!Q6JicaZ2dn|-D>I=P zK^hMZi>ze^E%itQF_3O;qnWB*$TZP3y8fhk$sUv*x4Zxv(KKHUb5uJZ4 zesUA87lRYgZ~fYz)uhfUwel~%-O}WXG2TeRvP}u9f(p?Z(M0h|abS>09?d#WXc?kH zE11ZVD2kdI##jkuCgUl5ts9+KDoh{mlCt|$T5llF@GiGI z{RN8OI`05-tE4OJ_V=Fv)t75`xsBF75wKHsrAy5?dTY?gW6Gkl0}dC5n{x^J_Nl~GRs*%~@NX;b@LCG(W!n?(QYAT{53tuW zHT@BPig7uXzn$^&+-_xtl+P%7;A22@*%x7L0<2L?Y`!(~-^Od@PiL&$!4Q-umoc=a@&M}-&U)dA# z%rvmK<;P2E)54SPdDqYY|7LD{XP^IcQA@W>6l5`Lbr3X4D+(GpnG^V=gDse_ngctywo4f>t%2E}E{|J~j5W|<`LbDop zg~WwyqU1AH)C&Mk)SGO!PqEphUpNt!yU=Ag@tG4L03k)nKzj6d8M&B zW}}pCsMz#7AO!k|eE>=S;RuXiUe8oFK9|F!%O@&JOO3fkS_TM4H#d(M&m=yU24BgQ01(C z10(JBI%P``;q`nVo!JfB^@g8FosQc-}vAgw)qEv6jscQg%I zn(FD@jFHCk7cGT0%SR8N)aqEPgy2NYpi7~hixWiPb&mro)#CDj5xj=GO6>JikArrm zt7f|`@y!!1zTrP&KB!!A1BA&l>1ULIR+*U+zWJy zv=eo0^qUTV!%LQ@rJKPm&!>VtW63FwP)9~0|2$K=Het1~4?P(M$3uE^1R$et5bMY` 
zgVNsiim2`e;`!+?(lU18T>bHLg`RXTUi`3u%Xtcd0bs1O%~O$%7rg#<+~*&qkUvc1;wN2f2&$Hdv{t5^QEfH$-@9M@P7eW!GLcM23w`4!?V=dx^U zis}m4(no7W6xPQQzzFce(8BcqrYqAAeY-)sh5P_`g&t|Rd5P$2pU%Nu(Aojgrm|m? zo9nMgEwB9EAnF10CVTgmcfJiIOWXhUgfmE8TTG#y;sJiXy<33|1G0(wFn7OFWzobO zBY6U8T^bhk4?MP_e?-sT)0=`fsEY~oV)%$M#KiMGV@s_C?7N$ByyDB6WKcuk#9MC@ z@V6A`0bG4qZma^y^#>Eqkm5Rw6_t`*iPT{@fI1^6w_Ge-iWsqU+9^h;;Gp$eru)Vu zko)LIek(>-Cx>3Qvo~gn+u4JyP8rR%Df|I-YbrV(7r})iA(=H2==nDi}>?7#j(T4&Fz263*_c2KXbq zmuHLkmO8g@+pq!jyB^%()i6R)C=ozjToOYxEh*7bkxkE0SG%(=K<%s7F@-B+^uq0TYEHrT=gc zT4x_W&1~l*eSJKQbT@=QdP08@=n?W00Eozr06rh8$D=di!HcnWyraI?` z84MNZjjks?@2v6v$g?B|`eu^Nyaq;sDPgSq>Xgj0^Wpu#kHOP%SVN1hnee0(TM$ zo^z72pe=1J89n9&1TwBO602{W?G>>Y5W&iA*WV25rjkSUwO6=mlS%FI%ktb@2z+%= z&@!|M#H2)`ajN-h&yWYfH0^ExXVp3LdrnM$|8fSUx&b2dKM4Pbdz$%1OULaYr8IPu z$CDzRI1y<`Rs?2ntl6kJxe^v}D4m1xxCB_c9 zEMw&1J)wb$N0MA;4u{VIEq!m%gC4}rj()5t#Fk7yK;TC&5-ci^yR!%#qS+5OdduSi zC5y1Z=zNGfdnxze?ZjuSi-ZI_00{H0)xEz=M35N~<;Bu&I^OIF%tkvP zr6d87N;r4e3!fwv;>;TGF6Pz7y{zXcFn<=#kS<%$aEr_m8}{a0nqYlRrb=L}9qoIx zM9eWdmY_MXBsOA+J!68$cpSC7)~K(`vRn?jDh}}PQa{2H55+O)sdK`=ULanM5+1AV zs;Z~pW>=WER&Pe9%#9pQSXCXsX^$bS3WRUuqMZ%<^`Y8EPe>r?A{W7a%jPmb%A5e? zoJZJqM6xYOU+>ZFhmU9)fnn6wo}$cX(jqX)D-&f%x?bklPZv(w6*{K~dD)l=Qr%$F zs$(uZ=Fev2P+r5R%N@6A@?A2f4RJ>vm01?Dly?~gvBzIMN9WgTl2=KZNwuyvwH$7! zpD^-x|4_-k-rYkHb z`d`=TrV(^pvR;ztF)39yI!#_zc}sd+?x{3O^(g%bHEshEl7px3><`RBMd|H?KIlSH zC7x6(GRjxnKbFnRjpoCX%*k?o)KV)n*Vp+aTzhzN>o6nJiJsHs>`)2rdDWTI%sSBP zRQj~)%A^-FqHdYIMaqUGnsoKZnk3U8h4bQGlUEbI;v1Z)3T%*sP#OEZzTT@=Vi5<# zH)d^GA3ipJA8uNDEML*E)Ok+-1PQxgBYg#zY7PoJEe$y%sb6^V;GwRmRApMES7F_Q zkNPR+kQ6qP_l-%vlWcbW`CT7Ph7;tgNeXVA7TRYg0OHppsO3~D96 z#JuLHe^mH8DrP=Jfbh6`aqRkV!pp0@O#1`q1b49;(kdCxvbh3RP$qB)Tu3s{<=B&G zJ>*^1IYp&qgCbyS-5i(nakEgRETe+r3~;(|KvIXbMKG78`am6a+$izk7*(pc0D(a~ zH$^k%qkpN1FusvlhkK8z1zbmDmk^+iFC>L*D3uK~%1sfp+jvl`#fAlSZM3SWw z=j`m&iNkLG6ML}AO1ZsLT2Vsr5Kg80=B44ZCZnFQ;zO&1aKob2yan zn2dg|o;D^LlvHSsTr7uhNPx4s85MD=gsn%wwppbkJ zB>#m=5?1_{QE;TsvQiVt=?YrI`^zbQ3L7r{d7$?(d5}laBN9oVh_|yk@GL zu7mVSo}LdGNL6%~mwNfQ`AQ=h0~UR8R$rOUjf6N<&k^+a4QCyO-s?{(^TW^UsKe|J zAdQGUMN0jjG@D?bBpQTLxkaO21JhGCLuPEqM~?YNs>|#Mr>zf4@*R6{UN7@?D_6xk ztXrMv7oWW$`8%w>-{Bu!E=jZ5hItVNMeQS)BPp4pzV~N+;I#NY=iMUSvWFboG<7z8 zQp;P~sC=(Bz1`E3Xm?h$K;)Gm{d`oV)%{tS_SJ<&?>+8r5v>-r zbSwCG1|hXhZMAL5LbQ&>lS1(B=7D*l)o-B|pLWZf=b}k^I6^j+d{6jge8h z8$t06Sl<19+elQQ_lv%P>3pQD2nWW}2%cUHNI> z7N8ognNMaJCQoRX1juGp8qfv4mw4Vl*0^ciQ8ngldOkgoi#(8rKcB3?VW+_79P+f0 zl`2r9mLLt>mbx?A{EMtuCk;JwumpUPlx-yitM9!Zya!>lxvJ>*j{}!I+hCjyBstK~ zUZ6A5lxWn#<88#u+ej*#0`aSYt49^AGnd&PMCJWhRgLm6&uSC2G6=9kcli8T8*g34 zsU-1zqEH1PFm7o5qoQlXMNbWh;T&C*YQu2~klNPL7N}XeL_anH7v+E9sb?Fq0(tE# zMVX*DmhnDf$w8arcxSqq<}yao+x$o&bbH{8OTwKVikN8{E_27(`V@ccZ=wBj4sFFA zf{4CySolRO{55y;4_t+vk-^_$u&8)m*)Q&9;L$5wkp%su4AEd1%)m%5_jRDFNd_PTxrNrURu z=7YW9G$fHwx}0ZhaImZx8A>#R$!CBuc_gAe)pA3@G#MG>qHVP}VN>=`BhlM~pexOE z1RlhVkhEaj0sQV~FVhHfnWE&IRL%D*r8>to(vMXD*l7?xP) zLt}-oR#11~^EP7Wipqqu+4R!h*pg7m1@gmQc_mGHjTCV|HsnfEx8b;0B;X^#;k>?i zjb5}9QB0JQ0M7K21KKeZ!%?O5wBQZ`o}@$j&<2QGV}|CoHs*wISRY8DnkR*o{CEQ2 zUtB81g9{j`6{EXKsImv0wP3Gh8LCd;5kKNcKcz>_zNjc5qNA_?D(aDtqoJYcC4E~F8B!7}Q9@_R z1w886&P&^47>bzyQz}0~_|fS1jGriVaE|Lr3C-iREMjj$h6Hj|L!8VO`0INy3mZ_y zHjq>Pc;b$TxkFl0Pv7OJVoVpB3>Ru=Y^bw7OM+BI%z)R~4`fEV+X;sO zr~GQcmSRkVWP7^Krk+DLin}oCZCheDp}PumYU%P*wS`{-%1hJf{lB`o+skXsjFsWl zvLFkic0Xr*Z2EH4`g%UPI+?@A#-x!|>|9;pJVc&VewFef6{$f*{*2tK$gg&=9k(q@ z!lnS8N_^e*r3qm|KrPCFFy`Fzm-TsU&1n~L|?9>ls&#)KHOak^`6uU`tGduIN zDX9`HoRAD-r6ptU_kwt)bv&>n{?_*u39Som$Y@;xPL9pSCI_HPVqwCoxiT{^^M={f>s=ek4qJm zu0@Q(33C|b%#P-b5Sv8-ikn>zw}&{`ce1&eg>}hYHLkv4E?BS>HaTcronyBihxx%x 
zR}$XFLxUUGgg-w8bzsL?C$+nCP(1li*@sc4nq!j}>zIDqQ`FD0qT;}+MT;q%@N)vD z5|}_n-BxD7$0q9zAo83q2wbB;3m^N{*sZE-y7)mSOARj+8>yIzbUt+z+`~2pGbBv3 zk}(V_gwZ$kv`a^OrncJx63q^p=eO?_j9r60sPAdF@x1Gi|FC@ept;PdiKzMA5L^MT zWVe_j_`yamoPgU6x1rgXqjR!lPscM*zxWic+q2Kd_Y8J@=C`J=*^5_YyBM#=S`IIk zXJ_@s*h5WDIUo1g7z>=G_qFppPSLG}pk=G!<1DuI3{+jA6}720zjsN5*d7yW@W{zZ zu>$w2Ny(mSxRy|WebnBkvkHdeJ~>MIw(g(*yHxZ8oR`0J2bZt^ufMhgIoMhm|Lq0o z98+ua_1F+{^#fKCeF^c@#aYkZdWISIB=|8 z9^fL1fJ3cYfX(P}#=Ds;@t_E2&G~d61!EiQ2ll1^4?G_cF*=Jr(2x(Ff{V z@KTq8f~$oaEdufKWx@A-)*}X6OHXd64HgD*nzP@*5Z8!hb&$~4ProJ$UaZ2z+b;bx>AFHP>azHy9FTo&Tm5ToP4{a>t(~!hBaM^Ye>>3s7j11oW6O4p1;J-Z zmmdOK53MmmzlRQFI4}V@aAqDhzN*i!OAZmcK3^=UAj3oZ^Km2Y#d@R;yLe2}>iWX% z@p^9~?xxwcS~s3qb}~#qnW|OKC7~{Yh%s4&(nUt6>*M47?7LWI=X1IO22#6$&}`7R zMyKq2lm!=$NMj)a3B^TD~N?ih%7s3)XIS67W{m>y?_p#^?a%n`2GSaOxdYJ$-FeZ`F%4X360Uh zYquxU^>|SlkZ~D3hbtr!`^lrt2ZIoX*KL)Ndwtg-tD?dP`~?c*^o~CEDq2X{-+_;v zTkxccY_$i0-5j!H z9?K4?1i@1pWRMKiD4yC-V@Y&H4hdMed}Pij*OCDl^WC2}B`k${XlzC&Y;@l9l9G@) zNx)jJm>V9c=((oB=_ogyMgqCX$uZW`8g`l7s$sDLTD0#jT@>a7u36#|(6YCfXTX{l zUimDlWgP>XC=~~Z%gXS|Vt~Oh1=5A@$~!!;7oLJg_UazQdi zstDTt@mC5X$aAQkts(R7BPs*gw&X|bG_p#6)p~v&=)GWC5$cH`Z(d=ra6PfkA{4z6 zsotD2yDIjzlB0PF(G%ops`LO{BibHC4u^uKi3_dB_e3ARSiKfd3%b%P#=u!i6$&JJ zRJN%vzf3b$8@3+%4nrQ8mYWx)fvujbNQ=FPKE@K-Mr#M??4}c4m$SVVnR^a~lI>XO z^2~(WK;|nXW{vW(_a}tv+AjH={yc z{zh?}zGEbiUFJFbW;J<9lBdn&fp>KIYDKCGRA7-a$;L$v4Szcjq(IP#8Y00*(?EOa z0{x31cl|8}{_r{d=s~;ReFyoGCoK%K)m^&mY5QOwpLGrZCBVjqVI3B{Tk@42GrkU4 zZ3^O0(j>XlVmu1ey=*x6xhX;GFuEW*d>Fu?tEx1I5TGfE5!md|>8d!u4x7(VO=zve zf|ImAI4xbHr;%R|SL>|@nM}?6w(w?<&PCE{DO!t?Lu!(X8`e9NTOX-^he~_QiU;Lg z6Qd}JNKM&Jz~cGz?FVXSIv|!BxRf_|l`AwCle`>02!^U-i6r?(e+E04o57R z6OBA5kkP)<$RxJjE>cKtBY{m8p9Xm(c1+235^qdoz!Nvy@(MfL#iLLn!@NWA34?6f zisVtcN7KBb=}Zn)R8!jF_|qjt%Z(Ywv(q|Ez;Q74yBKm>KQa7v=6bbX&DEVF4&Z=;Yf46qWYM|PH^S7=Brzoj7sUgHXY3GoyKyJE>S2-N~Z+Wa;>J!3)A zpXeOSpnh`IDl8J%^9zzqRBGwG{!BjOPnlU5mN4spEX>zV)7O=`d4KglwOnlSFn>18 zBoOv9YI8O$Oc)d4FGZi|EIU2ZUgKB%?k1T90}eC8C|J)9A7T+?q@v3_$oclh;Hgf9 zsHJ2RXdFtrVH_bbsFJE1@3F)WI;~R zHD2XWjp*Y{%@)rmZq3s4!ZAWn1!Lm3Z)J^3^UyhUfiW5TR`RU8HaAe)YcpD4dm(Q0 z2{Ne3v_exl?vy*{#`cj5oGT2W^^#&n;M!ZOxp=~BP-hx0KQ{MUH4=Fx^S-b|IM8)u^+w~bg9DqN(?zfD$gx|o$+D3Ck z@YCD6;3lTFNABJb4ZjFqpXf=I)VN9(-Z&Ay&T~I=tb4G}M@LVbt1SAX*|I2G_|yGE z-#`Ldo3{}()iB3{H31C}K}`hcg)ScQuHW6vEn`_KfIjaE=tfRkispFC6$B^qQTX{8 z9@f=$_!C#$1$I9+W)CFe8e?luZiMx6zUOZp1TkH3W1R-um5dW~>-9 z2!U-ge;8wu#o>-2ADDnwI%AcGRVOx~ecO7*NI!d%9LXg|5QbUhd>KM4c+h82^xqpv zaBc=rcjC$AS9a(M!|(;vLuwk+q3231#?TX~l0V#qGJ*&*YATOn+mT9V*AUKLf^L<9 zWwH3xd0_?pg6lDGO@+l}NCu0&`7{*;WAScsK^y=Ip~OCPCZ=h@@knbL{Gq-%4d|z|5bcO75^@>383? 
z2XOxy`cB4-W%I(IhtKderEv&IWlc@MK>co{b!2k0r`NN-)5BV&0CffX@pMW$y@}pj zBxDIZqux}xN9@aWP#EmZTEe$ZIskn|n8r_81Gz6epHhMN4bI%bFuSIp7ptJ)fQJFk z+?f=HdpFNfJz8*24jI;J3kGhvJYe8_VZM3ElJ>5@N@&lkFu$sE$#*zgJ)yfTKv_9t zp4eqG68_i0f=!O3OIsizd^R6C8UCABK&4jSBNQiD$gwOb~5e z3qNrnd>A#>)e~kM=sKR?e-io2L6LNOs{g>yG7liWie!Fphcpqg6i4xRB{Zi}C(40F3WFfm3fYRk6kz63bK4$kydsoAO}i z!`p_v7((zfOUAni!=t;{b*m`w=Ybd7_ZS=Mnoc#DqCk%J)>#wn{Pr0Gqo|bnXETEd z+~jWQ^MTWMH`^wJRZjD+b|pr4P_#49JmeD z6j?kRT?yLK#tWa*NtqSnZwK9^b(~t;kqR>rdLXYq8Jd|+FcEY0M~~;ab+tmgY7!(Z9uw+$;|REtGnWe%g0>a<&i9jcYUDCer6kL< zXL`co5~Ub3HHg0Mhi9-pFxWw3YiX?GL;m90XRsJfu!LgQ0wANb&k7J^v&?zFyxVmG zxp`+f(C|T-QN9^>8|-mn=P0G)8bsuBTLq|$L6~2aZHcs|oBQ3+3%EYm=o5T|-XybH z?Ko&UPikH^v%2Gt^D)2-;$4JfKGu?)tEI)jV$6niMIz?t`~gI-Un>zsb}^j-8eB7* zWAEyhK6uFy4!B}epJh>;8LCs$PWnRAm*u!$%lMl&nmI38`{hX_a8`^)%W2 zdYVxF%Rj6COmdRvw*30?&c>zck9d z^LnTH!s6oM$CoakcK_h}Rp*c#tH3R=IWnmzV~KDsmS`zR3&Q;MrD*o zN^^Gsf(OaSp@6;MCw=3v$0I}6XTp3i*6kqephm#1$&HpUy?GBEw?zu* zlt0NS3@WhFKubL3zB|Dd>Z1x36Y7(&;XuQ<2-2%h5rbS-d}v@Vq8MwTd5#qYvD)fb zg-Z|Tx!a6KJIIf%>kZZc=v9<{nZA9Eo1z>ylr+RV)c6oTY^w_qYo!KIKQ4S&t9|A1{=YJ3vK0l^D=!yVsHHk-)WH9PvTHeA9DaHAEoZw0atLu z7h|Nj7=yORI*X(H{7=dfyF@k3?H57X|CIu=f7vSkzMSM7+xZ7u8sy3gs_!HQR!%l8 zZh2I-Vjcz;G@OVx#}7bysWU30ki+{EZ`0Lvzs?B(Y#G$W{sH7X^(ocMw&sq%1YH#( zmK0(lFJqLXlZ{ z>H^1Iv?%fd_kw+s50u~mxw!12>wqRoBGuZ2g#SU!%ebmXBUNqNdk>J#)G^(YLP{V3 z@>uygLroYJX99`B$SR_Qz;4))cxaGgX0qVQ?nAOpdm1N zm6KlrXy!Y{pY@K?ieoQtK!0YPJs=N6FTR3`_rKQkAFQ*1xs8#zjp^T#&q)gHHbC?U zXYVMKFO1=d1Z~kwPUL)fTlJ4W0&Tr-0twJQxKH__mj&ZgfkWJrVa*W{&W?4WrOlHYLhUi+yyz1_rDgAcC7v9Z=r zS$gNnzdib!3$85Zx?1t8JGAv8r1)e` zE2D`u6k$8-^n79SoE@*UIIx{`S653CQoxNeRhXeS3WAIG zrmC2QD^G5L|A^mTL&3kEGx&mQHb(kZwl>E90U8|EOom_kN(svsBK&Jhz&{-OZ_O1b zNn5V*{Q(4F@#*&8XAar2)2?XuHcs}J zWhss&Uy(r@%kb{3n`F@JN8V(S_Jm5eZK-eN63H6F*JK5f)xa@7Rmtu}RPW?%`usd1 z#v=Lv|8qlIMu&Wie$D9Hu>$~*|7An}L**D5+fh6I8>3tsnv#xdY=~V`)q1t@n6G%` zBa7 zarqYgY)3of9I~Ro7v{~sL;mAkWoK3w(chey?^gt!D9^6hTi{+l*)MjN5hO92AKjQh zDr8xQPT{3g{8`36_bnO-c_`Be_2A6R`%@v2`OBj7XqH{<^j63v9J~jo*x*bYUJ|OQ zgQ%j6)YNV+%q)Qt)GDP$ziT#(tft;h`lrKcHB{zKTT-<_R_)KDGY zlG-PKDk8~?w=*-4fkg;n$u#K^!KNn7C1pgekIU^2A)ZB@Qdxq5Dxw!n`G@j4H^(sn z_5tBRecHGbrv5?)&HE^rPusiFj+Z3INODZM;$t6e6fAKB3faqm&}F&YX1cX^jph#5 zpJx7*m37F5N^Atte2pC10+_omB90DreaNHOyBx;P>b$1scVI6tw2}{DZ|k{nK{Cs= zpg1Y9lj$A2M+OPno8gBH)qiA`@@_1<575lqVk7WoIpF!~afLIzOVIs0o+w|5s>-Bznrv}; z&4H>7092@IWGUwP`~Y*|#%2|xaenz9`+R{$F-p>a8(}S0GD4>ybt4jI<#TMlNd^Qz@H&qJ$J(2W|8zgYp@=D>_?P zx`>OX|7e%*Z93m#CgZE$lAw&;$mXl?vjClSd8*z(NOcB|{cI;Y=vO9g2(3}UkJc2Y zA8dHUJ^8tcrV%C7=J=apDNny0FC0U>G&HTP3Ue|H^;dfjmKa@V+qY!J+iou8usVo0 zna6?I#X)d)LrocqJh68LveMtgiiTudTvyJ}j;nDvsD!KUy&+3pwYY|n!W}H&*E)}u zVOFhbG5B9!t#cM|F}xd_1U12pX7gcr9e-Z9ncF`+vFG6I?w;o&C6rg9W)eXT2bKY) zc8$WbYE5TB9R9Y81<{0wn* z?5N_kwlF@Tv>d2;bN|&&F$srAX6ZvZf0-xdt$Vw+cHnz|NQVDbf-O^3ZR_iqv9V6g z%W5Nce~qp{=o*@4*~A>NaH65@r9)-P8ne`#5(8iV{CvJ^@!Yz=y9`&g|2y3Zhfez& z$;&tF0qJVggxk|Jq9*VB6`cC>5@na|xuUw^N{59bLH>pp>z`s~V(#o^VFr9rD4qRsVj z{NN8*2CRDjWdz0^k&TN`DSz%rNZEpIdwBT9Z(%yTjmA2-(=nsozJHoq#*Yi2&Dqh> z?G=bHMF($ajwJ=rs7!Ewr|VS~^bDsGoe9s|?fF^H z9Rm|+ie)%>L8gwuNf(_jC7-rk{hIu&)0KN3Hz&h_Od4|SMK}sP6AvZFHh{nC&mexr zI1r}18M4wgn2QWu$07io$RG>8!yhC*x+Xs##sMSAO(wnK0BwAd9OMHW%cC5sk&h=u zegRY{+t>iPA5LbRA+*FGkzjsT9tTsWQ>&AQ9{zwTsCsl)cQ>8~yB7!;;{6?`CAJoy z7iu>G>u!ID=m#rrRvWl`aMPHMO|CxnDH^5}{Lpl)hkHnfbA{zNN~(yZ2k6msL1&pO z){OJ3?s08WQKN;H23PYw>}oULhsDke-p*N`l+k#MwYjBF7BIbQ&rRt=XSCNYw>}5J z`>Zz5GX?IxKuG3j!wXzu3Q)lvs1hK3u}5i-1kcauo)jXl1}L_1I1%CK_)dAcvP~U7 zUaKL)Oh-P0M?8#F^DdL&duDKxCCpF=M@+Yhl@f%47nlWd|gpn zUk}#Rx1^}S{ASawE@)h&=YU885 
zr0K4RGu!l$C#x}HX}#}sruTHdo3o{?BADcQrfjs^%gxth$;4gkJ$#J%XoUD{&MW>c zo_!KKL5y0kvcu^MI-#@&j*TEud)_`9PXqn;zhdVJnb{GW{XG|%Q?Yt7-WQ>pWo zD#obeT_VnMNsA+8aAh?d=?nZQ+xjlfLhp*YUUIXuS)z44LpUCtOC1y)`8rpl6Q`>WYl`o}m34 z7()0GA8Eovtj{Nf+*seYq=B!yY#=LLHIdI09`T{_Y!J&1xV8+Nl(<1;VU%ebOfCnj zu^*{{b>ltXTnG-JpBTJftK~lMHl5K*-VsG7_Oc&a4lV(p;Bn`DfOpm0PHb-^D9YaZ z$v-<*;WMr3tSW0=qi~2m0RY5IQpRAf?^8wPJ9CU%mDLS^N%jZKm{=)Dx-XydWW>M83d1^c|awy_9*m?EbGQ zY2+`w2LbSh?R5}Pa8Tk4X7qmH=)Z*-cK^osvaP92zQ8A^%N#A9m3!;sj_U(cW$y(*Ct#y+%8v!HIBIBj>2NNWXqXDGSmR%! zA9Y>6E@q)7hn2tW4h-gPRm3F2L7WR_)T5*nSSMuX`WwC>uM#)on{T^SJS_f1)uA=7 zgQbCWk|?ulSYv?b3Vs~2z~c`Lum;`!FN|HWM3aWa39b$b zzA6GB_kT^4orCR{9BgLn?C9Y9Zy4?x<(=_-ngr?Y>V(U&*IR@dx z;)t}7ByaEG9{1OV9Jwx#7DKMHx_!UM^>noIbmKuf-j76ue!vFUlj5d=W@G8n03y~< ztO>*^f2=I$V{J!CCrqb&iYOqx3%j((%l__uJCdA1kh7FEDQprr!ZOKO6GTdL)pDb6 z9Nj$uQRyr^81Qji;xX8DMLlz=QY&PTjJ&R43RPmL{JtXL)H>Zo*3#Pk07P%Ggf-HD zo2pPk|M-bQKXT((MVrR+Pi=r{1Yv*t72Wo)R-pY$%w+zxn$BF`%G^WW$=ufFKX86K z$NnEfIvMhpf^B_&Tr3VjW0&=a@1BT=M^}b1W%IVLK z2U(daCTQ=llULtD9m~@6y!?~f|3abvO^cL$ZQA_`=Kudg_=kx8pG~{}&4loMZPpco z`hT|T{@=={%8QjljU^%q`5JCHMfkG|;(tzZ(YJCocBFAMG&8o=r?GIfwfV2rbBXt< zI)0%4&)FNQlD2DfFkMHg2rJR2r>kp=aD8y+;U;UDO~-1P)b8=?_x^8>@^yR5(O9PD z{;-sCJ;Vb`qSr{TT)?T_%=RLnlT-Rx>Y7>E8S5CtF&4+QvttP_w-axfoHn?U4yMXZ zqP3OAg(}46l*2WLX~b0JFdI4a-H9?9`a@g_6r_opT`CYiJtsuNpwrG9fPlpYk=Mc)OCq+!HFVQ5;Ee`~9CM3|waDC$h2g<(nn)5|V>ZqD!GJMYTmB`LkHw$I5n7Ewu9gy$uG+e!t* ztJYRv_Bi%P)yK)f$N0HFG%+>Q2>V6Hb_WuWM0ZeYTBS5rZ;Pwk+?0fwgQU!)H9WPh z%~}Aa>*h6^V1R(~8*R9z+`7Y&Y@vX`WQMUCkC#&4clK+ge$RVaA@ZNkGvV6CN*HEZ z4yam2UHk{in)6^?S7S6FyK{o24cO6pIn+_?0Xy!T;qs3^_qp`EE!iRVSi&}teRSFI9MWrkn{wZ$HpxAlV?`!6pW<2@Vj65kx7;Re%tpoL zEJ-8e^3L5zCaC^^*qE|lc~~Noo?O4Qc=-P34)4%Rblv^BEu_D0-+#1_|4)b0e0id+ z%}s4=9gP1~YfH!U%6y53LYL1_`8oi(XQB;QZ1U#v)nx7VNOKiP#G5pB+iUjCW!pFV zQiBO2U=K1Nl&rBZ$ZV=X!~}hGiZ}g$;ooIx^y*E%mg-_Qv?gi^hyEbv7jyR;89`ro z2SG_d;>f=ce=FvE!0bW4V_{rR2^W14f`=}>yy*Q!Sxh_JVWUIA-wPFnURm4z(TR3m z^lgnx8q;q1VY`&Y5LmqVMq_Jvr`?6pRsd!LwAql;@ez&b_vayysKoMkzO|%0+(kj6 z_z`X+j#eQTtJ@0qX<*?YJl25P&F{8yS>oG&Sk&~##mGj~YEbtb0HChs%QW;CSFD0C zzmT*rjkVFg3fjK*spEIn_|M61Ae2k+k#FRjrIHT~NtN-HTu#@5rsSJzH?1~37_mZ3 zSmQv9iLJ%2Th;A=W zaUwDn2%N``>P>p{>o|SyJ=Hn`95HIZUw2isM)F?NNS>I}D25hAYXb8Rb&k+`eYH=j zoy@tAa_%f=0d8K-g7%LVyBf`ne+nu{vyMTo&5hC{u%bPvX=n4Wmn;!Q|G|Z%; zm2NP9K|TipXv2Z5aw=T#ow`f(w`W&xf1>K}Ec5~BfHwnm;bfqUT}};;{|j(E>5bnYM9x>ZDm9tJ8@{M2*~Wb==?Y`e+gy3le#_} zuSS1J{d#M9&Ra)C|3>NF_Arn=|9vJ44i!8)ugJF@)9)L^85$5(%qmEaZlXOwOZLil zb7GB=<~1Hqo?e1xl$0v86u=oY7X=?`{ON+&))P~vWAw4eF9Elgoz<$_0SqZ_1xs^e zq%coz8(dj(i`Ltmkj8&Swtfi_l_i6X7mIeoIhEsQfEV1F2s?sIr_G`XlsH0?Et6F^ zh+8}XmgYf~L{d**`2pd5H&I_xjcjOCMSZqFzAjmOuk!Tqnkkvh)2(BjLm%|%n8{>C z0sI?;*|7NAI#e^r{fZ}3L{^04#2)!zOmu4haxCEo<+G?)LBgAgyFpQ9mH{}NO9qmd z!#~Qv<0-*hBY>pf3&FyAMN8KZ38RQr4thpx77DR8sIxnbe6lHc@Bg<6yjxr=;xVA# zfobMMf0e{M17{W4gn?Q?&v##3H2re_NQfhPlX^}$@F_uk2E?RB2d$ht+T9L{|H~f@ z@cq=Z2IM=t6snydaIA#Prj@#VGv$hcFBkzGR0BCvXrCl{UjyDOHWe9pDo<`lr-bYj zK){J`1x@|l6Yw|o+XI^pX)GWDLSpQyQ7~oT6dMBc8i2BqADx8K@4oJ!pwWaNdIJ#9 zb9D!k7W$mfoS4PxDGO>O7Kl#IqNWbMo=1m#wmppp`L~p#5UCk_b2xuf6bsdcFyXSE z7zL>tve_0A#u0Y*xZl`mf7<qpS3JNnCtQCD1kF zjfzM>hsQqUG&%m8I3K zL;@%uN7{SEld8?;=Z6VLqG|%isC4PK(%b~&N&>Moowxr~Q$HkpkF}#q6)Z; z`tVKlI38ZbuiyIrJ`Dk}yaY){)e0Q4)97TwcyHd!6-d4X%C2fIIS!{029oqhJeQ>^ zw}pZJ>$960M3>4IeW+?x%e2bfxJZ6Avm+jvYaUrf?yQHJ!6m3T&Af0yl!$#cNO6$9 z!*6mRykVoy%SXLjT2IXmo!({_ah!t^KY`s~H6N~3*tC@V0w@kwVc70xDG!~qJN6<( znYuYx8;AnW4`k*InWkRU#4t!r%}tibbP#LT94wMEB#y%NGn?jg@=E{g+d>54g$PDo z0bAw1+zp!6G?)NGGhSn;9ySg7LF)wZ_tUL1a`eu#NFNbXJ$N>%Oi2*%=5K~rj2D{j 
zHxO>E@H$Zm@3XQRh;+!o==q~~8fj{^KCZWEk$u2*`b$7jqB8fmF&7(yj%MOnC^gg}t{6bpnAFyB+YUw_ zzSxSi4{&7xXlMjaF(pz9=F5LXfNvD!SL)t5Q!`IxbMKDs2a!ez%~D{ToUB!(VPII5 zXM7km6X-RAvp=j==~i3&L5C~Pf?=xo9h3I}<5Py^^mr7@>hyRE`-#-j0sqV&p9{01 zoN|+Tbqb87KOn8CI6{6fp7)>O$=jeBNR?u8$f4=NA)NFR!Do%Mh;~>waMmk#z=EKu z9PN9Ns<$V|(XI$p=`@3qzT|f3LML>QF+FRMI<5+(Cf73;5YV^8{Y;)7p7q+rA18MN zI41*Vy*XOh{9)LB5>AHJimv-f%$__mrK=u`(M!{6&~Q+e(MzmvuX)1I0>c!lxaaOT zY_UMs!!&uM8waW<-Mga$mlmpP!B5&D9GecGJdGwMd@LeXLQPL2wI;TwWs^!l@9d7p z55?!-RF;=i_YfG7&s|%&yQdv3(vB z*v#Wq{dO&HIRdn(2%9-4^o%W63Ra2( zRRy!Ho$YrN(LwInZ90!dGnW%sYcgwp ztQTffIrYnQcPU%@^Lx8%IN9j0OxmT@B8LdBOu~lnTk@$fCY=Gs zQE12M8){Hnuxgt9Dn=+FW_HX;YP;8jSJ03W@j&QtRW=jhYtHm9AXoLH$ax-#0o zT{(>{dz*wRQ8kNRqel-2m!U}^N1)rX4zYx7OC=Kl5G7Xv&e=xCMuV9tlpVLy?+iv$ zHN!Lenw~(`gqsOSn!}A6Ip<9Il)U&={>1rVkrZgarmRgNaii@_h6%rOb`46X9F64Y zvj04fUjPgd(qZ=e!i0vfJ68G@F6Qg^ds0NvLs zySC+Zi3qc;JSyqNBsVj_WY{zx4~ADVd!XdWwJJ0>2z_`Y=ro8BVug}{madGzG*T;1 zuidEY$$@gnEveZIAbQ*0mTHX~{}||mX$j$(n*=PwYl14@f!LSmtfac3|Fnx)PM?gH ze3|68GL;*_K;uGIfoa;vLbp>gZtW2tk$mMz%%CZ}`rnC|e?zk250#DOpz>U_+iL?WSSrb!Z=yf2Ql246rAtxgUv6A@YfGpqOH zDrjglgn>X`K~D=)n)l0wqi2dKc-66MiUNC?oMW*&bQy+}voT6;{S61MT)Xo^r|(nK zg^32t?*{9|IO+Rh(`>y%kLR%!swhQQbo$W9AiaNUaewm=yK zyF>ofj!iZE6F2Mqr3UgNruHL+MPl%TJX0e)9jN5l!Z{7hJhS8%Yp%RF!C=_5)s0(& z0t3rv_a3R)Rongj6vu0yw{%E7@(c_v9@QqIQCW!~bWCr+%*aUfoJg$a3zN)M@wTlm zHkl(S0MJJ{elfO-UGpkaWmvzd8L@gBXChBl189K41F69LdWOvuZeFsTcn2GT8+W z>Gaqy>#eE(>>pidYBIrw#`;MGpoG6KeOymFMv9mO%+}oi71!6i#eg9LDT(VG^e&%A z@%U+1-2pc7kxZme5AwIZMSTR|$L|k@HOb_!_EPP26&=#C`pdXdb;5~}iUpZyOu3uW zJ||ZAtO_PM_c0TWlt=y2tTX;2EmATjX4_*?j0*Mw0S{t316%2)hRL{W$GQ9|QYgtO zytq*)4!1aaEQvktdklYn_P+ome-d)@rE=^b5GhME=QMrgIWoo+XogeI*bp!K>2hd+ z`T-!wnNxv9KH@VZAvJb!amHbiCkC<~`d1&-)a7?(yFy2Th#LwQpP*yGlqs%_MWQoe zkR|feNvHDV`^vu(o+tb~2$}W9Ctu?o{EB6&Xn<_;#p*gV&6miDzG#fRKdfSmay7FS zn}?X{YO^oVifS@fT_-+~+ozzIoi;=D0i}Pc;6){Xja|#bzZ5W&q7|GK~i5qIh27rf4pra9&bgeb29%PgFT=Ivuu}tD0FMvWj+3xi)bU7+-ig zn9cW9>6t7>&0dC-Z|J&kv`s)zNI^f;=SVz0ZXzhPbfe;TvP5-vf{AR{9VIM-{|3v8 zqJ#~{q2C?V|B(a~7;2KLF0#ssuOnAmD>i_`^gLq?lv_3?D=Tf3Tzga)%yzUKe5d4~ zXbQY3o$MXdxs_4EEao2MUd5K0bskQJ4WhqESM zMSokR$=$pdqu;oiJpLGjjd8i(L-M+ETG-^( zw)#nAJ$)Z1@~9RCXB-D-7E>Ur$*GGA2O3okAqZ(QQmN>a(B1N{UrQyjE-okI2o8~H zjIZV=70Usoie%3-xpt`%$5E=*Y9510jJBw2OU{UJXFg=zMd-!lIyA0V; z56-sk+9JClNaS7LpymO}9dVPc>5S5W6o8{ zS--1*{mOkcG5Ix2W#1ko^82~Vgc6%y4P~jJMB<4X{fn8!dy!}?G;z8z$F~s%g{WgU ztw5=Qb_DrY$d%($WE|?+z~L86T=3RNCKEEL-hrue8`chu-6!X2~b`-&b*z`>LaOLre8qY zpRfPsa;3W-JcE{iW|uy8qYmyouhM^UseGWkHfz<|4jE0I#+*nW!^rMRI zw!LDS;4$?yJCFtO-YV1N$&+H)kUy{NpQ86m#5E+EREzAz_siDggl}#AOopD8rs&?( z%~%px!)7wcEzctloQi9FIAna0yA=V|-2FfNn+v3vZgpCFmw4OzQ%kk6f`%-bz_pcwwOdB8^U&gi?aOm}UNQl*{CrLGU?Z9!#1Q{KOF;H&xtYk!eIb`m>z%d)>;~ z2nJvhhzEPK@!@Ph7CcJPIUKZ(#^!D}N8t38;}SK3>T_xtvU``jIJ13In1*|?$!H^F zEa-?^QNy%-wkgCf5YzMifoilVsBMxeQs{tMn_+6cnd-0DPG*@T7#s*IF_!hL2P-A( z#<*4u{zp0gFRlmk{!2j=kY`wJYmuh$jvf|!i%}iGE>*!Gr{gK#KyOvd?YfNPAAjR{Jj-% z=^L#LjtgLpKv$?&|KLIQ_#HJX4U3UgSU!!Kqo^2{x&+fTxMnL>u^?}BYaM^=Q3Wd; z8}dgJ#L+(>!9NE+c_nm10B0ZA<+<_x(ZN~O4yASJlziEzrt%K*mRv=n5HgsZMg#6D zcsDF`4EP91{7qIf*-D7UyR5*DeNF%^*9{7ZoNtk6RdG|M&ixd-IzFzWv&V+h7qN5} z0JXbuNqYbYHR?BXDtMPFVc08XVkoQ59vpG|Rml(`?LPHGiIChY&kW72a4AE4m;j?v zv>-hBv;xF9q92$-jGzdLLU8oY+{3^JXCgMDdqV}Uc%3nDIqa3$?K`pIYTQ2rys~{V zrO}Qd#xd!p-EtB>#$<$=o-MeBu}6EqC%}+da+Ld+lA#vElM9iV$;lZ#*P|ChRRGm# zil<5=Sk3*GR%X?U2DNeG))MX}Jm%BTH@&FgZ5}J~lskj5#i)R$a+~U9l!>WFNOZ7W z^)I%c^2%9+ib(*|bj;z*z1K2>9WMuEJQ1d+HrUlH9~iSj8k6lEsfI=J3kQKQ@golb z+*2yNv^x@2h%h(BSk78>@-5TC(tu#-B}^!@0@|1;@x*s_>KR`Hbtc2})ey zLf_H3QeKOiS(SdWKaU6;$s3?r);3o9+{Mv(%p&B}t;2{0J=K;e?Z%M-GP^XdcILJ8 
zk%H92@oJt>I+HEG)C?(7FZX;vo+v5nVYXGES7^nI4F4;i=oIvw1jCeuA}}7LBjmB| zKI=aF3eNPqaqq&NBQEEl!mJJ&{=N0w=qUs1vEb5UAwOT&cv`&#!rDEfS;sM^rF2st zA|^DFR}0po@U1%>&X6HYH&3vkDVRE35MVPz=pt5ZtD06P)8`=|N2 z$CL&#CdgDluE2Kc&y+m#4)`1T9N8C=DJc7pdS!b(;e0TrkuSE+3$BYhmlN|=&i8h2 zU2mgH1TDc@AicNQ7HSaWL z*i^D1BTu? z;c?sua_eVXI&fUIVVPOK2}5l^%dzw4(meyKUYtFXgel`x(F0RW1W6uoBFcYpHk_37 zPZ61@LIeA7Qu^xRqeWJv<9ASi{f9Rc>wV^c0tNucfCT^`{BJH6|NS@rIqmO%Xt|hFnYCJHM)G`C(FE(= zD~R1d(Evpo5m0Cr@>yigsBl#c{>l0aFBilhGF-O#yyhq%(s0V?S@*Mq*%={lz20RD zPChV0j!TiPC@|#52;;7=*dd|UoEDlOY|pTT@RFly)kBZ}v;3=axl)NulA;|wmXb|r z)TJN+n_eG9oV3dLr6U_5?2!ab{@doqCLmcRjcz*b-(H*?w3y(P}Bb6@Ha zt`-cqAYThFs6lrAw|@?Tjk(F|B&~|1a`Sk0>`zj(e>{!4NsEt!4mSN5SL`|0jKWbY zJOu82`Y!Jkjn)WoyGhU5#BYnN`vPU1&s+xf_?Y?6Zj9$D@uSe8h!v|j_|slPOg22? z58pof-NM$LAickzz`+Nq9$e;0nUYg3x5Pu0vIG*t{N0MGwtq58A{hRH^vNE-)RkoC z+H}iO$-&Ubyn9SNl5d4%NWJdHbd?rSRq*bIdVuPQ(O~T@SpFWSHd|v*vbTIAYRBvw zic%`e)2!KzkcOd|)JMJrhIo&G7gHh}#{lHeYp(sM(JWdoLmfPltl5-(wka@=X!i`-eBJI~ zNp37rcR^`4A3zKCs!OvkQDCpifTvq_Q@S2-ENNhoaXV7K$L&0kZ7T14W6Slr8WjH}VUd8=iwJO^H&$^BUH`WgPi$ACgp;}{QOI8Gt z9pUqwDfnBNGZm@%%h`eNOe-u>1C)|x48bwEoEtt6|9PQBGpP`f`DuFj^8=nG`Ck{> z{}Q?NKdiNt|DpX>M^un_AO*d>~JS`Q$=sWAxjA$;=vuM)#43X%cG{CE6x* zYw2A%*q6xl_t{7j2iSGC2Lc;A`#fmIbv;_dt_u9e@PjT=a2FLiIYb6RaRFWO1EHnR zx{d&L66@Z8|FJ!2UpX`G+n4KG{66VF50`rKYG@Btj3IN8GR6B3y2D1Xkdnrj;+fkYW&deKj#=vWfA zfrxZ`zVTMN>-|t#RXe~VVuT^wbBPR60BLdJ0Cp{&zq=GPCy(_^srwzDnnM5n;SpbI zQ4sd42(jrrg1&#mBS?>q1d{_cU86K1qGBZz==z0_=*@aa3Gai_FUBje;4CnosY*I2 z>0dD-4;_HmcIZp-Bv5kl$h&(qo}t*To_pjEjE|4p6Pv9%#`Cw+;QK%q&03|%OcaPJr8 z%>Qes^Qe^#7~j36d|VA@H&*CIn?2YCX+!dwDQ-o@ye*p_djP5Dkd&1+p)aIHtS+`5 zCn95u=Kto#aYY4LkQsUI=HI-s_l|Mw=lpa)yqU`;>%wK5RT*qYnWik!MLnZ7yl;8&t6YHpn$jf2D?@Ky^&Ed_#Y7L|7gVGeHc2K`SIyy5dQzY#{Y%x`9DmE zOErxjx+jv?b2TfR`oxu<$DX9o?5y6?{w2wVV{&y& zj%5UKl)@(l2d<@`o@E+UVzq5y8*mtuhu|ZaYYa)w9Ri`N-iq_11dEPbNmWMDM$ncu zjj^^e$*NE3GnRFY6~5Vr{b6UK4w?;K;iagIk)`ElS9%`5#2+DmZJ2nPV9CLSD#9m; z%QB9#$}whi#2ZMVk^xagy1*}6D50}7{iX{{5rXi;e4kHGfp^5FfZyx8g!}CV;#Lgk zLJ!nXm>ewL%#3~`laCe`5|AwpMpQ+wwXeD40eZ)a^lZ(~LcH)GOT!^lBn`rbt5ej0u_&E3^A?Ks&__i;}N-BQ;*PZrueYVbZo zp^@=uz#>AHMp}i9sky#`H4sCDe52n^X$DUspG115bN0riyVbzo$=bHxyJN(NrA}qG zRpChCdj?{!KK{@YCtA{(mhT>?M(+`F!Nf%TTH7FSALtwT!mW=9Ob*0$_x=NDJF0&9 z88D0TK<#uTmGpc1{z$5MC)n{23P&XfWVhNz4ZMWEF@T2_7pgA6ncS?PtP)Z`U0h{Y z+ST}GxHCGQZfqkg`n&yL8I_(_;3F~=lnzSq@yCUP?O&A6IbFf2dcO=!&WZCv#@t`*a16hdLgmcQT+oQEbrG-j8>z23L{y{ZI35+MC8T1Vj5iFnmjPMyi&gDWg z?i#kevjtMbluDpMFCjyQm;{JD^&Qh~+|jhJCBkYWT<&Tx$Z}_C?5m>Coz5N$EY%a3 zvpmKazKpoM6ief4)G+5-IA`!keDaJh5ywe+BZ!xUni5@euKy~qYndHg?72=fupG@C zT|#n^Y#T0ck104GZ$pId9QH0Ic@>w|)L3jgj8c9=c`peZ-{IR|4OrrVpAnwzMw$cK z)g+WBT-S#rOpkYv8V&)PkfPSo;oOBOMuB-u5m2a^V}1r!lah0WKj+^7(bc4>M0aWfaRq!IHZ65~!10SB;cvo~lh}2(Wr-i4i9v6^td0eG}DN z;_9VVSO$#G!GLOtTfOe)fy*~O-z zSvX{=eNw+`8xT#iX=Yg^k@@^pVE`kMTh$AHBR$u_Dd?b|1!E7AIVUQUWleIjGm`Bo z62y{O*BTqqDXN(g_W;OM2+1PA!5wFm1oPfC&Cp1wWdWtYWxA3;mQ25RnENez`d+by$<8g@c{{=euNEz`2lIP&+gV)_*chuHrIWk7Wxs?w}H<84cB`Lsv$4S zUUgaO?0nk{5iENS_6W+8*J3HgoW+;@ZsKCO%C8)6C9T}kyh%ew#2QO85;nDWN>!h1 zJ{ekmmND-$40QBtzGiae(*@JW`Y+70rgi3&c35x|_`Ce)BGLD=E}NT{|G*`gd!5{q ziB~_tGc}?@C)cbHhHJkkW#>3+_)Wd=lANkk4W3NOCxKn0#^1;Qo3^qm-;Jl`^Bqiu zwa>#Ug6qr7*(c9&-rGi_j6~-S4c}e3qohV*+2?n8KFWn4T7QaCPnRdCI5GsGGU2;T z-s@raObc(^NK4;65co;cY6;9860SP?psLb*_C)>pzR3s`!iF#nlHjYo^szIX^LsV> zE#-k^du!_y^QzRx(~6Bvjj?fbz-PD!!q=|8p|A?RsxONRw zx@J0KVu2G0fKzhcURfXL^RCMbqd05VAI_5k27gF0;C6RnW`Ht}0p%t1xs-cEN+B&K z8m1N!2Ei2S7MEs3ef6lC8RdG#t1RtZ{hf&ul^#T3RR(J+2yLy*IlkkLig>>eo}^b8 
zPeZ4t^^43xH1OlM&(tCv6r31GV60AIrKzPIV~Hs+6^Bub^T|FGyBT5=AuVAOI*v-+9)a1a{8++rMDk2tAFN0P&)Oo5chHTP;?uq=zLN^S;Yi$y( zj{HdPCF6>U#VHK>(x&AoNszby0M_}(*qHvJm=gY>N8tbY=X}%uM&tf_CM30uovG#j z;fP$Se_O9NA$)Ur1LGTr(knXELl0)*;?R*gG3bj~tXMsT<=&Wt);5agiH})zI()xJ zy&1*jb+OWH0X4M-!b=RpMmuM7Y{|K~)x|!Cu*0rw!)@sj-Eo0Vi*DfKYrhQ$d4JgY z5nlV!UCq`6+aTRuJ>4C2qPB4{hTBTwuy8}Mf{R9IVeM~$^W(#a!g6utru>eDr5}MX zY00~)1DfkLLyD%6$a9yEk71$-~@IADpeA`pVdrj9qQhVk-v z;0K5y%g{PSCsPz%4vg1gQW4dnACKGhO7U1^ET%t;p_}@p2Qd-jOnnEtsVvkH;57~b`nD$d|A;Vu6tn;tx&^OD=XARHk~DEjwzF9%uh-&&`XcY%(H@oE4i#o10AD#OMsP?4ju^fQ49^m-}B9Ddls2*4Z zLZ(3v2X5z=djweP9H^Jb_N!^$;|<4+{IWK~Dn-D>Oeb;Oa2;t+CxLE%{7_<*{xpRn zH&c=HzzU@?;QWtRxgNIJT7{}9@}Eq3gNtJeG8v0T3lqJJ(7%sRl(%B&t|oKVHg-ox z?ikYKfoWt$nDSkjbveXJOoqtq;WC1$7iPLIeb|U;N*?c_anN0aAf51s-Z32Ck z)oXgCu3B{)6-XT$pDOvUp5ImtXi(K#8)WyEzyJ%NeZtD~6!tCcx1W7T9{=lsIs=^+VCOLO#V`$MYR|%YF%k>KYs?@d<5^^3i*nD7E^~DlX@xqp@ud#Qk zP)CnLm#{t|_p=tPsvoRbljRBZ($kL4nhr3hWgKoCLxdR_tPU?rr=sS;;-IR-AdEhm zc*yHa0e|9EiSlI6-^b3q88D?aTm*+1-3|Z_L3q1}mw*b9IzC;$%XrL7HGNmLTjSh# zGr%9PaLLQ#62@6KDW=sA3V6~#IkRBa0$F4rwz5a2(DnwmDifMz5nXmEs1x~6N9}^# zg#gWauOQOHrrql(orpQcB;8!D(xZm&yZr;v7O+Ve7K!JwgMvEmtMy|U)7LWwm^Vyn zCy$!Wwzs0ZwV|#_?wgqwN;kUF$=Et%Ai`!3c9rzgFi8SWOzJ!ftDE_Q`yoOE9d z`p;ioAehmmQT@x0kS=QvWOYC*Cw9)fi@gJhh2B5chRcIKJzln78@gRQ*Sk|2*O`cR zrcYQP+^c}UvpTzphky6my!)WDfSrM&+a8btQ9DFwPKU-eN_k`4QjuOVQaOatP^6DK zh5duQDVrjRriBrOHXs5ohY0%Gz*9WI&aRF)!+SXuS*PGX)!yidN zAequi;gMPNe8InJCEto6fuPubK%4i3#7bBW*b%ME77H%38hzLNteuIstpw&Vgv+~M z6q8Kr!KRL`M6VO1sf1o+mN++Xx6Kj}47MK{k{`U?7gf7B=hztjLe>~{iEm*}*%hgo zvEn#R!k^7g3u8|1VXh{tm-gOQlzX4WNWTSY?-%^KA1kkD^ZO&M$LA{x%(v;wRGZK4>#c-a)W=EXm5oEL zOiFr>uXGFuc*Z35k}cgIppW^SGU&6fR2cUC^Lo^i38w*;0k4$cH$M*q(&ZhL^{GaH?!%Q% zE0vsQFXE}RJcWEi-$1Mv0|tZJE7;BIF85Wf&va+^pHG+0DofQoFAKe2X?Log!UY-W z&hZGnf^S5q4{CFo8M)5v&f?IM$!d<6nc2CP;UdsVrNF1K_+O8chFZz#?ODv)IASXG%8bnorgCC>CHi@{Z(i(dt%to ze~CRj)nE(as^RggH}!sh)b8Ur;nXV(D!;MgXl3i>qr{znN26 z`xIqk^A@^mdFqqpT55)q-tsH)o%@>9BCP7gAAG}j&!)|9vCWG5VysY~U1Kh(17=^! 
z03{<~qMf{qoDjLEdJA_~qQxz=6Rxd*>U^q2O{q}2Iu75pEiH4x*r>4T0ToQ?C!H(r-VK(w%8poel*@ zK{8~%1AoNxoV#ARsKu&*;4XY#6u2^f&)pR7Y`xtSS=r`X(&2G^0E0* z=Z_KG?7OGx!$++k9H%ZxmmGdST>-5f+EC5cGPu25wZ}f^F(tt3G({^-UJ*XQF*@1j z#4aYKi)Kevao&M2Y&7tD&Y3FztGANflu}AL+%8^RO)o;BS19B^6$0z&kSlV)003a9 z003D2*MrSZaroZ?)&K7fG>7Vz*89u|Us*r7Kjd|g6&IULs+yjxInozWkwdk*uJe#E zf#aePmE}ZcVyo?44d0ytopDHydj%~(ZZL2}dp4{dFHo;gmBpl(uhAwp!TwykK*D=H z&b0z@zd~;WcsU>5uZ>xsuEF-XBZa|+|KQ?mWqzKCY=N^Up{5&{F(Z=#wxR)q^|_j^N;8dSKc zX+ceLHCGhX%wD1Q94K-~7hE8z(0IC+PA(%KqTv)WFU5RcljLN0yCdY+fC^+=^k0bO zmHX#$>Wp9?Chvck&~w3xlu3nu@pU=KD~HPDRP2eJyQo(^Xub|LCdW|Z{9@lokuO}* zMEI~?7(aVpbCeI_E)BKossUK8az<3IOJ1K2+N=QIMrRNP4Twx`D%siE(4dan9zL@GDa;VXtnWS* zNr?^Caor-t6!Fidx{;#36Bv>PDvVYJ!=H*weI{XYE}6|IjwH6fCmI-)WWZVpHC&}E z#+)Z&%G9dBuD7A&@u(WSe`!YIj>@Vr$hh`E^Wx8n$TsbA3Q-WSg(pJ{Z`=-ayOlnW zf45{*S`Xi^8qs%txKu_s!@=<4gGE$Ssi9mQg|D9v9@y<~aGald*w?pgZAhskLmD@A zvA8B5Wlc}80XFtywgrwo`QBZmR9yafgePS%_HboL6jmOZFVtpU894UiJxpOg0tON3 zt4!m78z?9h5QKix%ArP$ho%~8xVi<(&mGf;qvr=##|F--}t)n?RD(_OKT1r1$vGr1~Ao+BeP_TR^WW#9H#8C)~^WTRZ zdz3`x*O{w;wlpky@@f5ra{HQyHGa8lWaF`73nL}d^~K6$;p>epSXc0C@CS}Jqu)$b zgR#9q<+UbxJpSkj>4@RHTczdbmYxM!f4KR6q)(qc$he4Df4J*89lz6HzmwoXTEF;U z*6EX>F0GP2YG{0cm|eKI3D0 zcBY3X2JGpZI3G4hUCsju(QzN|Zy99qp2C7@V@H!*N1y{U=d_M2hT!Gpz79X2hl9Lt z&enUGEoOz|8K3LELiKv0UNMK*N6d`p+@%oH!sbmUztH~rj7+z@yqlAz@)vSd%9^#0+p^8~b)$x(btKKp)gE+Pjn*xu z9S?=Pr$ARoJDV!XI!&IBBc(@lkif3z1Ej4pQYL0BFpAwy{AA2v@HTAs+ta1ZyE8Wy zboZMP=Z1Q>2d*qCD90G$BaUV(I#y9}ap=RTHl0!FRTnSM`9`VfCy()xr=FK^0~d`# zdJ{^)-+Hg-$Mz;bI@l9KdiP|aTPCpw#gKaY0VD%v()-_S57BH#sH1rG34OTa=^75HPjqbH)M5!C`%A}(&^NVUs zlHuYE-Isly(Jd)VqGNT9`-4dpbTLl{+-L1_uEwxOtjecb@0CKt5YVGWrkj;zhG-M{ zC)Wdvp4HjAj>tMZn#b{@z4hkR3AsUY;;vlcQGvTA5;JTinJg!q$d)D#u=;q{^0fVOhfM(ky*7UQbqrNf~$+532FETN1y zWA5GR0GA-$&B}jsdNE>>=k)pb{ZhpeVO;XA?wF!Utl*uHi|Wt_yS(=Rp*odD|}A z)SIBOl+2b=KHUFL1dq^dUZ9X4y{JwL0KoXa{yjgOUJGkiOGB%l`uG2NU%{oiW%POr z{J)6BU(6bUPsC()DnP(zk(q;e?|Lg!O}+T4GYFkaLI}ulc6(NL+Z+p&Vsdp2=})qn znLRgg{l-BIu%qbWu&^``O=LxYkyf@T2;6!^;6Sj|A!xB^*8F?<*W1~d8@&IAv$qP4q{*^%#mvmim|`kPC1z%+#LUdh%qlT6GnAN1%*@Ox zG2_wSGj^t@Z%_A}YiW^Y`4VC25uWb(?X}lJ9-X14r1b}1DJCZB5kGcG!l1NtK!)a` z_At$jBOpOp%CawJApv<+-yUHJGQf+MP9ai20l{-1!P!#sWdYficj5W!)h9;Vh3^?t z5MNzomc|W;Yw4G63^VbLpd%&j#XGnE?$tQ{iK7ilB+i*O98>#!+f&IzWSQ^Ef{AO5 z(SKRxW6U}wqLBO!G)8dx72dz1SKxT62s$Ezj*2rR%m8?1w>4-Ma3ZW~=PuYGF*A>@ zkF@jLSiKflD6SkMjT*+_$Mn?2pHvQGBCgy}lF2Bz8@HcMQ^8spf3Btt{?JX76W>5a zKuWah1plpSSGpe%cDZFrf{F$@lRkNbq$ww#d`sWgx28=4hWEXfV1R}R#&{GYK1Y6z zN8FK75mB2G{(@}2yAio#MtkN46JILtJGg8b3_zt+mpw2?ecRrdpBAr>5q55|tShP# zqJp_?A)*taaWI?0g?e5sbV1?(XVT|L%I?&iHG1&6sT|5>Fv*i=w}e<9Tt(8_!cm|+ z8F?}uNNv*uI9U7sihUsSY!yd)^>CW$DpgjtgB4fOP@pSVwjl$7!2H`)HLOdDw@6M zG?kQ_#rn&RxvTGqD4@Utc?AbmXb{cl>5jUB3X_h&;ZfAq9{fUK7~*93Xl)pq6TzS*d~})q;2!Ee00Fvv9;HXq{wY)PKEDv|P9c zQXzR!-8H5dJgw1w77c553l^sJP(X{69;=`b1(MKPwv33oSgZhQEa*;cXl;hE?iPjX zLea;S?tAiYr4Iu3Z$0UEnxzoAr!(RuFAiw$(RuJW2&p?>`* zi?Kh1mj>Z}?76xC0bWv3ymtP8`Jba~=LvFtHH{uvtWNB9xo2EF5 zwCBxTjA`^7KxmyEX_5Xl)Dzvu9t{-_++)d z8Q8xT4;dYX`_MHmUoZ7e>uw0d0)>y3hvDw z2)YF?z_lCg`a$qI|0m;l1rqOojYPPoh0Yh10Jyf3JAW#4|c|ZV*(z~`Xqd!DCDbL8l z&?ix0o%TJHixqiKhUhyt7AEAHIs%Sbtj>09Gf>kg+%S+0y3c|fOtfWL@%}bwXV*i0 z#c|sa_eav;ohR|^twR|Yp3g{O^xK{``+fKM^XEq3mnme|x+tZW>kUfQuk5!F8DfS< zKb{+v;(jEeCHg;pP|3@Q*XsFl+5GBRukW_R_jbPuejH=&tTgn*3q!mPX(;LyxHLbM z(9v4TPKzZHM+ck^jhDtw#MZiqr_C%2-mI7#|JK#%_L%D;kSxenwLT9Lp9}|_WbZ?U zZH$vOOfI_8H8rfg6~fg7y$PgcZ}eRR)v!o3fVv!6h&hP!7`f7{lysQcA8a%4J?_lM z(&2sx72+t1-We?9qU2ImLHwi|nyq`_P@d0}mKs^z^4sy3`!=qrQ&Hw|Z+Zm}dz)YK zjQ0^9989io&Op#n|1^y$c^R7Fvi=bI!Ap}vX!uKIYS^n>pJ`sxiH}v6y{S{W6#CK2 
zk$4Tn0XQC$$W{msD=m*Qqnf@f2STUNW5taZ_8i00NU+qiE}jB7j&mF&Eu91ZLNh?G z8J|{2OKQ3#p$#&Y-q~<0CRYN_Q{?a%w!*u$sV1$F!9>f7-DYn?%E++e!6(mxy8oJ2 zV2b$BH$2-k7%u&7!9spdWfO-27cu&I%^5uErP`$b_Vmn8Pum6Whr-snpyOHP;=GWP z<+@dS?LtHHm8FQ!#uIj9nmPNqPG#kARf}|QcjeU3J_tvSG@AN~mLIs7b!7EHpR`su z#9CC21N_H7*uCM0@&D=m+7udoZ3_QB5a<6}V*V$|BmcvO@GB4pq3iTUmurU?^6}?<=ldnYh!u!>Bjn3eA%b9)M&0Y;X_ne59rZ znBt~i=4;f;3|H>BHWgVtM z{$u7>y&5#9K7WiK1YyvmBc#C+Eq4KZ4et}@k;kZ|H@^EX@z4DmXH_j3H;nf z1v`Y{8+l;D?7hX=!{qAq+SLEBYley;V*K?rIKlrbo$3EW?fvg`rWAkaOa-C;(wS2I zU!5t9R0L}VJLmJGWHLTlO{Z1tYzN2fAv}w=EBq;D809=9Okd)h1I>JgT zDFW>i@G^dXRrkLygceFt7Ww5i&KS=sEW9A}u#diH6M@}!rmuZ>9J~7-Dka1S5?vRC z=MCLza2=1%4WDA=YfvHZqZhoxKm0IW|0w~Bnk8*>Z(E##@@c^swq#I|NbTfi9Du@j z;!dFMy%gHBf4Q`ydMv)9xjRd4XV#HpKoM?zkQya*$g`rXa_ z!{LScJbl4c<%FAbmxakjUb1hRDJaPER3XFG4jpu)OJZ4{?=Ul!X57#gG%)eHR ze`Oyz7(4!Zm5j5}giYX=De5hn>ik}Ac_Zp9fnKQ09EHm2j2~=RT+Vk_} z;ydp9B2{DP!>OV`Fj|uOSbk-H$e8`jnrNKk$wPG{bur@HV1dH1CC)T}mYr!E`O*A3 z1p8^H<+b$SB?Ql2kP)&wVij&q!SDsa#6^lt=xmt?jbfbn`sJx7hJC;cTl(LD4~?F{ ze%_aT{i>EOOL4+?)(3_i{0=0_S1e-1Kb%|`Q-tTeH z$C?@7?NzHuOGZe{`$P4p#@tzG*HZCXrBy^P$H(+U#2Iuy-iPkUfdG{&RDF&&KnxcY zfUqnDQRf_avyyh+4R`z7Iavn<$Yp)t?4@0k2+S(k0rMf_!PbOCn1fVTqj+sW-Sktm zv*BYvb#pMQ=N+jKi=ZYU@niStAN~t+`4!s`e%)2guNn@+zx*%wm$>}@rt1A~G*EH> zmDn}_Bt%clWNd6uCQqwk&_S{Ed)Gkgp$Y1%D_~NVN<{>+dH-nq1{;<)G|VBO_425FEl^~eA_M;;x}?oNDmd* z{r1MDt5yRK;K&(XRF1);FNT_&jmailsrf5-jpMC*lf=q1%0Woe1#?wgI+azu%Aq0s zV_XrjJf9&UJ0E>r*YYq%gq@}gk@h`JB|RA;>oy?YY4&b+P)`yI>te8br$>)q8=Ek+x7Nm;EbYDpw=>IWA~-tU`7*=Mo}$GhQ|zR5eJRtv z4bnXR8vUtNK~Pa{k+hE40bKEsXf02hiw1|g09S*S}sR$=?>#V{Q~%EcJP(3=kBy?);w0df(+k>?K07{U*8$&E{LJAEA@ zn3q4vq^AKSUMc=^2j;h!{H072*9iC5D~Pg@cA76W1Eu4lsI{3126vjLu#%Dmvoaf4 zp_LGnvy!XEWYZ=};h#!7GE4V2>eh2TE$onO5m5{OUVW8(ie@qFsiR(q^3o+nJjdNI z_v!v%{Bug?XXfrAZUT#W*Ht|;Y8N5vW|D!+u2@W3bGp$%@&O&+#~L?Q3Tz2r{?A;Y zKZn!R)NGUp+bE6TYkK_a(%OIdpwsJH89O+A{Rqs@HWyEqr;4b)ICp{QEe!wg9CfJnb#;{BHMt!*mG^MKo`1r1|cB zI4A2CG?g@WE*D`KR>YV(9J_!d6m;&=gN!RJUZ;~~WULcllwh=8Xrv!u$LESQ?oWnu z=}`->@>+m-Q%Co3eK+roUE=A6scU4ax!YCSUN066{ff|&S47|tBFqk;2+&5(=pdN4 z*RTGAUwR7J!-+IB3etmQGtG|XXD=$jk(;s!!;_dAq=YN}CmfozEH-%novf;3umgRL zmZ*iJl|z1hwJ?F?X6(R6rC9cif9B>>r7KTZj-mI~gMo&y2+uIL?vOKFS5V^a(&rDZ zm5eF`|9>BHz_~*o`d3@S(3i(7+rOL&e_ze~f0zOr8ed-AoJe0@+)#pue|kq<>ZmZf zR^rofnhbj#<|amR$yX0mQB)EfEDOKdLOs$836gQo2^N6^)H5-=pG`OovU9{BRam{vnL0bh~S0?_UQQK&ev@JVEE4^Iz=u;GDqJJ4iT4DbRw6n!Az z;~qWFzO@E`}5LeSBM(0ah4+=+>Ku^u&(&u_q9vHkFNAHa>lQ;|_V z^cnIz!Mi|#{K=-N@|c3a?n>uIV1E!ouBGUsr?yYDoPOjvBtY@p{ALQf-l z2J~LOmo1lblB?I{!)UCuWfGMJFFjT0SJzGf!cx5>Y%OM|0c6)7N({{G2FvO!2k1b? ziiH*2ePi4JwitCQ`?Udw!HP>Jt5jn+$#+3_y4jJ|*4VPP{3Mm){3D)pz1cx~;XcARC<(fF0!w2hbn01(e3Jelc`7tP+L zNN6jnoo$GtS7X&<%JyuYVJUK^Mk6A>9j-$$+s|H7KM9`G+Omu|_kEW?e4EobZ_Qn! z9c?`+w!}3nPf!p$9e7&91vy;5nR5)~z#Z8c<>7Qxw*C5sya4@P)H_k~EJ@M(rplG- z&{A`Zo{hQo2TbUaM4o4t3Hg0~xx01Iy{guOzWKKuL{=RIm<{b(G>#O00mn1?5zTV! 
z$^g#?2`bbdG5Sujp;BWFXXjLx<*LPn>17aOC}!s1Lqql)2H+jQo5h}kIzK;7tlj|{ zI_4seJAFS$XrAkisj;8DQN!Q(1^9QBXw%M=B)LSOF|QUCGCPq{ z8_$xP+LR%C)`6uSo&)5ZKk=D3`9&q5C%$+2IB9xpe?rFRn~CB@BGbW{rJ$=OK6^d8 z@6oN_D2pMKki>>u~`TJn}nZnwMfH#j8Ng8v3n%>z{(zLZ^+*?x~=G3u;} z>*>Mdl2Pjw!q+N)H}}c#fk4(^e(xRjfVoeHk1u!>NhlKr;L>d1~w1&Nm&0 zXAk$jW$YBWMyV?zY>n3^_wCfTSMZWjf-jJLXmr;(D0lidF}J{Fu6^#NWEpUi_*teT zpu-R{MTXj;4OxIhTFHJTf~GWMr|dHD_@xKHKMgo$DvUfWb_olj?iZ3i$zN*l>qYy{ zF#Y}K^OAdR@Er+>S+mbi==3y9d4YyI5{9o&{%~SA#mG8FYBn85MTpITxWPvsn2I_7`Aog@kT|`7oj7%*z_njht@F?lUyOd!mC`)^iF6| zm3M6pY#rEY9v>IxbaNSElwnEBdX4uvg6sv+RKsAfPk}?ymQlA4&>ehN*^!KC$Blmc zl;)*J%Z67I!5dk*Kz@v( z9=zBPNsJB8ZX_rQl05)0cukp?N>O$uF zm+SoBZLJ*tqh3e<1#QfoY#si6BZgw{S0e^o=Ob-p#2E~f?0ZPBejf;GB>?odo z*%G*_^|;&FW0Z8|WDhl{y+(=mF+kW<^@)aC zS0?9G>UgQp$GFh!2>|7ZpRoht*BIdBv(GCsX+zPV5=$p}wIl}xCP0l3m@}xoKnAf4 zZyE?7fI43 zJ=SoXJRVs9|r`%M(+mcU8E!G~wm-Nl|sq5dCuK^#Bx@*4^e&`-X99B|U9I0HIDnkTb>tp-6(nX)Dmv0JAZuS94Us^8U-B@RWJP;6ruvWTrLBoeid5Re ziup|kl?7PDul207ua_@Ry6i$my-rb)hqh@>qrxEjhS@?P#m1>BE>D{5Lj8Q%LTGup zR@LfR}>j~K+6$a9=YPuRH6DKL$bZPud z@UF6*^x)Ntw?|i?wW%<@VeX;?vl~VaO51*#wMB?C*|B*|5KnYoISM#OCIy&RFO^1> zHu`xHZR^venk0L`Gd-U)5yMgRRD_3D9-xo}rf;^m8604EEom~k1ld%t)7d@ho3rV_6@a;;#X^G4-YgOi1B zIb3>lQO^)*)e?$n#_vpPO(VW+u;+-RcQlQaXvIynnVGWo+{naj4g@}$Je*2d2wjVL z>h)f2lOn2h{8R6x$vm=6a?GKmvZPG0jOj#n;f$v~hwJ7*!kTBT{HN+#<0%$I$Q*`+ zwsPex8SGkNT6Xl6qmiMt1^#^&9P3G(a;8RM?b8 zsz9fL=*gxb?-tR=DiQ`niyUrkX~$nXg|_JWnex~J^$k=&YYZoNoeC$L+l zQm_QVm|TG@+X(3#BaID}DsdTEsPUq<&PAfp?aCkUht!@Ef&m{~P5pw;&3YQlnUAvY z5*g;Uo`8XmdXHVFxcJaot>!Ec$=*fOTA^)IPPa|7QqnVb(=H}ub+E(OprUJ2)z#VQfMGdc@}9OpQoP#6k`|Kx zcI4X-l2vD>ysA&BqGe>2Px0zWRBrm=mk^DtGrooZ5-^e)8I(wz`fSAAORdr8ytPQ- zT4D*42(@bm6x0aL0w`MwCzPY>)_ds)m2re3d|}MJK;ks`SpN(!RoU(Q10w)8Oxc;H zo1N9s#hGD!OO3#22=N?VcE*04n$RY>7+hx^;7FW2EYLR4Sla|#g^dOw7Q9YR*!|5~ zskuux!$MVFkKlR9oHL)g`&l-oso zKRfBbc?+PBW;0}|sRF;pc45r}bvE#t^v$fkga9E&7$S#AM`GJm%_!bgqRiF_TY;L@ zFDTY$R1p7OOAjKH%P>RywgU+_TE?oy$)pF9a|k7m>5O3(XW0r%zm(3De=}h#%eNHC zqW5>rkKD)0`Ya=?ezM&kr>U zFO8#pf@N5mBZy18w~>&oR}}(~0WRe3BP+;@c@f%4ImY?xnHeUH=j~qOAgWh#qykx~ zhxB1cYL5(~8!9>20|HaytfGStMNIoLjP%p~nn4{4MgK?mH@lgT6C*=1Rg`u^Tjm*d zjW*VMco9GIrbrdyDkRl9W+i}{(b829&n$4kX8~zi>$#sG8-fDW@7cTPPDcx5kLS`; z`4x{`tkCl|V-1LO^0{J1{&pvQI*KVIS!_vn63u028k63>+Z(Tu_OZ7)!OjX=6d5I4 z&R<9T`1qf!*f-&%q1;PS<91F+?yPUkvbSpHX{u?CM3-+Jsh2kk-vOqZbs4(-&E_=x4l8~&QbO@PX z+q1+;Rua@cuh}PG)!?OP*9n#XUdEmJwOjfpLD2J+&6VhZYT|pl%xp*uJ+1pLD;Kkh zhK4H_h$3Hkh#;(Yq?sF^h;(=ogC!~?L=gFPHmcb2)8<|l*KN7%&H9jh0`J*rF~GU$ zAd_6A2m01@1lAAeP$jHGuzWzDVli8VgHTB_WjLXig5)!Z>-(a}h^K;nqPeU_kL5FBIgBx!20Q}&Km{y`v_=B`+<6?C|iyDr|l8|69ZY_)(L2N0LNoUp(6 zN+_#dm=xb!Db4RhE*|oqtqkl-U99R!`&|I2HTL8p{q|4Iwd7meBP+0-b}FVABsb{D zc#el*88HkJpA4ul=+!aqfB>)AFrurS8D}js24bg3dh$BSeys+76xaEs3%3X|;j3)6 zLNSSbCd5JrIMbBUqf#JOqNoqRw$>#G+R?*#ETZ`@1EXu{2czw81 zWD^mzr;`J|r&>w{RCjRKr*`6ikYt3m1&G$(M@NH!J3H8UaLaboGjr7|wCWD0+vMrs z;afkOSG4^2?qUD-mY*sO>YLv>^y-+??H1n3HwxLw7QA)$hNT1$?;FM=BLL|#6E3I* zpa9)YK)|Zt;{h}y9%^qBSh|O(7I}w(Tb7kG4dZ@h%AK(E6ot}0BHg_k!S}Xp_|R9D z1&%C)qb>4XlcIhe5H#J?tk`l^#mWaH(LH2M4$>VOIu~{$&h=w^u}3c z<2vrJ2w#nkJE@vhynBnA6`pb6OO&pg@npXk^MVKGDP~v^7v4TpBBreqRLlEgBz{a% z$e6;W`|h>9ln)%%Nk=CTk~^YhYS3CZ!;i>v22iRuFPz(bapf$k4xm0DZ+%WIx4m<@ zGH`G4JJV5;+*K%rf{z}$I$;#{%WNv$JK}%igjP;iH;eu8Lf#NNAsIw%Y+22A)g$Ka zqX&*TQT5W5?}=Xy(*WHr;#Xn-HTCda=c+ze!0nf?Ou&gT&bwEigGSu3wxSNi`du)f!HGHr)=aORD2&_+$7G6`k~nAXqC(`?RRge zuI~aj$PvT)Oe*zD*R;%iVM|CgKaC2G! 
z^JN3HZ&gpwlXOOes0@sN1Gd?(>_J)d!iP98=mr`^w63E;k1Cm8(1eXZzdCRU;S8a{ zV4Hk*2o?0&RZ8Dqf<}=+f3CJw{QfQ?;C>>LiO{aKpCqM*Gj2e+-!HCMWK8|oZE&!1SDfUE*goA+{r#8!*3hXThiR*Q3R(O%k-TogECvSd*@ zeU-V*91W!G*2Zs3_aiR$(keTUH&d2!jSqyUYX)q zGXOQ=u$!1}zFYMNQ{5n;-V}cqwWvj0Kc1gfczJZ5IL|)lx@%CP;+Ce39MRctadVF! z{w0!7hfH_a9PCc;fp2!t59SbqGd-#U-jC?JrP?5$W391flNNyajd&?8y&4i^|2)<5 zX%7W~i`IGmVHeaDKk!m`62=6J9}>m4@#b?B22G`8VzHPvtisM0W|2!rPLqmbY%pa5 zZ07oz-0`5Y&$5IP&{h~RPD-RiF&sF+Jj&5Y=~Xcq+@j?oyi>e^i55!AIgqZ>uld{9 zBRX!IhB?-rq$4HeBFsP3eFJ_x3Z`Ci0B}chE-9Vr1KQ4LC^w3OAq}MGs;kP*$Q>8C zmBXR52OdJF5oY{ceWtLSjiZxErSIgJ>uUKsi{ z%Jdv-Ad^@-$@XpVA#h0l+u7#62>1z&tNyMw`;xX4wch~0pMBLB{Wpixi@YC{^c-`+ zT29c8$C<)e&RzrWTz(2V{g)y%q<@%IRTft8a*@=yPF} zX~+?(iHp5LH29tPgie(=fkjcFl8R214>Ye*4UMRfx*|;c}`HwqMw0mUN{OX;Yj++h%0@uLR$h$R=rSx$Ev#g@T~wA+Qh18`{4jV&U6xJ;^w? z4(?q;Pupux+q*9$1nO(m5sZOhsdZR~J{0jBJ^JExPsA1!2)g_)hj{bJh$O&9ym06b z<4sZd-6f_BpHb}547{txoG8JGYcE4d+wi7~OK$-#(uUS`3rH?lUIMOf<_aJOua>Fv zXKi4|DR`5PteDXRav;5O1&4zi#KkqIt+2fM1ThgThF%y}CNRf4g-GE2L6MF2A_67a zkUG@YC?=I$b|dzdrAu>!A;$!9wUv>IQ=C+R=RBh+KOisCZ4t5z@ar=YX@X(xA>VD9 zR-Sw~%rJ|5(g>2_5?8k#dT>T@7a9G7W2YLv2ju-56Sr4W<=D=^2L*mCC zQ*yt7h*QGfD+~nbT_xlL+EC`%Hb-kCC3;$Uw2Cf{-7jyl*jmFgBHaiX7X1*2U$jfG zHGo74VlI9?2SB!!Khb$Js5z~z@(u9k*+GY6=3M4AY_Kz@;7mb8siUj-Z1yZZB9#P^ z#3^CP9GkAjEBGB*MEIy~EM_~RqGaVZ+q6{U0uOoc z9^*whkCYn4_1!5CuJ~=oX#pM8VTj|G&*}(od#)qT5Sr>$cq4hd;cS+QX^uonZES1X z^!%TmpSx9F_$PMRUha_%@`+}l@stiVw=?HYW-e%-H2WppS8I?yW!InYQ>5LiafC#7 zDnDus`X`CelC$(w$bujp9NlD)ay(|B! z=fuBm$mg$GA`ns@?f{+Bihsfi0g5ib^hxpQ9|!n`825?b`w6k}B7J|8vIs)$<1MGFn_7GktXvH}P@){4$n^l-Nm)|tmsa|2{orT;Qe|nO zSG~@Pi5V)k+O%*>k}2KUQue$-+Cp_G z?#K+1%Tm`>8O37SqJ)Et;#ZZe058FzZuYYoIN7~vidvrI(8KvM>$Ud~l$M{}*Q$pI z0RFq(w_+T8neOu+MBoc{o7U#2LwM$U$R zIoJGKI{Ft|!xeM%osz3V@}~fHD%>BCAk3#H8jGPFaDW<-_zSJkT_(>D4z-_MWEmCz z2U~l5ppuN&esxz+2AFMbUYwu5<*e*DmFuWYNUEJ0Cw6nF{?zJZDqm2wl`*Z%kS?*% z!2UB{SDrChdLioiM3vP>TOrBT@_V&D*WS_gx{l_o<215z+7VM{%_I(ID-(?k>bIDy518rydZWLsk_VDn}m89who}x2B(PVEG?Tq#G z7zEZd4^CJwr9C#-rj6iBTVR*e_?|dtN#l|IoN?LB-zwFXbcOJXtQ~nN zxHN4hpVpUgOpIZUdZfN#5VDh~UG!B@$7v% zrIGh*#afzsL~8YOQ`TziP1WUD4!|Xv=9c%eY-gSwIqr8~WX*G_?5Bd3_9Gl z3R@)Wbzc)VM+4~HH@jNd_D36n1yaill76LRH4+maAn@|M~=@#llirslM{ zuo=@o$79WMJEA5Ra}n%CoK4nLiSl03=~pnwD41E+D)lpl&R<2PA-vyLK()@T#Q-_= zKflBDL$ik6TBl_+e6xLNL#+-E>G@IKjtg2y=XBt~D%Xl@g0epqiH|Yb0)CsY({XDc zJ}Ygy7D)V%GKjh=AWATPvFrjv1+FF66H~|E-+TM}JD$d*=MXE<`fG{Gx16w|Nf^Jt zYE~^q(c&(8YbD!}t}P3#1b5dUxL?1w0|X*w5$EVFbr)K?Ez~_t`b4x{Ml$Ddr_a`k zPg_jP&zqePNMNPBdRf8viQDN)7BP-Up(gh`ztS?fkU%Hzq+wV{fq%uKKp9(*d`B)^dByX%jeGX(X^t$u z@?`I!McPpJk^Qr+;auVx6UsjU;nRlxcH=Uw)d$*qHC0KByuj8u0S|V-|CvF4j@}@8 zLO@y8&TKlzvBD3k&kGwN*DPhmbm;+rDmG)loBXYJn0NxV+9E7&x3t`6Nk76B7ia)& z0ZXB`SlIx`=JM%%0vDM9Pepvjlz&9}rlk_U=;rZEsJ=TIP6UXXokbZ! zSS1Hf5Qt_vWyoL}s!ItUZNYcoh~sZ|-o;_|`yUH;yT8(t02n2QP@L;J-m^1{C`V2C zd2B-)Ug5a_7Pj3bi7&wvAsgR_A>ms@gkeXKU>*U0*bbQ(0t&PFiwp)K~Om- zxtyT1(TbMM@C@3?vw*d?ooIvB>Dt9p^cwVoi0d;|z@>bAKk5XbAaM(JN+$Yh5J&_F z9>o3wz)_7|eBa;IZe@D-al~=h_SWCwb3HW`L-B}N=KD!Y+GPdffOro6(>3ss>SHE^ z|LJc4j`$0}iCxT#32c4=xRa3)YK`aPF8~L4r6wtur2PVLEYvia`+oy)gr+KqXBNI? 
zvHQx967c#P28;(~0X}di!#C_(TE#QyzWQyFNu;zd6&0r0`q(f`U{J>1 zAPC&VnDr)n?*>?MoaLd0iD3q9Lb~c)7ZHfH!xm$HT8akPk5_9YAM|K2yUo6?{6vgp zau2Z^Riyd3-B+!|1dZXJj48#ZO^}UoYORE7x%#`Wm-lsN|1NYSRThtnHCV$SWGHaD zV22}`LFZUW(-;lV>9QQ=FINHl={>Pxs9lToAsYb6Ceo zlx56nHC3Z)&MlZhtP{ezcCnVC7RJrEl!HO}P5B8dSYc&(vLLKu=zs7xWu{AJM2Ils zlQfpt<(-BS%E&|XX4q%Sc?=Cq@=IfQBPW5G7pXbbi5OH`vY5&iw_>^pXL)q(_|0Ak zvN17<#sPO24S1BO(>%AwlSyW<81G0TD=>%M94OEoQ7GQ+F0Y%i#BwaT!}BC{PbC{0 zLG#+m;R!t?_G7>2A@k2Bd;Ah&ANdE*jQv6Aw;rgv!Nshn6LE}z&e;!YKF<+>{XI`m zvDnyJ?e1W|nGm14f*(rQoGQ(vyKEt|_;T?b;Ih@gw^U0!2f)?>wk~=EobEq@P@0d7 zWIK-|qxnbX7Lt?`jix4cT=55W3GhtdHoF&5b94lf=ZwfOZkycTzMj zYqAJ5dGYZ*$|q4CA;bN*0&SDFL$NgBB7j`D7%yTXdR`|bY0!GBKv0z=m_T51z*1?~EKdrV;u$pN*L;O3ntXvYQW}wN{&Zg6 zkKG>$l0*-FS!)j}Y9!x>clm4>PL%N6&d&IhVBU>1#w)ov{Y9K4^JlF`RGwq0p8Jj) zd5_-s)QgCEb?-IY854lCxbDf%cUk zJm@Fyv!pEg6{Z=9Dla(C)T|xEkps6CD(^2@gJ6+pc@{uT`bPHW)jkvYcUrRqmeF*9Dq>;6m$byW=9#RG~svkhso!q)UD zN02I4V7d9|Ye{`piVbSA{4(UgtHz;q$dyf_j!BB|)Zld+KKg3raNIjMN20s2G!fLO z1yKrn+SYvv`w1D0m>B`**NJ}+(7nY$LTI7A{KqjN0VFUis`ZDG7L*LU6X!tZk!&P_ zH{gSHnjanni~@ZdV(ae4JqD=N2~YLNbEk;N?%Q94OU zF55X+K3{>#Ru7}^bji<1q9Ki5-(hiK6wxfN6l2j+77*>GTU4n|s(uY+HKGayr20yR zevnt&Il%iQ;p1yMIicl?=%TomFC$Ml3`+28ANoHa<2=z~mDZ4Zt6SndVHmm?_n@F+bJqxubW|9NQXz@K@W z6LOm&&%=;rLY-=_X&;YHyS2kxx-RFE8zV6qLl>kAbr80hz}L$^Mm&;V8lH zVAw8QiJ>xhe65)MTT9z*efA#wB+ccAZ=8X8h1A(#xB<+>(!IE(UPJDzb)X+7uNla> zdXwTGXNo^dobpgOW&}`~Uj_LOK-=MpDfrazL*K3b z`_}|Vi;+jm{@iia-hL~4F_9K6PwO(jpSGJ~Ek25PNTfrnVEp%bbp9l~XmwtcDD}@r zvG}sMF*!y-NGZUe3EH+Y@*J4;*10Q?Ii@G%1Hw<_4vAEx$}#XqRc~+)iL}(a?0m;l zVSNSKC1oMjz*z~~LaYQcj)d^S10Tc{5E{k_p8mpVO0Y9?9^|m751kGV?`zep?luqC z>%GC}m3FmnFg)aO30!JRrWWqb*R+SFUoC^U-4gQ&Yd&ujUK3F5<7;LlrL0Wi8_g|0 zw*IVndx$S5tab<_zi^9VB51jgabYL#QqF_KCZ@d7rZ~-*fsDg9(!-lgo z6Ehl=u1NiNO5;jvFpKG>^z_`}R47UP0y$~@U+gkZ@Era7qvU}FA+@)h9rB-S5~v6H zBR?A0^oT*46cRd4H*6NXt^z-1@wnF1L~a-AUTCUcPHQ}W#0@YX2&wc5Mqtw0@xil| zP?o-pextVbd_(cVLc}$;Pn2d)lS>@@b~8aHtuuR@=M_!`)7-`Sb5r62`o`N+Fxb~{ ztT@cDgmv+!7O2Tk?~&6)^%#s+SG&JNr#X$^M|Qt^%wfXJoP_YxUak9wNE~*#M;D_+ z7^KRzNtAQ%L30qEtRyl(`qz9FBaFuhH;iC-3c&DByj%>Aly#;6cEY6Sscb=VW|v($}DG##%n9t$qZbBe}Hfi|l9v58+sscbtGkc;x0Kq3sxa@5X^ z20*syJQoBf!+P>vYz5eSELT~VgQs@0oF&hWr&D&M9Aq%R!maL5;ZnBU zo_B!1wOi9VmOWYuhpA{_V$xf^1KD$zTe|!2V2%t{iG_t$3tgew(C*kAROxw^`5wW7 zNn%fpB&>07z&D4Z@?N(&G_hNqZt46^@kg63d@Lv^ZKm@9n@T~vg>?!$@3UITk;Uc- znX9@s!2aD|?U2v0Urx>b?W{cmvpqTRW=A*=+Z;~%_cx+8ItYc2tNPPO=-?9EAN*PS z{z`G1lT5+jUL;up`>E2G^A(6>Qk+9Sv~esrw|5^^yEZxM66I`}F^rh^IQGNWt-Y9e zc+;{9yC0u+7~dY^xAfk8A#eP*42R`lL)uuy>-4vfqzDjKTMN7UJZCJ9Bglg7+v{qFnJ-XT@8aRYIt7(%&|kdIsUIXSpL+`j1WBY zQucrwE^h`8Rsr*5hRlp?5nn>5`twg}!!q$MS)Fc+wRt_s(5}_gLNff1FRV);!#R+4AZTGe ze$J+R8*oE>Df=O8L7aLPVS#Z^!R|Qt27_$su7$yS&+sZS`tUQUPH^s~_EaiYZtc3J zGsIeSm9rW894;h}%UQDBraNPR@W9q9sTFb@5tOI9RG4zE#^Y0{txoup*Z2jv?BgF4 zY73+4-vz%Eh!kJV9sexEO2|y#;mZ?5|6iMe$SC&y)y39%MEeE|3UkI9KG(R%>YPsV zCmVu8JuYZ4Py5p+gna{HcuMx|Ejuv#7?xD&iayh#sN^KuW~|st5x+j1wWz5P_I1kj zI3NxGML9?ur8ZEO(tA28&LhiDDQx(v`Ewba6yD8Nk5lOs{k!Y+YCJGIFZTpu&wg=y z4Cak0Cz80C_57GJ)X+~|3%!7OV>`@~AOhh(;wGJgMS`{?hOq2~=E~l70k26EHWj1b zwnDB{LX8pQ2IM(-RP>sXs6UF*nCu57GW;FoL?%ssREmkrS^1<7S#!BpKy+jZo&Qn+ zU$=oCg+)FF^^HZD;B@SxqS~d=GI=MDggj_`N|w{n%S&9CUal^#8BFmtFpdNn+&yb5 zuGUEI(T{;pOFy3`THpO5K(JfD%Rp{20{{g=~;gS(xr`Iq6zU&>x10|ujid#V3&7&rgp;Zgb} z8~*ckaR2|gl%j}$u#5Ch6Xcil3dpXlLosnWlm$3bQU52sRg^Mblz4x?%H1fqB*{(oijisUP48Mz~=d zGWj+$A8~xc;Z&+zfDGt+vmgr%e&4YZRTDP7bA#V1NDZO05$tfZ$8OWZsUaM(s?hDP z4J%@Q3H$a|(C>LbMMI(gQ>=R@K()sf*a`d{M` z@^xVU-Pkx7+dG>(82{xl^LHJi7d^?k4j>u2&R;E>z+lH;U2%VVqmO1n7$kxdddw!l z1mZqo_rhty7Do#2T3t&a`KHnQ<|R+cB@UCg1A+A2fjlU`sXPj3TW&)gTtC*e!*sr^ 
ztj^LGiQ&;CRk78_bWR)>tg+$_j#ZBci8~U-q`Ovrvv^0DkCjV{cqvO|Lob$p^a?_0|21U z5&!_{f7|naMOXh{c6+7e5wp>p{Iiqe&k4!`K^Jc_KVlVi_2)s%18iy_Reau~PPMUK zXv5ra429(1=C=2X2QIO&MEywiWMc>4O+gs>{q675`I^9i9ez`8@hVzAXq9f1uR2jLg=CqM)i**obx7$KZxd}Hr|Oi$05;Rn!bjsk&5Fm(Ue(I zPQgkzj%1y-sOC&`Ag|n-+b6q#F|ZTJYfP1l_3*O7*L=1&yK5%^EOT;m6r8gzS56^; z?h+>~L9m8(!diVDMcIxmQl^9~Nlz&uC!w@R@(EC8Gz416v?bf(ijcHam%5x3TA*2& z6u%}VRkQ{MUwf90LN7Kx&)k4uk)?b{tD6V>yr;_tg$EZ1;5@ScnO(TA6gLww;jkNr zCi8C(uP8}ZpaZE`_=j(sIkO0(ok9_E->*;Mnp<7}#;`p?2xaQcj@-ncuCzP}Ge#uB z9n&0kU!R>qlscRUy;#K9{FNRxCnP7NgrBg)ED@}PjQ?$IMe5a<MJ<9ERpO5L+T3tHUf-F!CrY{A7p877dC0dq3cU_{HjxSNAktkQHF^{N2uggpl(v550;Sr!C(mdR z$iq4*ht*m$K1W45kckuUAqk^w?#EQN+8tRAG$IK?5I!BsP>>!N11q&GPYj#aAA~&M-gbGmaJ5s6Qe#XXi`^~b z_%sZ&6m5%PBeq)wz01_T3cf1_uOz1bka zJSg=Nb64%M`DRW|kLst(9h1g-r-8t7lufW?yIjqNn}v)z#V9cX(nn=8jnwR*$bvi( zQ|*-e6vFBW!4e{z-9gAupK}H3uf zz5XoWh!Y~e240e%l#HcDQzf))7yPBz{a3b;FXe80&93u4;~V9nJNbs(iFzxn;q$eB zdA}54n+B~XNx@X#tK#bbVqBd&8la|LPVx6*lLP;hehz1N|8(?(8MKQc8NDbGS)N!9 zrRlO?uueJ{<1PBnJuaP=Zt=yDrD!MDc2mTSrC?H%} zmS$zr06}2@1=b0&2qFn50fo~9*SB{ODoCX`GSua6){S0~D_q8puRhkiXhRb_swBuK zz^~xxl98hw7{>QoLK1k+TBUOXZV7qeP95oGoqW!vjODtm9CuPvj#Uh9f*_fSr<74z z2Y=|+Wm~A66f=lF{`d}mc+Nk6U3BybHHHv!)VJBLmyc=>88EY2;VDC#$C94lH&ZvW zvjoCq_ke>UfUmFQBD)#*2^MJt9-qj^-Zt4$Lt}gs_rqM51{()Tlk<@d>EaOdc{1a> zwU6`=1L6XRXAkXw*7`jy2Q(g|MX&a!X;{k9zu{Q^+3Jb^|c zO=rq>W@6Wl5>IbN|C91F1pbPUhhOW8g#z98u zheLCaygW&l(>04KtM@qXebZEH%TGTn@q35kFQrGuKy?&i5jlG~U-gtW-ivao*<=HI zuyZlFLe@6#?{OaML7>$CGjVs>zgrOABe|2=UzL%S`S2c5Aw;fZFplofj7dt)aiN#|CS^%w|e& z3KsRWOr6q-_4s37%{E>2kQr}KS()}8G^RS z)wG!U9{qb`)ReUrN$=3&9BM(=rHMXd6{^`e9|TGE%^T_)eW(WRrfmzLx5y!ZNW*YI znt@c|hzWBKJ_MzSpEN)yhG~gGT$tIGXYw?U;D{=1^i0P{PtG}Z;m$71$l-8abxEMg zTqgkUEkI~5x5)l3r$F?FZ1!jixm5%{ob!84Ofgl7?Cz zK|KdgJBkRW*k_*6nN`AxW$QOfc;EYFwxJWuy=hwHH#C>Vq2>;e9j_3`2JuHA)#cQ zoFjOSZXQ~89*=(+D;0HK=<7v9P&ZbtS5U3HqWM^{I*xTTj1j}Z3{17J7|WboVq$uO zFrkR&ciJ?K3X$JcEvBB7m5FZY%|d!kg2k`W;V1mW7pbS@lr_GAFkF+WnTG)tel~+o z%)GCKX{P0QmgBf$_(i_S&N5x=x@_>Q*`3X&m?p3KPW};6ZgbkJLtHs!y*E4kJFUbf zzMz{tIUF7g*$t$)g#ts4Y1z_E-HlK8Jp`W!QW zIBy3jL?6v~*9j|kCA$y7VaT8^*|K#3aZXet2SqM_c6y%IpDJt270CqNQgsT}^|z^zSp~`RjFXpxv?Eciajy|I z@OBMiTU)YrIn0W4h$XToyuCh3C-*919-69m1H`gFf7JIe#WcyBLX@+r1c)+ep!IEE zE;SE3WMMkR_(IhhCqM@y;qB+spSr-fEb+LTw%)x=x?|xMl98=(RTmc}O(}K=5lfhV1;$SY2voPi-nWP*OePzawZH|5OK1 zet{P9Q_#XQ#5`Q6*)m^ak>*Oao9>KUmZ=^&27gM^q7Bup#qE>GjmB4eo@ zaMz_>AoL6Px96%H1Fs*8(GtY-#4t#OpaVayKPpViFJ=QsU=?ENfxe$3@dLG6F$sk$ zVmYE&y<3iOPGS^Ndemd`taiAd zPi0osPVV!#c|5vq_&@x6Vv6$gsvhoG>&^pyhmYjn($3OLS0KTK5U6 z%mO&W1(aY1^=-ujtP>`jO)+a&d%RD>J{y@0u>9Y>ZikK*)DAJY3hr9iwS5OaK&NbINkzSdX zeQh4@CE(F#HxG5*#-x#!#+GCL#rO~JS+{Gh**Wf`cdwc2yV7gZi+^vJH_zr*yzjOB z7(Wd81dFKhT4cVjE9K0^ps2*dsp~m#8n#?t}|A%5CPIl*S z(L1lUQ`#7>c2L?GuW?eEiEE$)Y~MWwnAh>6p1l8L_oeZ zJN^i17KR1||ESc-+#Oe#7qRy)NBmuP}FUUm$oWmAtKrQ+Ah(yy9y5wG9M{@i-R`Rd?mgj*z z0xYsfPBo7g_8PYsDuK$DF1aCn8_QExTmIsFQ)8WnAfrHWRcoJ6|xVs!NvMosA8i1nD*~%W80JM{t+K)+s(jI6)MC(-Ng#M zZikKzm?wSc5=sYkMMWHz)<#4&Zd<)r5AeO6Ye+RA0gj;{+B>lQ`>A zH341ZQdx1G5m0*sbzwQE9Xd|H%(X!^e=@S<`)RNsY$}f$?G!iarMN^GurA*dm>axj zIo8lTmnzkbRE2Rn3tz^a>s6VnwxllUKwK>LJ!7;^>~EpPxTPNPz{i>OH?mMP+=N<* z8b1#Zd;rF6;KzOCiV?Jg*jSUTh8!{FeQBA> z(Dt8AyseK;D~~S)bF4pBKsi6+J@w*Pg(Q({!n6qBW|@5Dic;tC6qY1Su~eR937FaD z{LBpQ9q?sDz5~YP;&40B)>H-nEdr2AXb_?*2lWJsUef2`G8jGhg-1s1u3)?N~s zznia*ei-(bW{S}FgaG@jQSb{yCNiT!zZ;3-jr10+)~q@nB8SzFHY$prH6Gc^op0n( z20YhcIMS|^4P&Li@>s)k!MkgYeVAe^H743do`jhk~j zMuMEY(*{q7BuuG1^Nb)jCIN^GR)vduXuN&EgQ zHhBP~?`}?rLpO;li!Q3q1AVLuKdEE({k!taybFw{f;2nYS!P#24|mTh#&^*onTKOc 
z^HdSL38dhGW?a*@^Hof<{wPF6QUI2f0k4He3&Sm0BRVFSZNx%Yotv^wfz=No{Go}7 z-hh{TdFR&NL^YI3t?gin!WRZ0r%P4Y z4#Q_h6eUdM{s|jn6dbKt%rwS&NC(6XVWmvlp_*4KsyRc;RilVjo0$3_9TkTB{vn6F z2M)_)A{quugFNNspSX+PDP{NJ!+yubzp#PsP}S3td1{#p#mFwyaBAL>Bxd#OR3hoL zE<`Y|JJckOM4KSI^H^=HsmVM?{`VFUmk{5bNo)bKjHw{GIvM9WWOHl)oToG2!)Q%s zGYQy*+BP5v5ex(@ui%=RnlIc4K_k<@uyETJF~a>-eo({-usq<^6x6wSA-L`nIk9Y9 zdr|80Zich{(2|#xVfzOqRUqPRJ)i;b3WZ%KI}jZcJ!Z1`%hajwOT_p?kBv4mx%Cyu zga63>l+SxxO9vxz9B|S)VBK{zDbwJAZ*CIT;bwfXi_=2-GdRTv0yq_`7Hq;5lE|%?h_SnbJT8W1b2or>kTN4H*T#?hsLxVLv(Raox<> z^n~4V8y)7cAm*V;iqV39e}5$Jeaat*!R^azr69vtzh0b_=vXttaNeZAmeFo&;V>$I zIAeO{Fkx~8lpUqb#ee9~XFYEp*dBDRu;s{o;5iH|&a9pFWjaL{_%#SB2I);&tAv^Q z3q)Z$L4~6v142c>qICTfhg}_Tk$Id6aEQ)Pz@%8AcN;IP2T@LYL9_dd9GCym>F zFWfmxp}Tcx-7oqcCr*-(9J96hvU#&BY;+nxFARHae1?^K6w1+DHk;vIdX{;@VhNaa z@krhdcEagR>9(+1cGWVGKfrqe1Hbl|4fZ6r@Z^w_>|*^?*{(d``OO7=o+jLuw+GiEwEM8Zn^HA4gp_f;@IbdS!Zj-Ls>hv?VPc-{nlR z_Ky>t&9I@PyuE$9f*H5MMYu@}>Zy3#x>3nEp}MhLA;a3Spb9H$#9aGy@ytL;IZAno z!|HZ~&B-KOK&fS2Jz=4BTf?(bDhg%Lic9S_Ce*Qi0-3`di%Q$U^fdeO!ObEsjG}x5ne90AV0ADzTqOC!<)3RYXhq zsNDTQT@5awXFn!t@Tl`HtPGYGIXF>>UsGLW8h{a%*6`U;u;F1y5I^0KB&SMyda>Qi zoHavc$^c?z4XIZ@RvsiJHq2cnh5{u>TO0{F};*Gi!??Cbc&~Rs%Lhz3%X?R zX3u>JR|b_-nphTM6Mw#SQ0rPeStzXP66)zODDO;N;DnxtL+Q-&ZuQq2*$DnFDMqtl z_@_k=6R94te{O3T*?k*I;$OZqh6bdAn=u;e%2OxMWwU+vK7za?95(2=-!_+NNv65D52aCVEx&@#|?xsR1vI4H+reF*gjIA;Vn#Ygz z-)<7BVRValo{iI7fxIaIL{A_&qtm>V9|rb`D-_-0#b&76FfI$0JW{Q2g^^!u{1D0+ zz$Eh}tx!fouE{S-~Tecrl<@M~6HFceg zou`hKpSw@f#vZY+6`izOx1q&&O>Bfy6K5Fu4<9NyIR&aDfat(wch~g>NT$MK$$H8jwki1cFB2-005*FdsD;4AWbu=aXY(H(}@EO`O_eD~33U zGTiFt6`SNM8m{!nDuGhS>PC*;l;12$(8RUHHQyuGuQ^0=K7G+Ii0dwXlMzr&u8Cd0 z^Bh&LiiEc623}WY1biV@)KGfA&$qU)=VTDc+4bftOSr*_8ZcUbD`vL6m3G#nvjiML zL4)WV-52|fHTMye@Kp=lCM+v%--Q`28(N#y6TqTDJ)}Q|d~{SFK6&5wErQ}wVUPMU3u!$y(iQ#4o%3s{UtrgB_ zHigbFCf>JGAeSBloTn(0%nUK2&l?=r(NLu>Atu~FuWseZGX_ggXvsW(M^2i9Lt3tL zFGxYYVwWz8Egqo7JRvz#-Kv6dqy_N}F5dO|hJ0 zVsK;`X6c%w16gsLX0kYKEDm4Z+X>871dMA19O51f#^r^^)yUHHD_kr$Q``_FUPsME)d_xg8R(Tx|Z1-X+Y>I2w>>HYg( zHmOq9hl@vZoCP3Pw1Y-PG%4;8yfvW>zY0Ls$>UJx*+RsPEEh&R?t)g9doh0*8NQ$w21T@xH| zKzOG1=C{I>WL8y#!?l1_4McA0l-(#lIh6ZleYs0>ij4)rF92n_@OE(*$DIFW0Mt`f zq|>)NcOdp^KOk6S%V*3cN!S6yK#)wu(tjXn3w>zVW9*JyYEO2y(}0rv^hWz&PV9}w z>PBt@Iu>RV&!_E5!U-uwBo;oNNEP%Yh5Kg7HV*_ugz$N_x_r|4@0e;)?q>3E=Pxq% zbml&Ar_?}#s~yj(-n~N#!GbZb>!4KjHyy?b;5`}sW7J@V=FT#Yaj3)_1=s8J+5++VujF(xq{2X$p?xO`&l#oG~n`zihI6>S6!7ORUi92 zS39u{yoi(=Cb68!()$5i4-w#k$OfD!qm(jJ>Ll=g>WSkdq9^PfF`r|}Gby$i zd$b-cnPVNPb1!p}X;!9dTP*@Y%@)#-^nAumm+M}YtS7SKyA$e~n$v?5B0>fEGB%_j zewd(C=tGV4i?rcGwucoLeO=YTzH^j}N(#}soSh2JLGovTMDllP+tvPLyp^O7Eqp<* zUA#4tW0a&&J(W2-Opp3c7{DD#e1aR6A27+}-$q8^W_rd?ZFVz)pV(>7ip`Fer|gMM z4UhJxtDX1r9rgcZ*VxXixL{=NX@3QASo~-6)34)hJ|=A!6G-{~AG<~oQlfGma+vzW zP+{}-(XU;@D`RC$UIX;3-&mT6@H|4UyUfZ9B>Xc(ZJxL?K>JBj)V{>eu)$WzZd~%UD|thT3|MY7Z{`xgEG$0mpt4Ow(@qhgqAVNB+{k8;=rx)pThvP>AP+J{rF$0WGyhLV| zPy-JXc1t1$;0UE-L3C}i06-_r5$6qfk6+}6mNWO~Yjy6ZD%>9z)8wPf>EG4i9*C-_-WA|5pIVca;aP;FM|J(Y>d2%pq(k-|NE6} zAeA$iTam#$=}LfuPqy z=yUlBa+5`|8CFHpG=@2}H5BTl+heHmg6WrBk%O1;48*92@wDGmEznX7b{*oYrGnAK z2Qy;(NWLfW!LShLw}Y=VaJJw4f+o1$)fY8V+0zFrjF9hU_dz#G}VRJ^qUDFyO9Hy z_)jLl!_4!hvfwXEG525TH^v*sVXh>-!+S?*l0VA8XO+bsRbB#guEKKYPpFPTH4K+T zh_==?6^?S&>usJk($*hYj@uzIO!$JK-YI4xsC;v!B^^76M}EyrIYkv7(pnkE1m6%r z4wi`vCK zc3tuE!w&U_&6frNR{KK#I;$1$ISaopj6Ts-0j1k9EQOaye-KP~dVYQt8?KtE)UReK zclz3R%si6=uDukT?*FRO2?rXQ)q1viR|J-08<6rAh*O-h+a&|6+~ecPgH!>o{#j9^CWEUD*Z~?%I=rUD$e+M}-_`hS%1_%a8*L(%U&8%Cnh@tW3tgNEc$~5W z_AZo`vg}Mm79O*lo>@egE)k_4Ep z8rJ%u0p4;5Vd)S@X8c9r_9!)xgN=}W#cj;&*p8WnmKtJD;}Sj8H_Bd6_YgQg$@zke 
zv%`7Px-PRY(q|*M)oWL3A61`GEeub-Z$+HbHWYJRXDXA^++p7)mMpQBf!@M4(w?er+(>y(FL zY~96I#x?~N)u=JY2B_HB|8l%|e;r>%)V%_pNLP@Bvk57Hdpl*JHrXrU!ZNf%95Y!C zl011{Y7HCG2k4`wKKs7q8Uzgk!u`e4piJn?BlIoHI7%2oDB{Q?e-!WZ^ddqHDj!9H zve?P6`fW#yN&EWV&5#wXZ(vK72zvMR7Cc!vxW5U;@N$hq48>Wcc&fuCDI?sjA-$lT z%Q~07SiSzojl_g=8JQg`P_pzcd6>e^52|U=8Z1E8`7bNb!K?q zFxSY<0Qb4>>W;$LY?ytjFfdh?%*gf?QdaIY2@T@a@s8RNK-t34pW~NH&jffQ0qKsV z7qDa7nJb}O01(idnA_ zYYGk=uZ$%1jgLM zDaABRXSBxnY=l}VZ5oy z&mmYW$EOdEkNC6LKKZ4lV&vD1oxmw88cx_X_Vl3gY)6F4mW-z^iB!S#hT3RQ?`RJ0i0YB4rV$)8Y) zKFGJ#>TlX54BWiEj+M}ynXn|yH2}q?G;ADs> zP)_~pl?!Z?l;J932keMlY{M{r1V5)~Ws7y&;8yNWio=kdb=FBDwt>;qBJH5UkW;YG zG@%%N-D0d@{hz8C0U5orOMej1Y*yoSRTQClu06H*Ld^#H84%z4KKP-P0v(cO&Uk0bu^dCMCD>I?4LAM*!963%T?lqVu z*FZUAr4nA;g}+4FjWUF?gYItA;XEN~-qTy&61hom9q(iW&XPZ*I~@?N@#qKN8Is=; zDt@TXEJKM?I?3@aOLl_l2x+sPQoG??b$H>Ock%Fhu@ru=#B7zdtA+_XBN?9k4-e>q2N_t&x>Nm82JkfSh-11P$x44V+a!!Ai!2xuH3Agdg(FKH1 z5_wQjPQ6d*dob*$aR2%xD(n@sqs}mYJ?wy|jJ1>}xJ>Dv9$UUU*A19p2%0hovFO@N zv7*7}1OR8MP}|nz%smlC#_KZw=|vYKxtihq|6GetsK-_hDUmwP$)}B{!op|LNc=EZ zV=9Bp>5~hBm*mIvCRWd?)p!9XWM*Qgy{F{q%LF0y->-WBQ)KI#3&$Y^S{nTZ@;@4Q z1=pQRqZi*saN|Q0EvRn*rMCN2njf?Ze29G}95w#Sn1Q!wMrNZ5)sid}b{XRYvo z11r+2hf;}iI6oq82E1eQl|8joB%lcha7y@)57>}qF5s77NwRYVNeh||E!^v|YT;1~ z{1U|kp|j63j2%-()&6eFpGcv&n(6Eh#rIzva~<?OIA=Ev^N+X zsg`nR?>^S_+U4;H*bTKfixXJ)^|evj1uwDJBs=7*`LWX3QiO5@&(4xacn?!byLJyk z*oR@z7&4DEe#_HDFzZwVNcesh8wu5?=^;K9#yucaLOsC7ryW>~x+*1jQ$5{~#13Sf zEu83dNtDG;ee}G7|2q=I@M?TgLm+{?qWU5lollEowFB)7 zUb^%pD}q;aVD3xqekd@gmrN7JlCj_i&&k@oY67;%T5VRa9C)oLrQCQ0S?~Tk)%&MY ztCy5299qmLOu4MszHz~wqgI|hGJ_U32xHui?aI@vIr?bXZp|>E^g{Aq=5Ulb!^-@| z12u8A7Xcdvcu7x`m-z_}GWVm=Et<0!t-mkZzb-H&uqNB&EfK7v z<+DVEFvbPl4oSQpgB1F_{q3`d86HuL##JYkn%>Cax$Fs+te+X$RR;593A5`D!itpj zMoyR-x&Xllm}9v(H4rtbIyq#=ue#-f^a$7jEfEg$&GbOC=EFf(&;g#Q+=_svKQ}Pd z{L!C0uFgA$YsKV;ZBT3CxCXsbM&oY6DU6XCTW%jUX|A&oP)fF- znNN$gN;@_w##$#N#q+u(sabg7*a^9p<(qu1J~OAv>P`S=EQePE{00^PmTDm@E<&X8 z0qk>YR}2y6O}0icMKO|NS$I9<&CS7)l<#nTFGM1$?tsN+X zqCZ4F>)9;+X(n>)I{wRcj^zkk)e?c6&#c9wLb#^u#U=96D1-7hD!MlmvoeVt{-0cp zBl0^m6rX!O_9XEi5zYj;UNZSE)Kjm>Inz8T<`JPrXKiq4Eb_Nl3DqbRWF6B03^A&G z{CrZ&yiiPP*!Kmz+wn`7Qa=n2K-hg1PRJh~R2{DrmY6KywRvlj(Zv_)KM1`H^8)IS z0>(%{9IAAgL26lb{#Ar)n`{h;)Fou+3OX80a~-2sz0(Q$zjNQ zA{MlTChD%ywY#A8L+~SfWx5j*bVRT!bOaBD4G&n?vw!f;^b7(H4|tZ0jX-qd%w!A3 zt4X_w6A2){=Lt!(GJ31BxHHLFYKJ}a8C!^egMgj*OD5kMq~o*&@V#)2Qeo&pPn*t4 zGK)&N?-E!3m6JozdV%Ky#NAp4 zQw2J?$ij04M*o-)y6W9a!X!qSe0@`rOp~f$imK?gx_fzp#>sH#C{}M)x#|T=4&Zsr ziZIlxjfb8*tQ2&>alr>81cjW5Vi?>dI8cAt@(TTc;hH0SIg5O3|1lx*q9wgexb^@` zFTUol0m}?7(FxM=xB<&NJsH@#0Y9Zl!Cs85=h~~hgV3hjSV6Zn$jLRYI3KHv#V0`$ zB$!)7@UZ(fSbS~{F2X#K~(wl_verSNfpl9Kj$fRdW_ZGf8Q7A*Rg>A;P^XaPejht$SrsZ$D6aO zggFr`7fnY1&#tj(g)sy*^AWONG4%(brg48E5g{LP1~;3UT;;rGBkw`oBV zSxX_r^=^=+Ik9;#A@gKDW9D*6`HdO6{^b2znrdSwjR+m$KPn`K&@;|JS!c~f4Bah9 z&bZ=)$a^xtgb{U=rxJKOW#{;Ly{BG{Z5w80IUkI7@)vc{5}ZQljvV(HaGl9M7ZoGA zT;whb)KKCv&@EP2&~-#-{a_b-cuw}9&m)I^tiJy8N2z3*ZAad1YR$H1GF1`#((gru z=CLniU&C_v?fW1v^`Jh-)uooMWo=wm^OrH=!I9di>gVuPpV}_y70tMh^o!kxgCR6Z zXhafOXQ33tlNi@%!ID}})Z*eZ^SIB7l{W_BFM@$WRW`p;1)^G##%)|Y8VE>vE*qio zENw`mjxB@bdJaf?j<1td$jZ3^&cH0F!vl8IkS^##fMbq}U_D~L4)Q_v9J@aS7bC)j z11Xclcp2osMtQ#E6W~Bm!-EkIyon0os#I))Bb-NG-6$1Df`>D3zFV9&EJ3K#}fID5Uvf6==A}o`#37^#=JHcx}gjs8Z z)Mo{v`D`VD<>=PJ=1fG5m(NtZBla?`AES=!u^GQ=fHsI*hP_M?>|~+#6;vyOVVUFC zLAC8Qml~pW4Rn`EDrI57@DaW0tZB@PUXKUc1_Qqa7lxc4F}s=3LW`TkoW-yiwc@zc zo8DrzDxGr5@_6j*?&suR*DA?wmzo-hOnMJt1F-(zgR2XQJU2% zyHODYeMNOWZN}wUdb*b&nXeUTTJyC7M(xfw>jb8~IM@)0VI^Cz-Sip&AZmrZ4ia-u zoCA*MB+>l!>zL?rKpjy8_&Ka?GFu_B?iApgCj!$fHhJh}EgP(-^MNBZuRH3rT5qp9 
z-*?+Nmox_|vyj`5Q8njuG@|r&RDQy$gFho+2Tw|c|8=0mkdxP!QUlaS7UyP6=(u-J z7$^L?;TkV7?cMEg^~$pEC_h5eyVF?ic~a>pO5`SO2>NuIC1L@HjG{mSyw~PR=xxcL zu~=}}h4ZNxsFmb7tlBgq-<=P~bGq54??jF-iqN1sPY48ttg81s943*bv{`%& zttjts6ED}w#U3Di2oMXz4IQ)J0{=L4qV^LPjFMPz=;(*5LHPaAf_QU7{AO8luEu6H z0OR@*N}f<8ryz!Y&l7r%Nd27S-+Qg#?g<_P`l`|6Xic*!P209{VxWfIfv0CIqU1NQ zawamw#m0MxJwbR3O1;PQl-tZ!jm= zlmWbDxc=MIm%*_v{orP47w9@saKIw4*ySCA)&hcoM0!ah4J3xZ3mJKY>^FMPJ4TIr zVJfu4+7J4fNJ?1hgSn8i6Xw&`twJakJs1aWJmaH(KM8+N$y~BF+-c-HvNt7h%o^dS zd}i?uVwxOoux$U(4q+eX|Kk(x&&99*cPey`zxdDlzghdvLss+Y9Jr8fu+ev{0UBl1 z6ooG%CjDTtGv%zS6~r+ZJR2p58Z5Iwbt-l=h=ZYeu}@r32P?K_V*bpBJ7$CgfkrSDjlcndjBu6SK0K78h&5D0R0p^VjiVlHl&oDS2? zg&Gc-b|4+@O(l)NiLUPauMt;EE07Lv_mYg|Ic^{H^)_%aDZjFY)PLM;ShL!34{PG! zd46}JmK!n3Xq#|`&3M5N{ib11ytOY1G$FA2CtFWRex=yGmn?-*V47p>OmN6km-H{y zQd<=b%Yo4#-<0KVl;v)_rNK+ZGmjf7-5ewp{Admv-TXI1t|!#*dMUVITLl^s$Nsvd ze%<hl+#H^ zj;VC7CMUSKG~Q{>8L z5=4R5qy)@inLQeG5>^f+n7%`c#2*5Z*?f`uud*r#6&0J}Yj?z!jVQ z>iy3X>!+E4XsZo&Gt>V6=s)2+|$W(%s#HAl)q~Al=;{9U`47 zoziK5r1IV#^$(AUFX!UI^JRYb%ues_>}Z6?wyJ9})>;?6d*!gR-#9ETn|L4`{u49u zq`1zMcEk>DrB|XVg)tv7q!Otg35CdK@VqlcKIo|Bc}em~rp4Q9z8!<%fPy?lg{FNa^UAI*uE)5a z^Z58|=6%?fx$nOj3sT!`ns0e;L~xk@PWj_G>cQ*B8qq{hn6u-i@|kAFfwKW+iGBO~ z)ZXsEsC*}Eip(W`1#*X?ggv#MOjc= z4;WkXzaNxKTSNL^hD3dvM??ustog;%n%@(;Jm2UWA$@C0;Bm{ZLx=uA@9WT^FMC!8 zK?QW{z^5q#ldOn;{;+TK=uHeQEKQBAtnCc{n=SO4Z7b=BHmeRIRH40x&z0+|l%Q`$ z?GG!E)c3Ns8$nL%+%2maBRtueu}O{++_33T_-7d8nACD`3?n-DX2~hQ;hJVvp(3)Eg6P+E zh~M4eF?ErBhhV41UK*RG<0l4JJ=(B4QSR_K(2`ITj>Cf6Mlosh=|oZEE0t&GB}OZn zuyY!=d^MbjEooYx*)cfob>=*ER1YqzH71U@eI9tAL$i|KHodDvNXzczQnc6K?OxzP)ckaata} z?NP$)_bABx#7#(g$0L4fyIl+q8&A`Knm};*L}Fi6m@(#9su$h=>GeY`Y^Yg!jmG1( zzDgo&L>PM}X>V1d+hK;dRHi5po`ef{?_0+@@W50?fu10w3z}BI_`|+#DfL)$WyhZPB3%+`XA54o`U!=yQeM@1l*Q4~LEnDIZBoqrl{yX(3 zXoWcI@60JOZqH;rLK(&xP&>+%gc+&R11KC0ngeHiAR;?208GVob_2frz>bE=E}t zN<7XnN0YdYD9`!feXA%b=WBnw_w#;ILh*XEy43P#%#=-Qbhb5I3Hc8pZfao(QdRW7 zeo6m{05d=V2a0#(L%x(fs_AymhipP{^Hl^m4-<@Vy4FEE)< zkvX_X-=!Nou4BsYgt1Z6g)(a3!%LFIcAHwCYG&mKv9yHQ0P;S!MCGj(grEg`>m>aKW$kzYlte!-XR?fTZYf7v z!9vmFwDuLLRX_0&l3~j<7%g7=Dz?KVwf^!{RbfsC`jtk=6m1!qxAC)&cUtvFiPw4f zc(#W39&tDL7?Qqb41vL8(2cX=qcmc6fRNU)CJI54&^p|}uXcyr!>F84&rzs~2woA( zt#9nw675r+37#Y`V~_kK(qG}`LVTM^v8NbcC?u+MlcweX;?Qi+oQlN|pS28OtLJtR zNwgPUIu`q}O|n+#U}y$qnpG|1tUo&=t}i6}q<6eELgotoBtCarLRYVFT+K?J_#!Vy z`{baQITrXcXi+jhL)WS}66KF7T_*P2tmk&n=ON*43HI^#n!>>6I;2j<@wEd+a;79- zSCw^li7YfefipLr>SXK0xXXJpvM$^%^!Z~b12Y7~6^>38%BBK3Q5Stjzw{3l0({YX z98d7upTTnbW?o0M(NbaxD3b3PZJyY38ig?%ftA20~7Yynk;LQu3fL6%c=EurHM;A z#baTzaIeE?0CjlHf;$R0tGiVm4dP;Z zc`-S9G8XiL;}h%2?!D7gT_{IVgPQ3>yc>sJBeg##fK?+Wz2|h(bIcX&1!P&oH5-=L zF|GqumeHZNg;LqX2=`g!54+$CP2}I+sA?3s7UET{(7ECz+JNNH2y9s0VGtp4ob?)X z&`M#`*Fu)ls-Am6nN7N$pov>RE4U~>c&(U8k+Kfc6NxSSY);Mwj`xfVpG%wZWi{qv zqfiNwOu&43{86~{qe7wMJ4QYE;oe*nV`B6w%OXBxg+XH{R0YtghuLwEKw(WY^GRm%`LXJ*85nRiCiDy!IiR2n=bA|e7!%?y+Fc_k0y6_QPc;8>c?&~(Qmy8|NS zj9TLT+R>Surx;L9feYmC{5)c{nr;-Cr7k{5m#ewmBrlSy(b0k?V(XYFNFHyD-daj8 zTgrXs4GjMBey^0Fu?@EcR^5T_lm*5Kt%0+f=xcD@yN{`PuX1_~n9L!GfG z271-WF*}%4T>E43N)=S`Kyvt~vc~PFH&=5T6s1LA_0ftT! 
zH^_w6-Z_bWQI3EI#r#~Ut`}2y3{7gXQ@or0#>lF=9oW=mg1Uj89-gsq7~^g@pxHVQTp3U6S#2;&Tl z`8^-;36PnuM+epmig9gXgk#)?-2y47OL1OSS zimzBJ%Y8YyXDxPhQ1|u?`vyoe#Si=mJ92EDtK4oxJ@%@I26@LL>#A$H>%_yZ&iH)I zPn4KRL$BL6%GW2q-#JKr>hdDCzb#HGHyrjAJM~9*K3!BIN@2)X>92#(H;|z4`?kSt z9k$md=*TzaM{md*?&rb~OWbqJ*5dRp3`Ps18{A(Ce1qs(qJX=@3C=)su4L1|gM;+v_ zzWS)DDlslm3;lKC(2r6UEFku6#Rqhd_Z;M<4DZi^clf%ABZ zhc_@imCmOpCOK>Bg;CCrl+w!=?Tl~CDoE531$U@L+J?vA_SOMHJ_xrpo%+vYD?cd4pCXt_1Kv~ z51(CYczl=S(ecD;1(6d^9q zMxVC;ZTnM}kLn;TUMQ*&50fa1t(iJm?Tp15+oQ*QOVV@LQ{@ylf>^4?3W(Xx%jr(_ z)Sl?aggje_YiJ@ACPv6`eZ8?wmljm3y5@}eWOqiYdIwxja8mI4i?Yf_AJOdma~V=f zlWaSZTj66euvqjF-G}h{8sZ_%V@T8u9dME)r%1Pl2=C;Sie>UF#=HAhzHlT?f`clb zF;Fpw+-Et{b(Gk?v-J?ZXUz}k&RL}(GV+V}`^H)5eXtbgvpP7apSZzCtRY5PZW5hn zg9m=LBkJADV$xX?!&RbQkv@NM8qjtQ{`<2AYuh_m+vyq`(mU8#{Hfdyi()bdM)~?3o&~>H6hJGhz~gb;1I|Iw7Ca1x zu2@#HOHVjE>cvsX1$#?cva4iA;g%cVhT=r6s6GjK#h8-p2xqI! zRfB3i>k*?{@FMJZK7+rt=4kRx>KaiGLxE}48j75R`EpqxvyF72B_+S)s_^S~#taU` zY8(~sLLZr>@>r5jKOA^?3%Yj@J)$qToe|yt z&^Sh`CHdBE5nbN zhHF#NXl4~cW)~04R;5@);r+Xy{F3Z$c)2|vW-6;7_Ub{AVd`0pc^NxZv*J65j%;UGtH?midzllJp-@jLp|Ac}=E5>GokhnHwTy*jG&-(~g5y1@+W@BMZ7ej585jhwj82z^7F`%djV<-%r zotYB6kEk63v{<6e_+{k6mY85h(P$=3U59v_HI?!O;qy0SU$ozEhqN!Y@_bHy(~u@? z=!Pe2f8Oi2t{(5wiC2nVLhz2m^20Ut`(h9(nd)rG{v)$a-qH=wlAIB?BP>GZehn-3=6FqrOurEqC7)M-b_p;=IF#J zdrH?ci%|}-P`dLa%=y`Sc)0c3AL+_#_%kb?T~lhiBS3e!=yiBD>N;0zEMjC`wS}aH zRz79Io2un^sM$erHkXgt*EU3@QO{%TY~DD5*yGFVD4nh-R-ct2fY!it$pm!sMUQ6{?e(&GoE4TUgA4m z&D50)x}lZ5<1k&``45}r5i4hvW#X?OEOb>`bk9`^d+Q!ey-c{-+?KU}&SfWP(X|5c zbeu$$1r9FFv`=p{HU5^GJ8sI|RvKcx>ONiV1Xj-#9Wf7zBy#w$I&hpG_M+0x4h7Ca zg#%)1+@9QAm)?4zPf(x+-Rms5@x_=)p}@eXuVTyOsL%I~(FNs( zCEoDAm3x~-D3q6$CYnfI>7-K`#NA+)@8Qhez+?)2uGJpd*m&IZqG&wehRc#Gi~usa zxm{tLj2Q!RAK8G8-OoqbQgmA9sX&!36tR=x-+Ugmg;$E+a+jB2fRfSC-kYb| zw$pby7(Pxd%#e|% zo494@{3d+BQp~t3bOa$Q4f2CBOZ|yk4VlY?2PM-ZQ3$koou~v^aDnk2{I4TP#4(^z zG~v?7hBYb?!b{+g%3HOxUrnCnj8!}UGtHl2s6fDoHWkd?;>n6oJUxMT82O8wZ zchSuJoLF48Mzdp}o)y+C1@=Zq4*Er|wy4DM>E%0aC^FRSKAmf7a6Yn|v|RKV$5HjS zS@vGMU)g9yJG#_)dt$d_{Q2{AOgI0qC{GHQDarxFw@;K2NsAK&!Q%Z)Bw)nTH`T}_k;KVf;$gtivM=-=;>uG_M)lRho1&=U zD%*yvo^AW`$3fzlM-#I`n;UW#;2cFZYTi{7&SQHImxpXpS0qBp2eENt;s$o;JMvUl z?xp^M8^Upan1eu{0Vn2D4|58*h{&RCE5piI>07&XkLI zG|e`)8QjwGh`I8@YBKL2ykKo1I&}U}A249)4kPFMxsM*Y;2F4^Ao;u3?hK_VQWl#p z%>=WPBo}7;e2st`Z19|N#nT(t?!C`^vuJufX7>qv$pOwMS%DdY*YdFD+Dz?vW9&7=TE=@AgC!+^2;mtRx>-NCrXv$XYfZFKca zEleFu|Gw%2(R{MNsy{z`7_87GVH?!xNu9vuprfzjJc{LNb|(6fOiuXIE{A-E$Z$Gs ztI_0MliT6r0NtjHrvv`Cd~vOaDIzGwghvj*@zjck)-1#dwxLI%CGEgW_k2EhRfigT z2nTM{veFL|Hak*4(oiQg3O}=$@P0pq^QNevdK{ycHZ%{@Us6%#xxorKEAyT9*Epqu z@DFH}9%#?3WYzQ~ua&efD|5%yk0n2zQcK6b3Bk?V>8P!Ys;MQC9(Gh9n-t#b*S*`4 z@~EA{7K23-*O4YI`wbRwwCfxiuf`gjX|CH)`Mg70pLmZRUqR6Ep_ue6#x#aR@rUM&7NUo{?RPyzOn%P<2hJt#u+w^a4(jaDT#Jn3^vR{c zJzsXobXy?MJc0a3j0Oyh@hUzFNG?qcbiX{K{8y>&+!vSK=0I&eETt4CTF;GdZK&zg zopVYI(@jFa8nbm4Wx%WHxV8G}u$+L~7T?%s7d&gxb5y+k<4FPa16QsSFz0&}V8J*= z8Q7IC@KZ^l4NdCX5RPp2KdfyBDGZdz;rj*Z)(P5rQNiMhF~hK9+eBBs&|z!HG6};< zo(pK=7Jfw5-D0q@d4!EgVS_Cq3G>w8J&nhHYv1*0C|ZPNy3a{VWNk&yjv&dFcB$KK zDf6kChQ!uy-gL8no9!fHnp#7%qd^V3rg+NX8 zHu~%*m>V<1@$I;|2{xJSB)zvwECtI-p%t{DC8>!7S!i2G6>6vq>s*#G7Y7vEPf?eY zVm`ufQt4r?FufkZ+yEyUM2jh5g5uiU6hF2Q`Y2~5H&7GX!H=;{?FMO>C^M7BGTr_3 zuI3;Tt1YfFMpkLDo+!0g(<@#Jg)Hc~IQHWiLcFfI@RvI6R(Dcl)l=q=ZpnwBhTUEyD7P;3b}7v=)#|_8c*)>cpGy2A29@Kt3OtZVgE!FGMpW1lunE14dF4zQ8 z281#CLe66eKIgqNd84zS#20zSzKxgF=f1X*jQM>&xLOaaviAqfil`b&a70=_LSWpA z{o)})iAn5JtCtzi2kd-cTUrE|p3}8*v4RzZ;jr>J>iM%NN*lJS=rh6d1E zf<2$d>tBhIfCa6-*ax!P;M3rUd?v5)*eaH#ceuiws<9$Dow zzq_Bl4LMMh&_Uc>T-m1Y3#oJC^G6oDWaZX2rKmRz(Yhm({ACmb&Q)9#9CxsfZ+Ulh 
zGN-+POXI;xSa(@ft~L!*9PRfp-d}g(b=)`0ObjL>kPxbINWhaW@7UC`mVN6m-NWGM z(9z?h&pHslF#(1(B<5K90*-)nEX5u9XcWK2G@a`g-~WqT;*%(UAgYq9GEnR z2!a-LlzaHZ5QVoi;aNMYGS`^VirLM$d@!uzr71TF?!u@gRpeV|9Sz}~dG@kyekejQ zI|wx~pTDLuaNPO?>a*AME&Z@^IYd?WWPeTzs`i3XlYv}c)d+&!iSUDGtGC-{m7JSC zJku;i+5=@rtJu3g(I%yii+d) zJ5b6qZ*D*b9$N|MY?&Tw!c;3iB1%(w>+6OO&b)_W-JgqNXzo39eqy;LX|GkmHXq$| zGN-XsHTG%}wK8dEa%Wl`p{i}kkGsm}v3*#H|A*kYWX*v3V6+iu(({^`yF5+f!B*Ul$x(j6DoY{q%Sf$1RQqe%wR1r27p9oW9pqj#nj7-pvmwH+#Gu zFY|D4_dTI+xn(o(9KkW$687-=v{3%2+95eY2DiFM3-kb72kvf6Qqd5h6}4da0S1YL z;2Gmm449xfStXxP;~*OE3}Nn$S;twZmcmK?3H*WgS(P7RuB5SE-M&POnJ zpt#LO`!>~w89llmRBI;OF-0&6S}Beyv4S)5e-bbiV`^+vs}`xg^wG8x|2Xv;>A=b| z0Y&-P(;eNUjrAZ7V|xOfYRxKUzm=hDJZomh;g#8AIbNX~-GMXIcFY7P>-goLu|QJ? zPqX2~MIalGx@tCVXlG|__tzj-ebEfa5FW;OS2Hf=cDL*rl%5I|4ZVicZPQRRZkO%2 zg6&7f+YhTG-8>=VP4rq44~UPp;g(wMY@(O7A+c$ z*I)k-MkQ1!)}eR3;cl6RfCqv^d~qr2=%_==$P(-=CUh*XI^pY*Id2Q(vwc|R-0$*= zxozMsh9)S|#Xl5NRdcX^EAG_ISKVF{Oa;lJzYS5r;fVmRaxS^Jc@w?F$j?}QDMJiV zvb~-sg@)5ITKR%krOIUl(IpM=v4oNtBBO=l`-OqzZdGUF=6+58>TS0o4a6neFvZ5 zm@rFthdfeDWZYOzw|IX(g2e)2AzGa>VWr_7?kIZaG5V^|uGr?LKy$)NjEM|V^t}z+ zVJ4%9&UCc&qM?;Fovt-OS(oz zUtUbRN`|jAYd+10`e;boHUT->JLSL<`((Hrxwi{`v{24CLagrESIDo|n3KD2O117# z9aB)>tf4Re3}OlHF6@v4^a&UJCNX|@)qastuwTYJY{>mBc8ZgmUP z80K0n*_D{G#uFk4phVntJD9;Us!7Piod~LvN!IP;9t!9sQmUYq-@6zuh10?szLbPkUBTUt1@@T z+c#9`QVE^%ZnNAIE$^+|>;TR%EZ=s7OKp0@#U%urPbg~_KROzgcVcA$n_0i z)!KgiaGy#)WzQ?*Ay4pb@xeRFsonR)=mgD&9c%dcpkW95ac=9mox>nBMQ-VF+O{p%)z0v;;^tpm%}zeD4$bt?l8G5J;aL)Ct}AFiqF>&-xRGLCf5;*}0u{PPVK&OEbi!(dtS4ChTUM5n&x&p8CF`enn{oCL`_35Tu4>8cd^ z4P-nzh6DG@Y2jZevFO8#%!V-GA(orC#3Nw&y!ZZG?|dxQbA#+WnzRh_wU#9%g zf6qoJ>^C|(nEt(Bj#ccl0(@2DDGjEt&jfgRy0H3w zl=)prnj~alc+%_%oBcga`HEZ9tf6yaJN>*yaZ`JHdudTC5csZ?SpqGEjCAH}!Zlsc zGqbfj2C{iQC^XgA@>6OZSF=Zu8@(__prV*114Qz0kC!8E&}g8B3?~-YL&YUlusj*p zqJF{6LSaVAn6PsDL+i7>Z5y#FN~&)7AnHe_xSTzU`bJeR=%RA$(_~JaZgiJNo|r9W ziOS;X3bLwZVY!AMcqGX_dw?DX9-%m++~u0a9{^>UBx5{ufW};!!O_Gbp4_E$tHX26 zeg2Gs8Xft#mUyb>`tXPKIP3|IOq#`Us_73JE^UyvYJ@!F+GuF0dQP1c$zSv@b42K; z59+6;c-=G;cX^3)Y(VVP5RGK^o+0Pi=5!EDis?0yUQKzpHLcL>X(b97c|UUSj2ld7 zFOQU{HhhTb@tY+?pQAZpyLILj!taPaIbRGdfS{os)-4{U;VB$hc@|9v&3_$j|CSyM zlRyL0&1!ZR)RHnLc}O?C_{#kP0`)_jCX%G3*8;lIaA!=(nrjP6YbMu8K6~Y9+pss* zt=SmqR@SjMHhdZn;gaNO%v;BG=dz7_?4xLW_P$*TN{^%hjWFfBdB(TZ!G-oy|25UU zRk;mmhx?vv9?8brwiurcpa%$22~{mfn?1Ej@^`a^VZA-67Cv^(LRjV7incqw=SCx! 
zc?<6sxp`COFytjt+^CvZ${VMd{&)VN5F1``$wh5-J4xh60e*S0C{l#vVJCHPa;I<;S_~4-}|$gwu=zE&`Dwtid3Yo zZrA3)pH{=>t5BN;$p)ZGXK5BSdq~u1<6(>UOiWqsjty&2Q7;#sN1jHabFYue zSr59b+3Z8m+;Vzv`aTulFNpy|`~429An3cfzxiIjdkw%drie2K7-wD+1IjL&`lept z!p`?|EQzBj#{s;G5w9j4RPj$~r(ZEYJc^@ZcQ)#NR* zS_}>o{}$QgdRy_QY2&L`a!+QlYscZXkxhZf11vK|pTpBC&4bp=M ztfv>Ac+@5K+*ct zg}}u(%8gl$MYml_i$ApZOlw@FL=etTBt>E9?BfX(L4am^5eC+69|8voj^R0)Ks$b~ z7q#(gW@&U!@t~T;+(piJXtcx ze%a~`rIaKCcJ3zPlJM{oVfTsN_s!|(SS?Q^Uzs@`Hv!+OA8r*Az(Aj(nc;0H;21YJ z)Bm0`0ja|(Ltm@y0e{i~p49ZSWohEmZRH!uL1Kd3efCmwpNP#}lg#yIZrbl+)1mu1 zlPw^Ah)|mCkYNg2>JEmz?YHXfNlTp2Nn$b z+rLr4cKE;i6FBQ{Y;B+iocD(YgZ#y$@UPS^Tn4I(M^P2fcmII)<<07^)BtS&+wl^( zMD3f)u-7*+wA7_Hv$wYTkNy`+{9zLLkMw2jl2q~mN>jiu4hVe-pq~8xwIGl7S^!Tc zUzL2LdW`cDKw293<{Kd7MS!^dojk~81wbxW_->r{a%HLHU|@aoU|`ol2s;8@-|vJk z9pV2w*$i9Fd*eMshGPH$A!abZ)-J@I<)uw`LG~MYI(?v7Y;9rv?a6QefdS^H(eIbl zl8`?RU z>Kg*Am;z^4E?BsN>^WOL*G9krA4~aVDE@_zcDz#Ae#L@}tfH_2y@QLxRkfyl(Lr4s z-~|)lg%AY5=kBin?7y9F`1*AF-&gv}20`^bL%ec8GkF0VPBXYXH0_|L<&Ci+%JZ1u(M#DTC_EM}@y&+5peW z|BGr;1-q2=1FC-tTv1{MVef&`U!d&u?M!VP?7w&>BU58bU7LRbR@U`)oB(XX3E&gd zuq)Jl32bd)_)h?n0TrbLJU6Ie&#V6mKpVKlW@GrhS@|bxyNH<0N@2)fjQSi{ zs9PY?Lo)akhNYo{ovHp+72?-c{XH^&b>z8*zrxYA(=!Du?aC%iHNJu`4um2c(k}(> zWxG24BmMucQNhJ|(24?bTLR=p1A>0w`xT zo_~R}`NtsKGmF%74roLAPf$4Czk;%{vj+U0iJ_yto#Q|Ha9jge+YJD!0pN@QM2OBl zzk>R2mjF0i;ridQhy+a*h2j=RNV0Und)zfteArYtv}@EUvMs0L7;Ks8=^ zfmM|KSjEu>{|o?H75$d{SLw6=AbrqK$CUFk`HP(SzjZK!{$uU(MLzs(hunzJU|`88 zU|=L5D&_Y_`k;lyZyC8*aBPzzQCUE5nSkty90X{$@K22VRA5{IOD%q+upGeh1Q-L& zi&KjKh~=aYc$)8S^upm>0rKvC*ZMHvMw3Z?WUspPPmn*QSy#ZK_O~Vd61}4U`{4r- z&K%iR9IMUDS3P^lj9!7c#Lv>@O^{eJ}i+GqG*-E0Y5kO30;D=co%{9VXE zfL%2PFfbMn#NB~EAp)V=!uo-wp%tK5|35n-2;|#o0eJn=cLmKDmtX%0`4>6X75Ib= zCt|1rkoq~nAv{*lx{u9<0_WoB}{G(qe@P|zMVw}DJe|JDs$7}Xan7?vP z-|C!ybxl-T3Azx#CnNxDgBmLi(6an4+V6iaM0}B%(dk+k+BpCsrvuH=uAowk+ji|3 zKp=Plwf~jB=m26K007!KpUf1fRE(6J#hv!>SK9-?4KxZ{tOSY&v!MKn0=u6`5gdY z2>^iFZT6?%0sQ=m#nrH0^{U<#0AxuHDDd~B5BOhT=iwtN_)iLac{}P)^naaHVt!kz zuPJ>f+RyZjOo6W5*P8OT3FWV8Gl9^a`7Lb&U|Yk`jt&SLb`NZ`24%@m6a}MHv4`pybHdx zf4DS#HB8X!#g{Rgf!O+2%NyQ7aFMKz`#_1|8#)8K(#MdufhHgh;+!_ literal 0 HcmV?d00001 diff --git a/mongodb/.helmignore b/mongodb/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/mongodb/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/mongodb/Chart.yaml b/mongodb/Chart.yaml new file mode 100755 index 0000000..7a7dee5 --- /dev/null +++ b/mongodb/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 4.4.1 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. 
+engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/mongodb +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +- https://mongodb.org +version: 9.2.6 diff --git a/mongodb/README.md b/mongodb/README.md new file mode 100755 index 0000000..27035dc --- /dev/null +++ b/mongodb/README.md @@ -0,0 +1,565 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Architecture + +This charts allows you install MongoDB using two different architecture setups: "standalone" or "replicaset". You can use the `architecture` parameter to choose the one to use: + +```console +architecture="standalone" +architecture="replicaset" +``` + +The standalone architecture installs a deployment (or statefulset) with one MongoDB server (it cannot be scaled): + +``` + ┌────────────────┐ + │ MongoDB │ + | svc │ + └───────┬────────┘ + │ + ▼ + ┌──────────┐ + │ MongoDB │ + │ Server │ + │ Pod │ + └──────────┘ +``` + +The chart supports the replicaset architecture with and without a [MongoDB Arbiter](https://docs.mongodb.com/manual/core/replica-set-arbiter/): + +* When the MongoDB Arbiter is enabled, the chart installs two statefulsets: A statefulset with N MongoDB servers (organised with one primary and N-1 secondary nodes), and a statefulset with one MongoDB arbiter node (it cannot be scaled). 
+
+  ```
+  ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ ┌─────────────┐
+  │   MongoDB 0    │ │   MongoDB 1    │ │   MongoDB N    │ │   Arbiter   │
+  │  external svc  │ │  external svc  │ │  external svc  │ │     svc     │
+  └───────┬────────┘ └───────┬────────┘ └───────┬────────┘ └──────┬──────┘
+          │                  │                  │                 │
+          ▼                  ▼                  ▼                 ▼
+    ┌───────────┐      ┌───────────┐      ┌───────────┐     ┌───────────┐
+    │ MongoDB 0 │      │ MongoDB 1 │      │ MongoDB N │     │  MongoDB  │
+    │  Server   │      │  Server   │ .... │  Server   │     │  Arbiter  │
+    │    Pod    │      │    Pod    │      │    Pod    │     │    Pod    │
+    └───────────┘      └───────────┘      └───────────┘     └───────────┘
+       primary          secondary          secondary
+  ```
+
+  The PSA model is useful when the third Availability Zone cannot hold a full MongoDB instance. The MongoDB Arbiter, which only acts as a decision maker, is lightweight and can run alongside other workloads.
+
+  _Note:_ An update takes your MongoDB replicaset offline if the Arbiter is enabled and the number of MongoDB replicas is two. Helm applies updates to the statefulsets for the MongoDB instance _and_ the Arbiter at the same time, so you lose two out of three quorum votes.
+
+* Without the Arbiter, the chart deploys a single statefulset with N MongoDB servers (organised with one primary and N-1 secondary nodes).
+
+  ```
+  ┌────────────────┐ ┌────────────────┐ ┌────────────────┐
+  │   MongoDB 0    │ │   MongoDB 1    │ │   MongoDB N    │
+  │  external svc  │ │  external svc  │ │  external svc  │
+  └───────┬────────┘ └───────┬────────┘ └───────┬────────┘
+          │                  │                  │
+          ▼                  ▼                  ▼
+    ┌───────────┐      ┌───────────┐      ┌───────────┐
+    │ MongoDB 0 │      │ MongoDB 1 │      │ MongoDB N │
+    │  Server   │      │  Server   │ .... │  Server   │
+    │    Pod    │      │    Pod    │      │    Pod    │
+    └───────────┘      └───────────┘      └───────────┘
+       primary          secondary          secondary
+  ```
+
+There is no service load-balancing requests across the MongoDB nodes; instead, each node has an associated service so it can be accessed individually.
+
+> Note: although the 1st replica is initially assigned the "primary" role, any of the "secondary" nodes can become the "primary" if it goes down, or during upgrades. Do not make any assumptions about which replica holds the "primary" role; instead, configure your MongoDB client with the list of MongoDB hostnames so it can dynamically choose the node to send requests to.
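+
+As a concrete illustration of the note above, a replica-set-aware connection string lists every member and lets the driver pick the current primary. The hostnames below are only an assumption for a release named `my-release` in the `default` namespace, using the chart's default naming and the default `replicaSetName` (`rs0`); adjust them to your environment:
+
+```console
+# Hypothetical hostnames for release "my-release" in namespace "default"
+mongodb://my-user:my-password@my-release-mongodb-0.my-release-mongodb-headless.default.svc.cluster.local:27017,my-release-mongodb-1.my-release-mongodb-headless.default.svc.cluster.local:27017/my-database?replicaSet=rs0
+```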
+ +## Parameters + +The following tables lists the configurable parameters of the MongoDB chart and their default values per section/component: + +### Global parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.namespaceOverride` | Global string to override the release namespace | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `nameOverride` | String to partially override mongodb.fullname | `nil` | +| `fullnameOverride` | String to fully override mongodb.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB image name | `bitnami/mongodb` | +| `image.tag` | MongoDB image tag | `{TAG_NAME}` | +| `image.pullPolicy` | MongoDB image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + +### MongoDB parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `architecture` | MongoDB architecture (`standalone` or `replicaset`) | `standalone` | +| `useStatefulSet` | Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) | `false` | +| `auth.enabled` | Enable authentication | `true` | +| `auth.rootPassword` | MongoDB admin password | _random 10 character long alphanumeric string_ | +| `auth.username` | MongoDB custom user (mandatory if `auth.database` is set) | `nil` | +| `auth.password` | MongoDB custom user password | _random 10 character long alphanumeric string_ | +| `auth.database` | MongoDB custom database | `nil` | +| `auth.replicaSetKey` | Key used for authentication in the replicaset (only when `architecture=replicaset`) | _random 10 character long alphanumeric string_ | +| `auth.existingSecret` | Existing secret with MongoDB credentials (keys: `mongodb-password`, `mongodb-root-password`, ` mongodb-replica-set-key`) | `nil` | +| `replicaSetName` | Name of the replica set (only when `architecture=replicaset`) | `rs0` | +| `replicaSetHostnames` | Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) | `true` | +| `enableIPv6` | Switch to enable/disable IPv6 on MongoDB | `false` | +| `directoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB | `false` | +| `systemLogVerbosity` | MongoDB system log verbosity level | `0` | +| 
`disableSystemLog` | Switch to enable/disable MongoDB system log | `false` | +| `configuration` | MongoDB configuration file to be used | `{}` | +| `existingConfigmap` | Name of existing ConfigMap with MongoDB configuration | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts | `nil` | +| `command` | Override default container command (useful when using custom images) | `nil` | +| `args` | Override default container args (useful when using custom images) | `nil` | +| `extraFlags` | MongoDB additional command line flags | `[]` | +| `extraEnvVars` | Extra environment variables to add to MongoDB pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `nil` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `nil` | + +### MongoDB statefulset parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of MongoDB nodes (only when `architecture=replicaset`) | `2` | +| `labels` | Annotations to be added to the MongoDB statefulset | `{}` (evaluated as a template) | +| `annotations` | Additional labels to be added to the MongoDB statefulset | `{}` (evaluated as a template) | +| `podManagementPolicy` | Pod management policy for MongoDB | `OrderedReady` | +| `strategyType` | StrategyType for MongoDB statefulset | `RollingUpdate` | +| `podLabels` | MongoDB pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | MongoDB Pod annotations | `{}` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by MongoDB pod(s) | `""` | +| `affinity` | Affinity for MongoDB pod(s) assignment | `{}` (evaluated as a template) | +| `nodeSelector` | Node labels for MongoDB pod(s) assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for MongoDB pod(s) assignment | `[]` (evaluated as a template) | +| `podSecurityContext` | MongoDB pod(s)' Security Context | Check `values.yaml` file | +| `containerSecurityContext` | MongoDB containers' Security Context | Check `values.yaml` file | +| `resources.limits` | The resources limits for MongoDB containers | `{}` | +| `resources.requests` | The requested resources for MongoDB containers | `{}` | +| `livenessProbe` | Liveness probe configuration for MongoDB | Check `values.yaml` file | +| `readinessProbe` | Readiness probe configuration for MongoDB | Check `values.yaml` file | +| `customLivenessProbe` | Override default liveness probe for MongoDB containers | `nil` | +| `customReadinessProbe` | Override default readiness probe for MongoDB containers | `nil` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation for MongoDB pod(s) | `false` | +| `pdb.minAvailable` | Minimum number/percentage of MongoDB pods that should remain scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of MongoDB pods that may be made unavailable | `nil` | +| `initContainers` | Add additional init containers for the MongoDB pod(s) | `{}` (evaluated as a template) | +| `sidecars` | Add additional sidecar containers for the MongoDB pod(s) | `{}` (evaluated as a template) | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MongoDB container(s) | `{}` | +| 
`extraVolumes` | Optionally specify extra list of additional volumes to the MongoDB statefulset | `{}` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|----------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | MongoDB service port | `27017` | +| `service.portName` | MongoDB service port name | `mongodb` | +| `service.nodePort` | Port to bind to for NodePort and LoadBalancer service types | `""` | +| `service.clusterIP` | MongoDB service cluster IP | `nil` | +| `service.loadBalancerIP` | loadBalancerIP for MongoDB Service | `nil` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `service.annotations` | Service annotations | `{}` (evaluated as a template) | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to MongoDB nodes | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Servive type for external access. 
It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.port` | MongoDB port used for external access when service type is LoadBalancer | `27017` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB nodes | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.domain` | Domain or external IP used to configure MongoDB advertised hostname when service type is NodePort | `nil` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure MongoDB advertised hostname when service type is NodePort | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | + +### Persistence parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable MongoDB data persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `nil` (evaluated as a template) | +| `persistence.storageClass` | PVC Storage Class for MongoDB data volume | `nil` | +| `persistence.accessMode` | PVC Access Mode for MongoDB data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for MongoDB data volume | `8Gi` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | + +### RBAC parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `serviceAccount.create` | Enable creation of ServiceAccount for MongoDB pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `mongodb.fullname` template | +| `rbac.create` | Weather to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| 
`volumePermissions.securityContext` | Security context of the init container | Check `values.yaml` file | + +### Arbiter parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `arbiter.enabled` | Enable deploying the arbiter | `true` | +| `arbiter.configuration` | Arbiter configuration file to be used | `{}` | +| `arbiter.existingConfigmap` | Name of existing ConfigMap with Arbiter configuration | `nil` | +| `arbiter.command` | Override default container command (useful when using custom images) | `nil` | +| `arbiter.args` | Override default container args (useful when using custom images) | `nil` | +| `arbiter.extraFlags` | Arbiter additional command line flags | `[]` | +| `arbiter.extraEnvVars` | Extra environment variables to add to Arbiter pods | `[]` | +| `arbiter.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `nil` | +| `arbiter.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `nil` | +| `arbiter.labels` | Annotations to be added to the Arbiter statefulset | `{}` (evaluated as a template) | +| `arbiter.annotations` | Additional labels to be added to the Arbiter statefulset | `{}` (evaluated as a template) | +| `arbiter.podLabels` | Arbiter pod labels | `{}` (evaluated as a template) | +| `arbiter.podAnnotations` | Arbiter Pod annotations | `{}` (evaluated as a template) | +| `arbiter.priorityClassName` | Name of the existing priority class to be used by Arbiter pod(s) | `""` | +| `arbiter.affinity` | Affinity for Arbiter pod(s) assignment | `{}` (evaluated as a template) | +| `arbiter.nodeSelector` | Node labels for Arbiter pod(s) assignment | `{}` (evaluated as a template) | +| `arbiter.tolerations` | Tolerations for Arbiter pod(s) assignment | `[]` (evaluated as a template) | +| `arbiter.podSecurityContext` | Arbiter pod(s)' Security Context | Check `values.yaml` file | +| `arbiter.containerSecurityContext` | Arbiter containers' Security Context | Check `values.yaml` file | +| `arbiter.resources.limits` | The resources limits for Arbiter containers | `{}` | +| `arbiter.resources.requests` | The requested resources for Arbiter containers | `{}` | +| `arbiter.livenessProbe` | Liveness probe configuration for Arbiter | Check `values.yaml` file | +| `arbiter.readinessProbe` | Readiness probe configuration for Arbiter | Check `values.yaml` file | +| `arbiter.customLivenessProbe` | Override default liveness probe for Arbiter containers | `nil` | +| `arbiter.customReadinessProbe` | Override default readiness probe for Arbiter containers | `nil` | +| `arbiter.pdb.create` | Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) | `false` | +| `arbiter.pdb.minAvailable` | Minimum number/percentage of Arbiter pods that should remain scheduled | `1` | +| `arbiter.pdb.maxUnavailable` | Maximum number/percentage of Arbiter pods that may be made unavailable | `nil` | +| `arbiter.initContainers` | Add additional init containers for the Arbiter pod(s) | `{}` (evaluated as a template) | +| `arbiter.sidecars` | Add additional sidecar containers for the Arbiter pod(s) | `{}` (evaluated as a template) | +| `arbiter.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Arbiter container(s) | `{}` | +| `arbiter.extraVolumes` | Optionally specify extra list of 
additional volumes to the Arbiter statefulset | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `metrics.enabled` | Enable using a sidecar Prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB Prometheus exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB Prometheus exporter image name | `bitnami/mongodb-exporter` | +| `metrics.image.tag` | MongoDB Prometheus exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | MongoDB Prometheus exporter image pull policy | `Always` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.extraFlags` | Additional command line flags | `""` | +| `metrics.extraUri` | Additional URI options of the metrics service | `""` | +| `metrics.service.type` | Type of the Prometheus metrics service | `ClusterIP file` | +| `metrics.service.port` | Port of the Prometheus metrics service | `9216` | +| `metrics.service.annotations` | Annotations for Prometheus metrics service | Check `values.yaml` file | +| `metrics.resources.limits` | The resources limits for Prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for Prometheus exporter containers | `{}` | +| `metrics.livenessProbe` | Liveness probe configuration for Prometheus exporter | Check `values.yaml` file | +| `metrics.readinessProbe` | Readiness probe configuration for Prometheus exporter | Check `values.yaml` file | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `nil` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `monitoring` | +| `metrics.prometheusRule.rules` | Rules to be created, check values for an example. | `[]` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.rootPassword=secretpassword,auth.username=my-user,auth.password=my-password,auth.database=my-database \ + bitnami/mongodb +``` + +The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/mongodb
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration and horizontal scaling
+
+This chart includes a `values-production.yaml` file with parameters oriented to production configuration, in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Switch to enable/disable replica set configuration:
+
+```diff
+- architecture: standalone
++ architecture: replicaset
+```
+
+- Increase the number of MongoDB nodes:
+
+```diff
+- replicaCount: 2
++ replicaCount: 4
+```
+
+- Enable Pod Disruption Budget:
+
+```diff
+- pdb.create: false
++ pdb.create: true
+```
+
+- Enable using a sidecar Prometheus exporter:
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+To horizontally scale this chart, set the `replicaCount` parameter (for example, `--set replicaCount=3`) to modify the number of secondary nodes in your MongoDB replica set.
+
+### Initialize a fresh instance
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify them using the `initdbScripts` parameter as a dict.
+
+You can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option.
+
+The allowed extensions are `.sh` and `.js`.
+
+### Replicaset: Accessing MongoDB nodes from outside the cluster
+
+In order to access MongoDB nodes from outside the cluster when using a replicaset architecture, a specific service per MongoDB pod will be created. There are two ways of configuring external access:
+
+- Using LoadBalancer services
+- Using NodePort services
+
+#### Using LoadBalancer services
+
+You have two alternatives to use LoadBalancer services:
+
+- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically.
+
+```console
+architecture=replicaset
+replicaCount=2
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.port=27017
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+> Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the load balancer IPs:
+
+```console
+architecture=replicaset
+replicaCount=2
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.port=27017
+externalAccess.service.loadBalancerIPs[0]='external-ip-1'
+externalAccess.service.loadBalancerIPs[1]='external-ip-2'
+```
+
+> Note: You need to know the load balancer IPs in advance so that each MongoDB node's advertised hostname is configured with them.
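+
+The `--set`-style options above can also be expressed as a values file. The following is only a sketch of Option B using the parameters documented above (the file name and the IP addresses are placeholders to replace with your own), installed with `helm install my-release -f values-external-access.yaml bitnami/mongodb`:
+
+```yaml
+# Placeholder values file for external access via LoadBalancer services
+architecture: replicaset
+replicaCount: 2
+externalAccess:
+  enabled: true
+  service:
+    type: LoadBalancer
+    port: 27017
+    loadBalancerIPs:
+      - external-ip-1
+      - external-ip-2
+```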
+
+#### Using NodePort services
+
+Manually specify the node ports to use:
+
+```console
+architecture=replicaset
+replicaCount=2
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.service.nodePorts[0]='node-port-1'
+externalAccess.service.nodePorts[1]='node-port-2'
+```
+
+> Note: You need to know the node ports that will be exposed in advance so that each MongoDB node's advertised hostname is configured with them.
+
+The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided.
+
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: LOG_LEVEL
+    value: error
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as MongoDB (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+## Persistence
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Upgrading
+
+If authentication is enabled, it's necessary to set the `auth.rootPassword` (and `auth.replicaSetKey` when using a replicaset architecture) when upgrading so that the readiness/liveness probes keep working properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password, and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release bitnami/mongodb --set auth.rootPassword=[PASSWORD] (--set auth.replicaSetKey=[REPLICASETKEY])
+```
+
+> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
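+
+Instead of copying the credentials from the installation notes, they can usually be read back from the release secret. This is only a sketch that assumes the secret follows the chart's default naming for a release called `my-release` (i.e. `my-release-mongodb`, with the `mongodb-root-password` and `mongodb-replica-set-key` keys mentioned above) and that a replicaset architecture is used:
+
+```console
+# Read the current credentials back from the (assumed) release secret
+$ export MONGODB_ROOT_PASSWORD=$(kubectl get secret my-release-mongodb -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
+$ export MONGODB_REPLICA_SET_KEY=$(kubectl get secret my-release-mongodb -o jsonpath="{.data.mongodb-replica-set-key}" | base64 --decode)
+$ helm upgrade my-release bitnami/mongodb --set auth.rootPassword=$MONGODB_ROOT_PASSWORD --set auth.replicaSetKey=$MONGODB_REPLICA_SET_KEY
+```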
+
+### To 9.0.0
+
+MongoDB container images were updated to `4.4.x`, which can affect compatibility with older versions of MongoDB. Refer to the following guides to upgrade your applications:
+
+- [Standalone](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-standalone/)
+- [Replica Set](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-replica-set/)
+
+### To 8.0.0
+
+- The architecture used to configure MongoDB as a replicaset was completely refactored. Now, both primary and secondary nodes are part of the same statefulset.
+- Chart labels were adapted to follow the Helm charts best practices.
+- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure you have updated the chart dependencies before executing any upgrade.
+- Several parameters were renamed or removed in favor of new ones in this major version. These are the most important ones:
+  - `replicas` is renamed to `replicaCount`.
+  - Authentication parameters are reorganized under the `auth.*` parameter:
+    - `usePassword` is renamed to `auth.enabled`.
+    - `mongodbRootPassword`, `mongodbUsername`, `mongodbPassword`, `mongodbDatabase`, and `replicaSet.key` are now `auth.rootPassword`, `auth.username`, `auth.password`, `auth.database`, and `auth.replicaSetKey` respectively.
+  - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
+  - Parameters prefixed with `mongodb` are renamed to remove the prefix. E.g. `mongodbEnableIPv6` is renamed to `enableIPv6`.
+  - Parameters affecting Arbiter nodes are reorganized under the `arbiter.*` parameter.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MongoDB chart, and migrate your data by creating a backup of the database and restoring it on the new release.
+
+### To 7.0.0
+
+From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example:
+
+```yaml
+ingress:
+  hosts:
+    - name: mongodb.local
+      path: /
+```
+
+### To 6.0.0
+
+From this version, `mongodbEnableIPv6` is set to `false` by default in order to work properly in most k8s clusters. If you want to use IPv6 support, you need to set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command.
+You can find more information in the [`bitnami/mongodb` image README](https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md).
+
+### To 5.0.0
+
+When enabling replicaset configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions prior to 5.0.0. The following example assumes that the release name is `my-release`:
+
+```console
+$ kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false
+```
diff --git a/mongodb/charts/common/.helmignore b/mongodb/charts/common/.helmignore
new file mode 100755
index 0000000..50af031
--- /dev/null
+++ b/mongodb/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/mongodb/charts/common/Chart.yaml b/mongodb/charts/common/Chart.yaml new file mode 100755 index 0000000..5566cdc --- /dev/null +++ b/mongodb/charts/common/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 0.8.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +version: 0.8.1 diff --git a/mongodb/charts/common/README.md b/mongodb/charts/common/README.md new file mode 100755 index 0000000..9bcdfd6 --- /dev/null +++ b/mongodb/charts/common/README.md @@ -0,0 +1,286 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|-----------------------------------------------------------------|----------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. 
| `.` Chart context |
+| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
+| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
+
+### Errors
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure. |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value to be rendered as a template, and context is usually the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|---|---|---|
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $`; secret and field are optional. If they are given, the helper will generate instructions on how to get the current value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper ensures that the required MariaDB passwords are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether MariaDB is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper ensures that the required PostgreSQL passwords are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether PostgreSQL is used as a subchart.
| + +### Warnings + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/mongodb/charts/common/templates/_affinities.tpl b/mongodb/charts/common/templates/_affinities.tpl new file mode 100755 index 0000000..40f575c --- /dev/null +++ b/mongodb/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_capabilities.tpl b/mongodb/charts/common/templates/_capabilities.tpl new file mode 100755 index 0000000..143bef2 --- /dev/null +++ b/mongodb/charts/common/templates/_capabilities.tpl @@ -0,0 +1,33 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_errors.tpl b/mongodb/charts/common/templates/_errors.tpl new file mode 100755 index 0000000..d6d3ec6 --- /dev/null +++ b/mongodb/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_images.tpl b/mongodb/charts/common/templates/_images.tpl new file mode 100755 index 0000000..aafde9f --- /dev/null +++ b/mongodb/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/mongodb/charts/common/templates/_labels.tpl b/mongodb/charts/common/templates/_labels.tpl new file mode 100755 index 0000000..252066c --- /dev/null +++ b/mongodb/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/mongodb/charts/common/templates/_names.tpl b/mongodb/charts/common/templates/_names.tpl new file mode 100755 index 0000000..adf2a74 --- /dev/null +++ b/mongodb/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_secrets.tpl b/mongodb/charts/common/templates/_secrets.tpl new file mode 100755 index 0000000..8eee91d --- /dev/null +++ b/mongodb/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_storage.tpl b/mongodb/charts/common/templates/_storage.tpl new file mode 100755 index 0000000..60e2a84 --- /dev/null +++ b/mongodb/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/mongodb/charts/common/templates/_tplvalues.tpl b/mongodb/charts/common/templates/_tplvalues.tpl new file mode 100755 index 0000000..2db1668 --- /dev/null +++ b/mongodb/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/mongodb/charts/common/templates/_utils.tpl b/mongodb/charts/common/templates/_utils.tpl new file mode 100755 index 0000000..74774a3 --- /dev/null +++ b/mongodb/charts/common/templates/_utils.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_validations.tpl b/mongodb/charts/common/templates/_validations.tpl new file mode 100755 index 0000000..05d1edb --- /dev/null +++ b/mongodb/charts/common/templates/_validations.tpl @@ -0,0 +1,278 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/mongodb/charts/common/templates/_warnings.tpl b/mongodb/charts/common/templates/_warnings.tpl new file mode 100755 index 0000000..ae10fa4 --- /dev/null +++ b/mongodb/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/mongodb/charts/common/values.yaml b/mongodb/charts/common/values.yaml new file mode 100755 index 0000000..9ecdc93 --- /dev/null +++ b/mongodb/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/mongodb/mongodb-values.yaml b/mongodb/mongodb-values.yaml new file mode 100644 index 0000000..4adbee4 --- /dev/null +++ b/mongodb/mongodb-values.yaml @@ -0,0 +1,460 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + #tag: 4.2.4-debian-10-r3 + tag: 4.2.5-debian-10-r44 + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: false + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mongodb.fullname template +## +# fullnameOverride: + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +#mongodbRootPassword: password + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +#mongodbUsername: qsefe +#mongodbPassword: qsefe +#mongodbDatabase: qsefe + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +mongodbDirectoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +mongodbSystemLogVerbosity: 0 +mongodbDisableSystemLog: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + ## Specify an explicit service name. + name: svc-mongo + annotations: {} + type: ClusterIP + # clusterIP: None + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + # externalIPs: [] + + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. 
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: [] + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + enabled: true + minAvailable: + secondary: 1 + arbiter: 1 + # maxUnavailable: + # secondary: 1 + # arbiter: 1 + +# Annotations to be added to the deployment or statefulsets +annotations: {} + +# Additional labels to apply to the deployment or statefulsets +labels: {} + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +# Additional pod labels to apply +podLabels: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# Define separate resources per arbiter, which are less then primary or secondary +# used only when replica set is enabled +resourcesArbiter: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} +# Define separate affinity for arbiter pod +affinityArbiter: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure the ingress resource that allows you to access the +## MongoDB installation. 
Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: mongodb.example.com + path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + #tls: + #- hosts: + # - mongodb.local + # secretName: mongodb.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: airflow.local-tls + # key: + # certificate: + +## Configure the options for init containers to be run before the main app containers +## are started. All init containers are run sequentially and must exit without errors +## for the next one to be started. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. 
+# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.10.0-debian-9-r24 + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/dcu/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: false + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotation + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/mongodb/mongodb/.helmignore b/mongodb/mongodb/.helmignore new file mode 100755 index 0000000..f0c1319 --- /dev/null +++ b/mongodb/mongodb/.helmignore @@ -0,0 +1,21 @@ +# Patterns to 
ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/mongodb/mongodb/Chart.yaml b/mongodb/mongodb/Chart.yaml new file mode 100755 index 0000000..88ca67b --- /dev/null +++ b/mongodb/mongodb/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +appVersion: 4.2.5 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. +engine: gotpl +home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 7.10.11 diff --git a/mongodb/mongodb/README.md b/mongodb/mongodb/README.md new file mode 100755 index 0000000..6a1ee9e --- /dev/null +++ b/mongodb/mongodb/README.md @@ -0,0 +1,318 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the MongoDB chart and their default values. 
+ +| Parameter | Description | Default | +|----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `nameOverride` | String to partially override mongodb.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override mongodb.fullname template with a string | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | If serviceAccount.create is enabled, what should the serviceAccount name be - otherwise defaults to the fullname | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alphanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user (mandatory if `mongodbDatabase` is set) | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alphanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `false` | +| `mongodbDirectoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB | `false` | +| `mongodbSystemLogVerbosity` | MongoDB system log verbosity level | `0` | +| `mongodbDisableSystemLog` | Whether to disable MongoDB system log or not | `false` | +| `mongodbExtraFlags` | MongoDB additional command line flags | `[]` | +| `service.name` | Kubernetes service name | `nil` | +| `service.annotations` | Kubernetes service annotations, evaluated as a template | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `service.port` | MongoDB service port | `27017` | +| `service.nodePort` | Port to bind to for NodePort service 
type | `nil` | +| `service.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `nil` | +| `service.externalIPs` | External IP list to use with ClusterIP service type | `[]` | +| `service.loadBalancerSourceRanges` | List of IP ranges allowed access to load balancer (if supported) | `[]` (does not add IP range restrictions to the service) | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `random alphanumeric string (10)` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.enabled` | Switch to enable/disable Pod Disruption Budget | `true` | +| `replicaSet.pdb.minAvailable.secondary` | PDB (min available) for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB (min available) for the MongoDB Arbiter nodes | `1` | +| `replicaSet.pdb.maxUnavailable.secondary` | PDB (max unavailable) for the MongoDB Secondary nodes | `nil` | +| `replicaSet.pdb.maxUnavailable.arbiter` | PDB (max unavailable) for the MongoDB Arbiter nodes | `nil` | +| `annotations` | Annotations to be added to the deployment or statefulsets | `{}` | +| `labels` | Additional labels for the deployment or statefulsets | `{}` | +| `podAnnotations` | Annotations to be added to pods | `{}` | +| `podLabels` | Additional labels for the pod(s). | `{}` | +| `resources` | Pod resources | `{}` | +| `resourcesArbiter` | Pod resources for arbiter when replica set is enabled | `{}` | +| `priorityClassName` | Pod priority class name | `` | +| `extraEnvVars` | Array containing extra env vars to be added to all pods in the cluster (evaluated as a template) | `nil` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Affinity for pod assignment | `{}` | +| `affinityArbiter` | Affinity for arbiter pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `{}` | +| `updateStrategy` | Statefulsets update strategy policy | `RollingUpdate` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `sidecars` | Add additional containers to pod | `[]` | +| `extraVolumes` | Add additional volumes to deployment | `[]` | +| `extraVolumeMounts` | Add additional volumes mounts to pod | `[]` | +| `sidecarsArbiter` | Add additional containers to arbiter pod | `[]` | +| `extraVolumesArbiter` | Add additional volumes to arbiter deployment | `[]` | +| `extraVolumeMountsArbiter` | Add additional volumes mounts to arbiter pod | `[]` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessModes` | Use volume as ReadOnly or ReadWrite | `[ReadWriteOnce]` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume 
annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use in the primary node (avoids creating one if this is given) | `nil` | +| `useStatefulSet` | Set to true to use StatefulSet instead of Deployment even when replicaSet.enabled=false | `nil` | +| `extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function | `{}` | +| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `initConfigMap.name` | Custom config map with init scripts | `nil` | +| `configmap` | MongoDB configuration file to be used | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.certManager` | Add annotations for cert-manager | `false` | +| `ingress.annotations` | Ingress annotations | `[]` | +| `ingress.hosts[0].name` | Hostname to your MongoDB installation | `mongodb.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.tls[0].hosts[0]` | TLS hosts | `mongodb.local` | +| `ingress.tls[0].secretName` | TLS Secret (certificates) | `mongodb.local-tls` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB exporter image name | `bitnami/mongodb-exporter` | +| `metrics.image.tag` | MongoDB exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `Always` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.podAnnotations.prometheus.io/scrape` | Additional annotations for Metrics exporter pod | `true` | +| `metrics.podAnnotations.prometheus.io/port` | Additional annotations for Metrics exporter pod | `"9216"` | +| `metrics.extraArgs` | String with extra arguments for the MongoDB Exporter | `` | +| `metrics.resources` | Exporter resource requests/limit | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass 
Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `nil` | +| `metrics.serviceMonitor.alerting.rules` | Define individual alerting rules as required | `{}` | +| `metrics.serviceMonitor.alerting.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.livenessProbe.enabled` | Enable/disable the Liveness Check of Prometheus metrics exporter | `false` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial Delay for Liveness Check of Prometheus metrics exporter | `15` | +| `metrics.livenessProbe.periodSeconds` | How often to perform Liveness Check of Prometheus metrics exporter | `5` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout for Liveness Check of Prometheus metrics exporter | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure Threshold for Liveness Check of Prometheus metrics exporter | `3` | +| `metrics.livenessProbe.successThreshold` | Success Threshold for Liveness Check of Prometheus metrics exporter | `1` | +| `metrics.readinessProbe.enabled` | Enable/disable the Readiness Check of Prometheus metrics exporter | `false` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial Delay for Readiness Check of Prometheus metrics exporter | `5` | +| `metrics.readinessProbe.periodSeconds` | How often to perform Readiness Check of Prometheus metrics exporter | `5` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout for Readiness Check of Prometheus metrics exporter | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure Threshold for Readiness Check of Prometheus metrics exporter | `3` | +| `metrics.readinessProbe.successThreshold` | Success Threshold for Readiness Check of Prometheus metrics exporter | `1` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \ + bitnami/mongodb +``` + +The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/mongodb +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Switch to enable/disable replica set configuration: +```diff +- replicaSet.enabled: false ++ replicaSet.enabled: true +``` + +- Start a side-car Prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +- Enable/disable the Liveness Check of Prometheus metrics exporter: +```diff +- metrics.livenessProbe.enabled: false ++ metrics.livenessProbe.enabled: true +``` + +- Enable/disable the Readiness Check of Prometheus metrics exporter: +```diff +- metrics.readinessProbe.enabled: false ++ metrics.readinessProbe.enabled: true +``` + +To horizontally scale this chart, you can modify the number of secondary nodes in your MongoDB replica set via the `replicaSet.replicas.secondary` parameter. + +### Replication + +You can start the MongoDB chart in replica set mode with the following parameter: `replicaSet.enabled=true` + +Some characteristics of this chart are: + +- Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes. +- The number of secondary and arbiter nodes can be scaled out independently. +- It is easy to move an application from using a standalone MongoDB server to using a replica set. + +### Initialize a fresh instance + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. +You can also create a custom ConfigMap and provide it via `initConfigMap` (check the parameters table above for more details). + +The allowed extensions are `.sh` and `.js`. + +## Persistence + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. + +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +## Upgrading + +### To 7.0.0 +From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example: + +```yaml +ingress: + hosts: + - name: mongodb.local + path: / +``` + +### To 6.0.0 + +From this version, `mongodbEnableIPv6` is set to `false` by default in order to work properly in most k8s clusters. If you want to use IPv6 support, you need to set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command. +You can find more information in the [`bitnami/mongodb` image README](https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md).
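+ +The following is a minimal sketch, not part of the upstream chart documentation: it assumes you prefer setting the documented `mongodbEnableIPv6` parameter in a custom values file (hypothetical name `custom-values.yaml`) instead of passing `--set` on the command line. + +```yaml +# custom-values.yaml (hypothetical file name, not shipped with the chart) +# Requires a Kubernetes cluster with IPv6 support enabled. +mongodbEnableIPv6: true +``` + +You would then pass the file at install or upgrade time, for example with `helm install my-release -f custom-values.yaml bitnami/mongodb`.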
+ +### To 5.0.0 + +When enabling replica set configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is `my-release`: + +```console +$ kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false +``` + +## Configure Ingress +MongoDB can be exposed externally using an Ingress controller. To do so, it's necessary to: + +- Install the MongoDB chart setting the parameter `ingress.enabled=true`. +- Create a ConfigMap to map the external port to the internal service/port to which requests should be redirected (see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md for more information). + +For instance, if you installed the MongoDB chart in the `default` namespace, you can install the [stable/nginx-ingress chart](https://github.com/helm/charts/tree/master/stable/nginx-ingress) setting the "tcp" parameter in the **values.yaml** used to install the chart as shown below: + +```yaml +... + +tcp: + 27017: "default/mongodb:27017" +``` diff --git a/mongodb/mongodb/files/docker-entrypoint-initdb.d/README.md b/mongodb/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 0000000..a929990 --- /dev/null +++ b/mongodb/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy your custom .sh or .js files here so they are executed during the first boot of the image. + +More information is available in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/mongodb/mongodb/templates/NOTES.txt b/mongodb/mongodb/templates/NOTES.txt new file mode 100755 index 0000000..294828b --- /dev/null +++ b/mongodb/mongodb/templates/NOTES.txt @@ -0,0 +1,75 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As an alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "mongodb.serviceName" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" .
}} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mongodb.image" . }} --command -- mongo admin --host {{ template "mongodb.serviceName" . }} {{- if .Values.usePassword }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.serviceName" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.serviceName" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.port }} {{- if .Values.usePassword }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.serviceName" . }} {{ .Values.service.port }}:{{ .Values.service.port }} & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} + +{{- include "mongodb.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/mongodb/mongodb/templates/_helpers.tpl b/mongodb/mongodb/templates/_helpers.tpl new file mode 100755 index 0000000..af6fac0 --- /dev/null +++ b/mongodb/mongodb/templates/_helpers.tpl @@ -0,0 +1,265 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "mongodb.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "mongodb.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "mongodb.validateValues.mongodbCustomDatabase" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB - both mongodbUsername and mongodbDatabase are necessary +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb.validateValues.mongodbCustomDatabase" -}} +{{- if or (and .Values.mongodbUsername (not .Values.mongodbDatabase)) (and (not .Values.mongodbUsername) .Values.mongodbDatabase) }} +mongodb: mongodbUsername, mongodbDatabase + Both mongodbUsername and mongodbDatabase must be provided to create + a custom user and database during 1st initialization. + Please set both of them (--set mongodbUsername="xxxx",mongodbDatabase="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "mongodb.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the proper Service name depending if an explicit service name is set +in the values file. If the name is not explicitly set it will take the "mongodb.fullname" +*/}} +{{- define "mongodb.serviceName" -}} + {{- if .Values.service.name -}} + {{ .Values.service.name }} + {{- else -}} + {{ template "mongodb.fullname" .}} + {{- end -}} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. If the name is not set it will default to either mongodb.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "mongodb.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "mongodb.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} diff --git a/mongodb/mongodb/templates/configmap.yaml b/mongodb/mongodb/templates/configmap.yaml new file mode 100755 index 0000000..66dc853 --- /dev/null +++ b/mongodb/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/mongodb/mongodb/templates/deployment-standalone.yaml b/mongodb/mongodb/templates/deployment-standalone.yaml new file mode 100755 index 0000000..047c5a4 --- /dev/null +++ b/mongodb/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,305 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: {{ if .Values.useStatefulSet }}{{ "StatefulSet" }}{{- else }}{{ "Deployment" }}{{- end }} +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + {{- with .Values.labels }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + {{- if .Values.useStatefulSet }} + serviceName: {{ template "mongodb.serviceName" . }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + {{- end }} + spec: + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- include "mongodb.imagePullSecrets" . | indent 6 }} + initContainers: + {{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 8}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + {{- if .Values.usePassword }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-root-password + {{- end }} + {{- if .Values.mongodbUsername }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + {{- end }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongodbDatabase }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "mongodb.tplValue" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]") (.Values.initConfigMap) }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-root-password + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://root:${MONGODB_ROOT_PASSWORD}@localhost:{{ .Values.service.port }}/admin {{ .Values.metrics.extraArgs }}' ] + {{- else }} + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://localhost:{{ .Values.service.port }} {{ .Values.metrics.extraArgs }}' ] + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + resources: +{{ toYaml .Values.metrics.resources | indent 12 }} +{{- end }} +{{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} +{{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if (.Values.initConfigMap) }} + - name: custom-init-scripts + configMap: + name: {{ .Values.initConfigMap.name }} + {{- end }} + - name: data + {{- if not .Values.useStatefulSet }} + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8}} + {{- end }} +{{- if .Values.useStatefulSet }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "mongodb.storageClass" . }} +{{- else }} + - name: data + emptyDir: {} +{{- end }} +{{- end }} +{{- end -}} diff --git a/mongodb/mongodb/templates/ingress.yaml b/mongodb/mongodb/templates/ingress.yaml new file mode 100755 index 0000000..669c33b --- /dev/null +++ b/mongodb/mongodb/templates/ingress.yaml @@ -0,0 +1,33 @@ +{{- if .Values.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "mongodb.serviceName" $ }} + servicePort: mongodb + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end }} +{{- end }} diff --git a/mongodb/mongodb/templates/initialization-configmap.yaml b/mongodb/mongodb/templates/initialization-configmap.yaml new file mode 100755 index 0000000..02da7df --- /dev/null +++ b/mongodb/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ tpl (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]").AsConfig . | indent 2 }} +{{ end }} diff --git a/mongodb/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/mongodb/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100755 index 0000000..b97fb72 --- /dev/null +++ b/mongodb/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.replicaSet.enabled .Values.replicaSet.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + {{- if .Values.replicaSet.pdb.minAvailable }} + {{- if .Values.replicaSet.pdb.minAvailable.arbiter }} + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.pdb.maxUnavailable }} + {{- if .Values.replicaSet.pdb.maxUnavailable.arbiter }} + maxUnavailable: {{ .Values.replicaSet.pdb.maxUnavailable.arbiter }} + {{- end }} + {{- end }} + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter +{{- end }} \ No newline at end of file diff --git a/mongodb/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/mongodb/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100755 index 0000000..1fc2cdf --- /dev/null +++ b/mongodb/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.replicaSet.enabled .Values.replicaSet.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . 
}}-secondary +spec: + {{- if .Values.replicaSet.pdb.minAvailable }} + {{- if .Values.replicaSet.pdb.minAvailable.secondary }} + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.pdb.maxUnavailable }} + {{- if .Values.replicaSet.pdb.maxUnavailable.secondary }} + maxUnavailable: {{ .Values.replicaSet.pdb.maxUnavailable.secondary }} + {{- end }} + {{- end }} + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: secondary +{{- end }} diff --git a/mongodb/mongodb/templates/prometheus-alerting-rule.yaml b/mongodb/mongodb/templates/prometheus-alerting-rule.yaml new file mode 100755 index 0000000..e6d4d4c --- /dev/null +++ b/mongodb/mongodb/templates/prometheus-alerting-rule.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled .Values.metrics.serviceMonitor.alerting.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- if .Values.metrics.serviceMonitor.alerting.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.alerting.additionalLabels | indent 4 }} + {{- end }} +spec: + groups: +{{ toYaml .Values.metrics.serviceMonitor.alerting.rules | indent 4 }} +{{- end }} diff --git a/mongodb/mongodb/templates/prometheus-service-monitor.yaml b/mongodb/mongodb/templates/prometheus-service-monitor.yaml new file mode 100755 index 0000000..8900b34 --- /dev/null +++ b/mongodb/mongodb/templates/prometheus-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "mongodb.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - interval: 30s + port: metrics + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: +{{ toYaml .Values.metrics.serviceMonitor.relabellings | indent 4 }} + {{- end }} + jobLabel: {{ template "mongodb.fullname" . }} + namespaceSelector: + matchNames: + - "{{ $.Release.Namespace }}" + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end }} diff --git a/mongodb/mongodb/templates/pvc-standalone.yaml b/mongodb/mongodb/templates/pvc-standalone.yaml new file mode 100755 index 0000000..f4e114d --- /dev/null +++ b/mongodb/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) (not .Values.useStatefulSet) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . 
}} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "mongodb.storageClass" . }} +{{- end }} diff --git a/mongodb/mongodb/templates/secrets.yaml b/mongodb/mongodb/templates/secrets.yaml new file mode 100755 index 0000000..bf644cb --- /dev/null +++ b/mongodb/mongodb/templates/secrets.yaml @@ -0,0 +1,32 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/mongodb/mongodb/templates/serviceaccount.yml b/mongodb/mongodb/templates/serviceaccount.yml new file mode 100755 index 0000000..8310abc --- /dev/null +++ b/mongodb/mongodb/templates/serviceaccount.yml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "mongodb.serviceAccountName" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +secrets: + - name: {{ template "mongodb.fullname" . }} +{{- end }} diff --git a/mongodb/mongodb/templates/statefulset-arbiter-rs.yaml b/mongodb/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100755 index 0000000..525c8fb --- /dev/null +++ b/mongodb/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,191 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "mongodb.fullname" . }}-arbiter + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.labels }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinityArbiter }} + affinity: +{{ toYaml .Values.affinityArbiter | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- include "mongodb.imagePullSecrets" . | indent 6 }} + {{- if .Values.extraInitContainers }} + initContainers: +{{ tpl .Values.extraInitContainers . | indent 6}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.serviceName" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "mongodb.tplValue" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resourcesArbiter | indent 12 }} +{{- if .Values.extraVolumeMountsArbiter }} + volumeMounts: +{{ toYaml .Values.extraVolumeMountsArbiter | indent 12}} +{{- end }} +{{- if .Values.sidecarsArbiter }} +{{ toYaml .Values.sidecarsArbiter | indent 8 }} +{{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- if .Values.extraVolumesArbiter }} +{{ toYaml .Values.extraVolumesArbiter | indent 8 }} + {{- end }} +{{- end }} diff --git a/mongodb/mongodb/templates/statefulset-primary-rs.yaml b/mongodb/mongodb/templates/statefulset-primary-rs.yaml new file mode 100755 index 0000000..8d69f43 --- /dev/null +++ b/mongodb/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,317 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "mongodb.fullname" . }}-primary + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.labels }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + {{- end }} + spec: + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- include "mongodb.imagePullSecrets" . | indent 6 }} + initContainers: + {{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.mongodbUsername }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + {{- end }} + {{- if .Values.mongodbDatabase }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- end }} + {{- if .Values.usePassword }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "mongodb.tplValue" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]") (.Values.initConfigMap) }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 12}} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin {{ .Values.metrics.extraArgs }}' ] + {{- else }} + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://localhost:{{ .Values.service.port }} {{ .Values.metrics.extraArgs }}' ] + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + resources: +{{ toYaml .Values.metrics.resources | indent 12 }} +{{- end }} +{{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 8 }} +{{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js|json]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if (.Values.initConfigMap) }} + - name: custom-init-scripts + configMap: + name: {{ .Values.initConfigMap.name }} + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8}} + {{- end }} +{{- if .Values.persistence.enabled }} + {{- if.Values.persistence.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "mongodb.storageClass" . 
}} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/mongodb/mongodb/templates/statefulset-secondary-rs.yaml b/mongodb/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100755 index 0000000..2f82245 --- /dev/null +++ b/mongodb/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,285 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "mongodb.fullname" . }}-secondary + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.labels }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + {{- end }} + spec: + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- include "mongodb.imagePullSecrets" . | indent 6 }} + initContainers: + {{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.mountPath }}"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + {{- if .Values.image.debug}} + - name: BITNAMI_DEBUG + value: "true" + {{- end }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.mongodbSystemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + {{- if .Values.mongodbDisableSystemLog }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + {{- if .Values.mongodbDirectoryPerDB }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + {{- if .Values.mongodbExtraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.mongodbExtraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "mongodb.tplValue" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 12}} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://root:`echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g"`@localhost:{{ .Values.service.port }}/admin {{ .Values.metrics.extraArgs }}' ] + {{- else }} + command: [ 'sh', '-c', '/bin/mongodb_exporter --mongodb.uri mongodb://localhost:{{ .Values.service.port }} {{ .Values.metrics.extraArgs }}' ] + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + resources: +{{ toYaml .Values.metrics.resources | indent 12 }} +{{- end }} +{{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 8 }} +{{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "mongodb.storageClass" . }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/mongodb/mongodb/templates/svc-headless-rs.yaml b/mongodb/mongodb/templates/svc-headless-rs.yaml new file mode 100755 index 0000000..92f1141 --- /dev/null +++ b/mongodb/mongodb/templates/svc-headless-rs.yaml @@ -0,0 +1,23 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- with .Values.service.annotations }} + annotations: {{ tpl (toYaml .) 
$ | nindent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/mongodb/mongodb/templates/svc-primary-rs.yaml b/mongodb/mongodb/templates/svc-primary-rs.yaml new file mode 100755 index 0000000..7815068 --- /dev/null +++ b/mongodb/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,44 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.serviceName" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- with .Values.service.annotations }} + annotations: {{ tpl (toYaml .) $ | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mongodb + port: {{ .Values.service.port }} + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + port: 9216 + targetPort: metrics +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/mongodb/mongodb/templates/svc-standalone.yaml b/mongodb/mongodb/templates/svc-standalone.yaml new file mode 100755 index 0000000..d327b9d --- /dev/null +++ b/mongodb/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.serviceName" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- with .Values.service.annotations }} + annotations: {{ tpl (toYaml .) $ | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mongodb + port: {{ .Values.service.port }} + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + port: 9216 + targetPort: metrics +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/mongodb/mongodb/values-production.yaml b/mongodb/mongodb/values-production.yaml new file mode 100755 index 0000000..2be5fd2 --- /dev/null +++ b/mongodb/mongodb/values-production.yaml @@ -0,0 +1,513 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.2.5-debian-10-r54 + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: false + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mongodb.fullname template +## +# fullnameOverride: + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +# Add custom extra environment variables to all the MongoDB containers +# extraEnvVars: + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
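+    ## Illustrative sketch only (not part of the upstream chart): a pull secret
+    ## referenced via "pullSecrets" has to be created in the release namespace
+    ## beforehand. A minimal manifest for it could look like the commented example
+    ## below; the name "myRegistryKeySecretName" just mirrors the placeholder used
+    ## underneath and is hypothetical.
+    ##
+    ## apiVersion: v1
+    ## kind: Secret
+    ## metadata:
+    ##   name: myRegistryKeySecretName
+    ## type: kubernetes.io/dockerconfigjson
+    ## data:
+    ##   .dockerconfigjson: <base64-encoded contents of a docker config.json>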
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +mongodbDirectoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +mongodbSystemLogVerbosity: 0 +mongodbDisableSystemLog: false + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + ## Specify an explicit service name. + # name: svc-mongo + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + type: ClusterIP + # clusterIP: None + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + # externalIPs: [] + + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. 
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: [] + +## Use StatefulSet instead of Deployment when deploying standalone +useStatefulSet: false + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + enabled: true + minAvailable: + secondary: 1 + arbiter: 1 + # maxUnavailable: + # secondary: 1 + # arbiter: 1 + +# Annotations to be added to the deployment or statefulsets +annotations: {} + +# Additional labels to apply to the deployment or statefulsets +labels: {} + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +# Additional pod labels to apply +podLabels: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# Define separate resources per arbiter, which are less then primary or secondary +# used only when replica set is enabled +resourcesArbiter: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} +# Define separate affinity for arbiter pod +affinityArbiter: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Add sidecars to the pod +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecars: [] +## Array to add extra volumes +## +extraVolumes: [] +## Array to add extra mounts (normally used with extraVolumes) +## +extraVolumeMounts: [] + +## Add sidecars to the arbiter pod +# used only when replica set is enabled +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecarsArbiter: [] +## Array to add extra volumes to the arbiter +# used only when replica set is enabled +## +extraVolumesArbiter: [] +## Array to add extra mounts (normally used with extraVolumes) to the arbiter +# used only when replica set is enabled +## +extraVolumeMountsArbiter: [] + +## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + 
enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure the ingress resource that allows you to access the +## MongoDB installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: mongodb.local + path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + tls: + - hosts: + - mongodb.local + secretName: mongodb.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: airflow.local-tls + # key: + # certificate: + +## Configure the options for init containers to be run before the main app containers +## are started. All init containers are run sequentially and must exit without errors +## for the next one to be started. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. +# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.10.0-debian-10-r79 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotation + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/mongodb/mongodb/values.schema.json b/mongodb/mongodb/values.schema.json new file mode 100755 index 0000000..9bf39e5 --- /dev/null +++ b/mongodb/mongodb/values.schema.json @@ -0,0 +1,147 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Enable password authentication", + "form": true + }, + "mongodbRootPassword": { + "type": "string", + "title": "MongoDB admin password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "mongodbDatabase": { + "type": "string", + "title": "MongoDB custom database", + "description": "Name of the custom database to be created during the 1st initialization of MongoDB", + "form": true + }, + "mongodbUsername": { + "type": "string", + "title": "MongoDB custom user", + "description": "Name of the custom user to be created during the 1st initialization of MongoDB. 
This user only has permissions on the MongoDB custom database", + "form": true + }, + "mongodbPassword": { + "type": "string", + "title": "Password for MongoDB custom user", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "replicaSet": { + "type": "object", + "title": "Replicaset configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable replicaset configuration" + }, + "replicas": { + "type": "object", + "title": "Number of replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replicaSet.enabled" + }, + "properties": { + "secondary": { + "type": "integer", + "title": "Secondary node replicas", + "description": "Number of secondary node replicas to deploy", + "form": true + }, + "arbiter": { + "type": "integer", + "title": "Arbiter node replicas", + "description": "Number of arbiter node replicas to deploy", + "form": true + } + } + } + } + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "persistence.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "hidden": { + "condition": false, + "value": "persistence.enabled" + }, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/mongodb/mongodb/values.yaml b/mongodb/mongodb/values.yaml new file mode 100755 index 0000000..cb426da --- /dev/null +++ b/mongodb/mongodb/values.yaml @@ -0,0 +1,515 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.2.5-debian-10-r54 + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: 
IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: false + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mongodb.fullname template +## +# fullnameOverride: + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +mongodbDirectoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +mongodbSystemLogVerbosity: 0 +mongodbDisableSystemLog: false + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + ## Specify an explicit service name. + # name: svc-mongo + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + type: ClusterIP + # clusterIP: None + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
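+  ## Illustrative sketch (values are hypothetical): pinning the standalone service
+  ## to a fixed NodePort combines the two keys, e.g.
+  ##
+  ##   type: NodePort
+  ##   nodePort: 30017
+  ##
+  ## where 30017 is an arbitrary port inside the default NodePort range
+  ## (30000-32767).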
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + # externalIPs: [] + + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: [] + +# Add custom extra environment variables to all the MongoDB containers +# extraEnvVars: + +## Use StatefulSet instead of Deployment when deploying standalone +useStatefulSet: false + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + enabled: true + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + # maxUnavailable: + # primary: 1 + # secondary: 1 + # arbiter: 1 + +# Annotations to be added to the deployment or statefulsets +annotations: {} + +# Additional labels to apply to the deployment or statefulsets +labels: {} + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +# Additional pod labels to apply +podLabels: {} + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# Define separate resources per arbiter, which are less then primary or secondary +# used only when replica set is enabled +resourcesArbiter: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} +# Define separate affinity for arbiter pod +affinityArbiter: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Add sidecars to the pod +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecars: [] +## Array to add extra volumes +## +extraVolumes: [] +## Array to add extra mounts (normally used with extraVolumes) +## +extraVolumeMounts: [] + +## Add sidecars to the arbiter pod +# used only when replica set is enabled +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecarsArbiter: [] +## Array to add extra volumes to the arbiter +# used only when replica set is enabled +## +extraVolumesArbiter: [] +## Array to add extra mounts (normally used with extraVolumes) to the arbiter +# used only when replica set is enabled +## +extraVolumeMountsArbiter: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure the ingress resource that allows you to access the +## MongoDB installation. 
Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: mongodb.local + path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + tls: + - hosts: + - mongodb.local + secretName: mongodb.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: airflow.local-tls + # key: + # certificate: + +## Configure the options for init containers to be run before the main app containers +## are started. All init containers are run sequentially and must exit without errors +## for the next one to be started. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. 
+# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.10.0-debian-10-r79 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + livenessProbe: + enabled: false + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/mongodb/requirements.lock b/mongodb/requirements.lock new file mode 100755 index 0000000..035c256 --- /dev/null +++ b/mongodb/requirements.lock @@ -0,0 +1,6 @@
+dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.8.1 +digest: sha256:ad106a61ddcf8b78033635f756554bda2de59183ca30ef9b6642a392eb832c3a +generated: "2020-10-12T00:00:59.94799946Z" diff --git a/mongodb/requirements.yaml b/mongodb/requirements.yaml new file mode 100755 index 0000000..0c2495d --- /dev/null +++ b/mongodb/requirements.yaml @@ -0,0 +1,6 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common diff --git a/mongodb/templates/NOTES.txt b/mongodb/templates/NOTES.txt new file mode 100755 index 0000000..9c26110 --- /dev/null +++ b/mongodb/templates/NOTES.txt @@ -0,0 +1,193 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $portNumber := int .Values.service.port }} +{{- $fullname := include "mongodb.fullname" . }} +{{- $releaseNamespace := include "mongodb.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +############################################################################### +### ERROR: You enabled external access to MongoDB nodes without specifying ### +### the array of load balancer IPs for MongoDB nodes. ### +############################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for MongoDB nodes. To complete your deployment, follow the steps below: + +1. Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $e, $i := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. Upgrade your chart: + + helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \ + --set mongodb.replicaCount={{ $replicaCount }} \ + --set mongodb.externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set mongodb.externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set mongodb.externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (and (eq .Values.architecture "standalone") (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort"))) (and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled)) (not .Values.auth.enabled) }} +------------------------------------------------------------------------------- + WARNING + + By not enabling "mongodb.auth.enabled" you have most likely exposed the + MongoDB service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you enable authentication + by setting the "mongodb.auth.enabled" parameter to "true".
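+
+  For example (illustrative only; substitute your own release name and chart reference):
+
+      helm upgrade my-release bitnami/mongodb --set mongodb.auth.enabled=true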
+ +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port {{ .Values.service.port }} on the following DNS name(s) from within your cluster: + +{{- if eq .Values.architecture "replicaset" }} + + {{ range $e, $i := until $replicaCount }} + {{- $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + {{ end }} + +{{- else }} + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{- if .Values.auth.enabled }} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.auth.username .Values.auth.database .Values.auth.password }} + +To get the password for "{{ .Values.auth.username }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} + +To connect to your database, create a MongoDB client container: + + kubectl run --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mongodb.image" . }} --command -- bash + +Then, run the following command: + + {{- if eq .Values.architecture "replicaset" }} + mongo admin --host "{{- range $e, $i := until $replicaCount }}{{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},{{ end }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- else }} + mongo admin --host "{{ template "mongodb.fullname" . }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- end }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }} + +To connect to your database nodes from outside, you need to add both the primary and secondary node hostnames/IPs to your Mongo client. To obtain them, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + MongoDB nodes domain: Use your provided hostname to reach the MongoDB nodes, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + MongoDB nodes domain: You can reach the MongoDB nodes on any of the K8s nodes' external IPs. + + kubectl get nodes -o wide + +{{- end }} + + MongoDB nodes port: You will have a different node port for each MongoDB node. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" .
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -w' + + MongoDB nodes domain: You will have a different external IP for each MongoDB node. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + MongoDB nodes port: {{ .Values.externalAccess.service.port }} + +{{- end }} + +{{- else if eq .Values.architecture "standalone" }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ template "mongodb.namespace" . }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.port }} {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ template "mongodb.namespace" . }} svc/{{ template "mongodb.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} & + mongo --host 127.0.0.1 {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the MongoDB Prometheus metrics, get the MongoDB Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "mongodb.fullname" . }}-metrics {{ .Values.metrics.service.port }}:{{ .Values.metrics.service.port }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "mongodb.validateValues" . }} +{{- $secretName := include "mongodb.fullname" . 
-}} +{{- $requiredPasswords := list -}} + +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + +{{- $requiredRootPassword := dict "valueKey" "auth.rootPassword" "secret" $secretName "field" "mongodb-root-password" "context" $ -}} +{{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + +{{- if and .Values.auth.username .Values.auth.database }} + {{- $requiredDBPassword := dict "valueKey" "auth.password" "secret" $secretName "field" "mongodb-password" "context" $ -}} + {{- $requiredPasswords = append $requiredPasswords $requiredDBPassword -}} +{{- end -}} + + +{{- if eq .Values.architecture "replicaset" }} + {{- $requiredReplicaSetKey := dict "valueKey" "auth.replicaSetKey" "secret" $secretName "field" "mongodb-replica-set-key" "context" $ -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} +{{- end -}} + +{{- $requiredPasswordValidationErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" $) -}} +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredPasswordValidationErrors) "context" $) -}} +{{- end }} diff --git a/mongodb/templates/_helpers.tpl b/mongodb/templates/_helpers.tpl new file mode 100755 index 0000000..f35abb1 --- /dev/null +++ b/mongodb/templates/_helpers.tpl @@ -0,0 +1,266 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- include "common.names.fullname" . -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "mongodb.externalAccess.autoDiscovery.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
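+(.Values.global.namespaceOverride, when set, takes precedence; otherwise .Release.Namespace is used.)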
+*/}} +{{- define "mongodb.namespace" -}} + {{- if .Values.global -}} + {{- if .Values.global.namespaceOverride }} + {{- .Values.global.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end }} +{{- end -}} +{{- define "mongodb.serviceMonitor.namespace" -}} + {{- if .Values.metrics.serviceMonitor.namespace -}} + {{- .Values.metrics.serviceMonitor.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} +{{- define "mongodb.prometheusRule.namespace" -}} + {{- if .Values.metrics.prometheusRule.namespace -}} + {{- .Values.metrics.prometheusRule.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} + +{{/* +Returns the proper service account name depending on whether an explicit service account name is set +in the values file. If the name is not set, it will default to mongodb.fullname if serviceAccount.create +is true, or to "default" otherwise. +*/}} +{{- define "mongodb.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "mongodb.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB configuration +*/}} +{{- define "mongodb.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB +*/}} +{{- define "mongodb.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MongoDB credentials +*/}} +{{- define "mongodb.secretName" -}} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" .Values.auth.existingSecret -}} + {{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MongoDB +*/}} +{{- define "mongodb.createSecret" -}} +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mongodb.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if the Arbiter should be deployed +*/}} +{{- define "mongodb.arbiter.enabled" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB configuration for the Arbiter +*/}} +{{- define "mongodb.arbiter.configmapName" -}} +{{- if .Values.arbiter.existingConfigmap -}} + {{- printf "%s" (tpl .Values.arbiter.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-arbiter" (include "mongodb.fullname" .
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB Arbiter +*/}} +{{- define "mongodb.arbiter.createConfigmap" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled .Values.arbiter.configuration (not .Values.arbiter.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "mongodb.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.customDatabase" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.loadBalancerIPsListLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of MongoDB - must provide a valid architecture */}} +{{- define "mongodb.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replicaset") -}} +mongodb: architecture + Invalid architecture selected. Valid values are "standalone" and + "replicaset". Please set a valid architecture (--set mongodb.architecture="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB - both auth.username and auth.database are necessary +to create a custom user and database during the first initialization +*/}} +{{- define "mongodb.validateValues.customDatabase" -}} +{{- if or (and .Values.auth.username (not .Values.auth.database)) (and (not .Values.auth.username) .Values.auth.database) }} +mongodb: auth.username, auth.database + Both auth.username and auth.database must be provided to create + a custom user and database during the first initialization. + Please set both of them (--set auth.username="xxxx",auth.database="yyyy") +{{- end -}} +{{- end -}} + + +{{/* +Validate values of MongoDB - service type for external access +*/}} +{{- define "mongodb.validateValues.externalAccessServiceType" -}} +{{- if and (eq .Values.architecture "replicaset") (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) -}} +mongodb: externalAccess.service.type + Available service types for external access are NodePort or LoadBalancer. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB - the number of replicas must be the same as the LoadBalancer IPs list length +*/}} +{{- define "mongodb.validateValues.loadBalancerIPsListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled ) (eq .Values.externalAccess.service.type "LoadBalancer") (not (eq $replicaCount $loadBalancerListLength )) -}} +mongodb: .Values.externalAccess.service.loadBalancerIPs + Number of replicas and loadBalancerIPs array length must be the same.
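+    Please provide one IP per replica, e.g. for replicaCount=2: (--set externalAccess.service.loadBalancerIPs[0]="xxxx",externalAccess.service.loadBalancerIPs[1]="yyyy")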
+{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB - the number of replicas must be the same as the NodePort list length +*/}} +{{- define "mongodb.validateValues.nodePortListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "NodePort") (not (eq $replicaCount $nodePortListLength )) -}} +mongodb: .Values.externalAccess.service.nodePorts + Number of replicas and nodePorts array length must be the same. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB - RBAC should be enabled when autoDiscovery is enabled +*/}} +{{- define "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create )}} +mongodb: rbac.create + By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true" + an initContainer will be used to autodetect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". +{{- end -}} +{{- end -}} diff --git a/mongodb/templates/arbiter/configmap.yaml b/mongodb/templates/arbiter/configmap.yaml new file mode 100755 index 0000000..1971200 --- /dev/null +++ b/mongodb/templates/arbiter/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (include "mongodb.arbiter.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }}-arbiter + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: arbiter +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/mongodb/templates/arbiter/headless-svc.yaml b/mongodb/templates/arbiter/headless-svc.yaml new file mode 100755 index 0000000..85d8ac9 --- /dev/null +++ b/mongodb/templates/arbiter/headless-svc.yaml @@ -0,0 +1,21 @@ +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.fullname" . }}-arbiter-headless + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.service.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-mongodb + port: {{ .Values.service.port }} + targetPort: mongodb + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/mongodb/templates/arbiter/pdb.yaml b/mongodb/templates/arbiter/pdb.yaml new file mode 100755 index 0000000..6cc024c --- /dev/null +++ b/mongodb/templates/arbiter/pdb.yaml @@ -0,0 +1,19 @@ +{{- if and (include "mongodb.arbiter.enabled" .) .Values.arbiter.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "mongodb.fullname" . }}-arbiter + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: arbiter +spec: + {{- if .Values.arbiter.pdb.minAvailable }} + minAvailable: {{ .Values.arbiter.pdb.minAvailable }} + {{- end }} + {{- if .Values.arbiter.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.arbiter.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/mongodb/templates/arbiter/statefulset.yaml b/mongodb/templates/arbiter/statefulset.yaml new file mode 100755 index 0000000..925e10c --- /dev/null +++ b/mongodb/templates/arbiter/statefulset.yaml @@ -0,0 +1,181 @@ +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "mongodb.fullname" . }}-arbiter + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.arbiter.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.arbiter.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ include "mongodb.fullname" . }}-arbiter-headless + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: arbiter + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: arbiter + {{- if .Values.arbiter.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.arbiter.createConfigmap" .) .Values.arbiter.podAnnotations }} + annotations: + {{- if (include "mongodb.arbiter.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/arbiter/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.arbiter.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.arbiter.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.affinity "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.priorityClassName }} + priorityClassName: {{ .Values.arbiter.priorityClassName }} + {{- end }} + {{- if .Values.arbiter.podSecurityContext.enabled }} + securityContext: {{- omit .Values.arbiter.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: mongodb-arbiter + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.arbiter.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.arbiter.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.fullname" . }}-arbiter-headless" + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: "{{ include "mongodb.fullname" . }}-0.{{ include "mongodb.fullname" . }}-headless.$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- if .Values.auth.enabled }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.arbiter.extraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.arbiter.extraFlags | join " " | quote }} + {{- end }} + {{- if .Values.arbiter.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.extraEnvVarsCM .Values.arbiter.extraEnvVarsSecret }} + envFrom: + {{- if .Values.arbiter.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.arbiter.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.arbiter.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.arbiter.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - containerPort: 27017 + name: mongodb + {{- if .Values.arbiter.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.arbiter.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.arbiter.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.arbiter.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.arbiter.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.arbiter.livenessProbe.failureThreshold }} + {{- else if .Values.arbiter.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.arbiter.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.arbiter.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.arbiter.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.arbiter.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.arbiter.readinessProbe.failureThreshold }} + {{- else if .Values.arbiter.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.resources }} + resources: {{- toYaml .Values.arbiter.resources | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumeMounts }} + volumeMounts: + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.arbiter.extraVolumeMounts }} + {{- toYaml .Values.arbiter.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.arbiter.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumes }} + volumes: + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.arbiter.configmapName" . }} + {{- end }} + {{- if .Values.arbiter.extraVolumes }} + {{- toYaml .Values.arbiter.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/mongodb/templates/configmap.yaml b/mongodb/templates/configmap.yaml new file mode 100755 index 0000000..158c101 --- /dev/null +++ b/mongodb/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (include "mongodb.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/mongodb/templates/initialization-configmap.yaml b/mongodb/templates/initialization-configmap.yaml new file mode 100755 index 0000000..245aceb --- /dev/null +++ b/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }}-init-scripts + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{- end }} diff --git a/mongodb/templates/metrics-svc.yaml b/mongodb/templates/metrics-svc.yaml new file mode 100755 index 0000000..bbe7ea4 --- /dev/null +++ b/mongodb/templates/metrics-svc.yaml @@ -0,0 +1,21 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.fullname" . }}-metrics + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.service.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + protocol: TCP + name: http-metrics + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/mongodb/templates/prometheusrule.yaml b/mongodb/templates/prometheusrule.yaml new file mode 100755 index 0000000..aad3571 --- /dev/null +++ b/mongodb/templates/prometheusrule.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.prometheusRule.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + groups: + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.rules "context" $) | nindent 4 }} +{{- end }} diff --git a/mongodb/templates/replicaset/external-access-svc.yaml b/mongodb/templates/replicaset/external-access-svc.yaml new file mode 100755 index 0000000..2e12103 --- /dev/null +++ b/mongodb/templates/replicaset/external-access-svc.yaml @@ -0,0 +1,45 @@ +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . 
}} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $fullName }}-{{ $i }}-external + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + pod: {{ $targetPod }} + {{- if $root.Values.externalAccess.service.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.service.portName }} + port: {{ $root.Values.externalAccess.service.port }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: mongodb + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/mongodb/templates/replicaset/headless-svc.yaml b/mongodb/templates/replicaset/headless-svc.yaml new file mode 100755 index 0000000..39c4874 --- /dev/null +++ b/mongodb/templates/replicaset/headless-svc.yaml @@ -0,0 +1,22 @@ +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.fullname" . }}-headless + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.service.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: mongodb + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/mongodb/templates/replicaset/pdb.yaml b/mongodb/templates/replicaset/pdb.yaml new file mode 100755 index 0000000..cab61ba --- /dev/null +++ b/mongodb/templates/replicaset/pdb.yaml @@ -0,0 +1,19 @@ +{{- if and (eq .Values.architecture "replicaset") .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/mongodb/templates/replicaset/scripts-configmap.yaml b/mongodb/templates/replicaset/scripts-configmap.yaml new file mode 100755 index 0000000..f6b197c --- /dev/null +++ b/mongodb/templates/replicaset/scripts-configmap.yaml @@ -0,0 +1,88 @@ +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }}-scripts + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb +data: + {{- $fullname := include "mongodb.fullname" . }} + {{- $releaseNamespace := include "mongodb.namespace" . }} + {{- if and .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + setup.sh: |- + #!/bin/bash + + {{- if .Values.externalAccess.enabled }} + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})" + {{- else }} + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if .Values.externalAccess.service.domain }} + export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.service.domain }} + {{- else }} + export MONGODB_ADVERTISED_HOSTNAME=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- end }} + {{- end }} + + echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME" + + if [[ "$MY_POD_NAME" = "{{ $fullname }}-0" ]]; then + echo "Pod name matches initial primary pod name, configuring node as a primary" + export MONGODB_REPLICA_SET_MODE="primary" + else + echo "Pod name doesn't match initial primary pod name, configuring node as a secondary" + export MONGODB_REPLICA_SET_MODE="secondary" + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER" + export MONGODB_ROOT_PASSWORD="" MONGODB_USERNAME="" MONGODB_DATABASE="" MONGODB_PASSWORD="" + fi + +
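+      # Hand off to the Bitnami entrypoint and run scripts, which start mongod using the MONGODB_* variables exported above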
exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh +{{- end }} diff --git a/mongodb/templates/replicaset/statefulset.yaml b/mongodb/templates/replicaset/statefulset.yaml new file mode 100755 index 0000000..8950332 --- /dev/null +++ b/mongodb/templates/replicaset/statefulset.yaml @@ -0,0 +1,375 @@ +{{- if eq .Values.architecture "replicaset" }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ include "mongodb.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.strategyType }} + {{- if (eq "OnDelete" .Values.strategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + args: + - | + mkdir -p {{ .Values.persistence.mountPath }} + {{- if and .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled }} + chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: auto-discovery + image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- else }} + command: + - /scripts/setup.sh + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.fullname" . }}-headless" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: "{{ include "mongodb.fullname" . }}-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + {{- if and .Values.replicaSetHostnames (not .Values.externalAccess.enabled) }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.auth.username }} + - name: MONGODB_USERNAME + value: {{ .Values.auth.username | quote }} + {{- end }} + {{- if .Values.auth.database }} + - name: MONGODB_DATABASE + value: {{ .Values.auth.database | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and .Values.auth.username .Values.auth.database }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- if .Values.extraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.extraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - containerPort: 27017 + name: mongodb + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + mountPath: /shared + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -ec + args: + - | + {{- if .Values.auth.enabled }} + /bin/mongodb_exporter --mongodb.uri mongodb://root:$(echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g")@localhost:27017/admin{{ .Values.metrics.extraUri }} {{ .Values.metrics.extraFlags }} + {{- else }} + /bin/mongodb_exporter --mongodb.uri mongodb://localhost:27017/admin{{ .Values.metrics.extraUri }} {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-root-password + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + emptyDir: {} + {{- end }} + - name: scripts + configMap: + name: {{ include "mongodb.fullname" . }}-scripts + defaultMode: 0755 + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} +{{- end }} diff --git a/mongodb/templates/role.yaml b/mongodb/templates/role.yaml new file mode 100755 index 0000000..f8eda3d --- /dev/null +++ b/mongodb/templates/role.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end }} diff --git a/mongodb/templates/rolebinding.yaml b/mongodb/templates/rolebinding.yaml new file mode 100755 index 0000000..4e24df5 --- /dev/null +++ b/mongodb/templates/rolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +roleRef: + kind: Role + name: {{ include "mongodb.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . }} +{{- end }} diff --git a/mongodb/templates/secrets.yaml b/mongodb/templates/secrets.yaml new file mode 100755 index 0000000..5b8c3b2 --- /dev/null +++ b/mongodb/templates/secrets.yaml @@ -0,0 +1,30 @@ +{{- if (include "mongodb.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ template "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb +type: Opaque +data: + {{- if .Values.auth.rootPassword }} + mongodb-root-password: {{ .Values.auth.rootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- if and .Values.auth.username .Values.auth.database }} + {{- if .Values.auth.password }} + mongodb-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if eq .Values.architecture "replicaset" }} + {{- if .Values.auth.replicaSetKey }} + mongodb-replica-set-key: {{ .Values.auth.replicaSetKey | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/mongodb/templates/serviceaccount.yaml b/mongodb/templates/serviceaccount.yaml new file mode 100755 index 0000000..bac0a86 --- /dev/null +++ b/mongodb/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +secrets: + - name: {{ template "mongodb.fullname" . }} +{{- end }} diff --git a/mongodb/templates/servicemonitor.yaml b/mongodb/templates/servicemonitor.yaml new file mode 100755 index 0000000..5dae1cc --- /dev/null +++ b/mongodb/templates/servicemonitor.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.serviceMonitor.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - "{{ include "mongodb.namespace" . }}" + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/mongodb/templates/standalone/dep-sts.yaml b/mongodb/templates/standalone/dep-sts.yaml new file mode 100755 index 0000000..157d734 --- /dev/null +++ b/mongodb/templates/standalone/dep-sts.yaml @@ -0,0 +1,313 @@ +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: {{ if .Values.useStatefulSet }}apps/v1{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }} +kind: {{ if .Values.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }} +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.useStatefulSet }} + serviceName: {{ include "mongodb.fullname" . }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + type: {{ .Values.strategyType }} + {{- if or (and (not .Values.useStatefulSet) (eq "Recreate" .Values.strategyType)) (and .Values.useStatefulSet (eq "OnDelete" .Values.strategyType)) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + args: + - | + mkdir -p {{ .Values.persistence.mountPath }} + {{- if and .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled }} + chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + {{- if .Values.auth.username }} + - name: MONGODB_USERNAME + value: {{ .Values.auth.username | quote }} + {{- end }} + {{- if .Values.auth.database }} + - name: MONGODB_DATABASE + value: {{ .Values.auth.database | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and .Values.auth.username .Values.auth.database }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-root-password + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- if .Values.extraFlags }} + - name: MONGODB_EXTRA_FLAGS + value: {{ .Values.extraFlags | join " " | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . | quote }} + {{- end }} + {{- end }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - pgrep + - mongod + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -ec + args: + - | + {{- if .Values.auth.enabled }} + /bin/mongodb_exporter --mongodb.uri mongodb://root:$(echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g")@localhost:27017/admin{{ .Values.metrics.extraUri }} {{ .Values.metrics.extraFlags }} + {{- else }} + /bin/mongodb_exporter --mongodb.uri mongodb://localhost:27017/admin{{ .Values.metrics.extraUri }} {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.useStatefulSet }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "mongodb.fullname" . }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} diff --git a/mongodb/templates/standalone/pvc.yaml b/mongodb/templates/standalone/pvc.yaml new file mode 100755 index 0000000..70e161c --- /dev/null +++ b/mongodb/templates/standalone/pvc.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not (eq .Values.architecture "replicaset")) (not .Values.useStatefulSet) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} +{{- end }} diff --git a/mongodb/templates/standalone/svc.yaml b/mongodb/templates/standalone/svc.yaml new file mode 100755 index 0000000..b421c3c --- /dev/null +++ b/mongodb/templates/standalone/svc.yaml @@ -0,0 +1,37 @@ +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.service.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: mongodb + {{- if and (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/mongodb/values-production.yaml b/mongodb/values-production.yaml new file mode 100755 index 0000000..fed82ff --- /dev/null +++ b/mongodb/values-production.yaml @@ -0,0 +1,908 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass +## Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride +# namespaceOverride: my-global-namespace + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.4.1-debian-10-r39 + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: false + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mongodb.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## MongoDB architecture. Allowed values: standalone or replicaset +## +architecture: replicaset + +## Use StatefulSet instead of Deployment when deploying standalone +## +useStatefulSet: false + +## MongoDB Authentication parameters +## +auth: + ## Enable authentication + ## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ + ## + enabled: true + ## MongoDB root password + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## MongoDB custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run + ## + # username: username + # password: password + # database: database + ## Key used for replica set authentication + ## Ignored when mongodb.architecture=standalone + ## + replicaSetKey: "" + + ## Existing secret with MongoDB credentials + ## NOTE: When it's set the previous parameters are ignored. 
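When credentials are kept outside the chart, the secret named by auth.existingSecret has to carry the same keys the templates above read: mongodb-root-password, mongodb-password (only used when auth.username and auth.database are set) and, for the replicaset architecture, mongodb-replica-set-key. A minimal sketch, assuming a hypothetical secret called mongodb-auth that is created before the release:

  apiVersion: v1
  kind: Secret
  metadata:
    name: mongodb-auth                   # hypothetical name, referenced via auth.existingSecret
  type: Opaque
  stringData:
    mongodb-root-password: change-me
    mongodb-password: change-me          # only read when auth.username and auth.database are set
    mongodb-replica-set-key: change-me   # only read when architecture=replicaset

The values override would then set auth.existingSecret: mongodb-auth and leave rootPassword, password and replicaSetKey empty.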
+ ## + # existingSecret: name-of-existing-secret + +## Name of the replica set +## Ignored when mongodb.architecture=standalone +## +replicaSetName: rs0 + +## Enable DNS hostnames in the replica set config +## Ignored when mongodb.architecture=standalone +## Ignored when externalAccess.enabled=true +## +replicaSetHostnames: true + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +enableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +directoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +systemLogVerbosity: 0 +disableSystemLog: false + +## MongoDB configuration file for Primary and Secondary nodes. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## Example: +## configuration: +## # where and how to store data. +## storage: +## dbPath: /bitnami/mongodb/data/db +## journal: +## enabled: true +## directoryPerDB: false +## # where to write logging data +## systemLog: +## destination: file +## quiet: false +## logAppend: true +## logRotate: reopen +## path: /opt/bitnami/mongodb/logs/mongodb.log +## verbosity: 0 +## # network interfaces +## net: +## port: 27017 +## unixDomainSocket: +## enabled: true +## pathPrefix: /opt/bitnami/mongodb/tmp +## ipv6: false +## bindIpAll: true +## # replica set options +## #replication: +## #replSetName: replicaset +## #enableMajorityReadConcern: true +## # process management options +## processManagement: +## fork: false +## pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +## # set parameter options +## setParameter: +## enableLocalhostAuthBypass: true +## # security options +## security: +## authorization: disabled +## #keyFile: /opt/bitnami/mongodb/conf/keyfile +## +configuration: "" + +## ConfigMap with MongoDB configuration for Primary and Secondary nodes +## NOTE: When it's set the arbiter.configuration parameter is ignored +## +# existingConfigmap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +initdbScripts: {} + +## Existing ConfigMap with custom init scripts +## +# initdbScriptsConfigMap: + +## Command and args for running the container (set to default if not set). Use array form +## +# command: +# args: + +## Additional command line flags +## Example: +## extraFlags: +## - "--wiredTigerCacheSizeGB=2" +## +extraFlags: [] + +## Additional environment variables to set +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## ConfigMap with extra environment variables +## +# extraEnvVarsCM: + +## Secret with extra environment variables +## +# extraEnvVarsSecret: + +## Annotations to be added to the MongoDB statefulset. Evaluated as a template. +## +annotations: {} + +## Additional labels to be added to the MongoDB statefulset. Evaluated as a template. +## +labels: {} + +## Number of MongoDB replicas to deploy. +## Ignored when mongodb.architecture=standalone +## +replicaCount: 4 + +## StrategyType for MongoDB statefulset +## It can be set to RollingUpdate or Recreate by default. 
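Most of the replica-set parameters above (replicaSetName, replicaSetHostnames, replicaCount) tend to be overridden together; as a hedged sketch, a smaller cluster than the replicaCount: 4 this production profile ships with might look like the following, with all figures illustrative rather than recommendations:

  architecture: replicaset
  replicaCount: 3                      # data-bearing members; the arbiter (see arbiter.enabled) is additional
  replicaSetName: rs0
  replicaSetHostnames: true
  auth:
    enabled: true
    replicaSetKey: a-long-random-key   # placeholder; the chart generates a random key if this is left empty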
+## +strategyType: RollingUpdate + +## MongoDB should be initialized one by one when building the replicaset for the first time. +## +podManagementPolicy: OrderedReady + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Lables for MongoDB pods. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for MongoDB pods. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## MongoDB pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## MongoDB pods' Security Context. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + +## MongoDB containers' Security Context (main and metrics container). +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + +## MongoDB containers' resource requests and limits. +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## MongoDB pods' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Liveness probes for MongoDB pods +## +customLivenessProbe: {} + +## Custom Rediness probes MongoDB pods +## +customReadinessProbe: {} + +## Add init containers to the MongoDB pods. +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: {} + +## Add sidecars to the MongoDB pods. 
+## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes on MongoDB pods +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## extraVolumes: +## - name: extras +## emptyDir: {} +extraVolumeMounts: [] +extraVolumes: [] + +## MongoDB Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## Ignored when mongodb.architecture=replicaset + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## MongoDB service port + ## + port: 27017 + ## MongoDB service port name + ## + portName: mongodb + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## MongoDB service clusterIP IP + ## + # clusterIP: None + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## External Access to MongoDB nodes configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to MongoDB nodes + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.18.9-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + ## Parameters to configure K8s service(s) used to externally access MongoDB + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 27017 + ## Array of load balancer IPs for each MongoDB node. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each MongoDB node. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for MongoDB advertised hostnames. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## +## MongoDB Arbiter parameters. +## +arbiter: + ## Enable deploying the MongoDB Arbiter + ## https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/ + enabled: true + + ## MongoDB configuration file for the Arbiter. 
For documentation of all options, see: + ## http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configuration: "" + + ## ConfigMap with MongoDB configuration for the Arbiter + ## NOTE: When it's set the arbiter.configuration parameter is ignored + ## + # existingConfigmap: + + ## Command and args for running the container (set to default if not set). Use array form + ## + # command: + # args: + + ## Additional command line flags + ## Example: + ## extraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + extraFlags: [] + + ## Additional environment variables to set + ## E.g: + ## extraEnvVars: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + + ## ConfigMap with extra environment variables + ## + # extraEnvVarsCM: + + ## Secret with extra environment variables + ## + # extraEnvVarsSecret: + + ## Annotations to be added to the Arbiter statefulset. Evaluated as a template. + ## + annotations: {} + + ## Additional labels to be added to the Arbiter statefulset. Evaluated as a template. + ## + labels: {} + + ## Affinity for pod assignment. Evaluated as a template. + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Labels for MongoDB Arbiter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + + ## Annotations for MongoDB Arbiter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## MongoDB Arbiter pods' priority. + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + # priorityClassName: "" + + ## MongoDB Arbiter pods' Security Context. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + + ## MongoDB Arbiter containers' Security Context (only main container). + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MongoDB Arbiter containers' resource requests and limits. + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## MongoDB Arbiter pods' liveness and readiness probes. Evaluated as a template.
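The arbiter only votes and stores no data, so it is usually given smaller requests than the data-bearing pods and, if anything, more relaxed probe timings than the defaults listed next; a hedged sketch using the keys above, with illustrative numbers:

  arbiter:
    enabled: true
    resources:
      requests:
        cpu: 50m
        memory: 64Mi
    livenessProbe:
      initialDelaySeconds: 60          # chart default is 30, shown below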
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + ## Custom Liveness probes for MongoDB Arbiter pods + ## + customLivenessProbe: {} + + ## Custom Readiness probes for MongoDB Arbiter pods + ## + customReadinessProbe: {} + + ## Add init containers to the MongoDB Arbiter pods. + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: {} + + ## Add sidecars to the MongoDB Arbiter pods. + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: {} + + ## extraVolumes and extraVolumeMounts allow you to mount other volumes on MongoDB Arbiter pods + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + extraVolumeMounts: [] + extraVolumes: [] + + ## MongoDB Arbiter Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the mongodb.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding MongoDB ServiceAccount to a role + ## that allows MongoDB pods to query the K8s API + ## + create: false + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user.
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to the autodetermined user and group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false, containerSecurityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + ## Bitnami MongoDB Prometheus Exporter image + ## ref: https://hub.docker.com/r/bitnami/mongodb-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.11.2-debian-10-r18 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra flags to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + ## + extraFlags: "" + + ## String with additional URI options to the metrics exporter + ## ref: https://docs.mongodb.com/manual/reference/connection-string + ## + extraUri: "" + + ## Metrics exporter container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Prometheus Exporter service configuration + ## + service: + ## Annotations for Prometheus Exporter pods. Evaluated as a template.
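With the exporter image above, exposing metrics to a Prometheus Operator installation is mostly a matter of flipping two flags; a sketch that assumes the operator selects ServiceMonitors by a release: prometheus label (that label is an assumption about the target cluster, not something this chart sets):

  metrics:
    enabled: true
    serviceMonitor:
      enabled: true                    # requires the Prometheus Operator CRDs to be installed
      additionalLabels:
        release: prometheus            # assumed selector label; adjust to the local Prometheus setup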
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + type: ClusterIP + port: 9216 + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify the namespace where Prometheus Operator is running + ## + # namespace: monitoring + + ## Specify the interval at which metrics should be scraped + ## + interval: 30s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + ## Specify the namespace where Prometheus Operator is running + ## + # namespace: monitoring + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + ## + rules: {} diff --git a/mongodb/values.schema.json b/mongodb/values.schema.json new file mode 100755 index 0000000..5e9d5da --- /dev/null +++ b/mongodb/values.schema.json @@ -0,0 +1,167 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MongoDB architecture", + "form": true, + "description": "Allowed values: `standalone` or `replicaset`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Authentication", + "form": true + }, + "rootPassword": { + "type": "string", + "title": "MongoDB admin password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + }, + "database": { + "type": "string", + "title": "MongoDB custom database", + "description": "Name of the custom database to be created during the 1st initialization of MongoDB", + "form": true + }, + "username": { + "type": "string", + "title": "MongoDB custom user", + "description": "Name of the custom user to be created during the 1st initialization of MongoDB. 
This user only has permissions on the MongoDB custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for MongoDB custom user", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + }, + "replicaSetKey": { + "type": "string", + "title": "Key used for replica set authentication", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of MongoDB replicas", + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "configuration": { + "type": "string", + "title": "MongoDB Custom Configuration", + "form": true, + "render": "textArea" + }, + "arbiter": { + "type": "object", + "title": "Arbiter configuration", + "form": true, + "properties": { + "configuration": { + "type": "string", + "title": "Arbiter Custom Configuration", + "form": true, + "render": "textArea", + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "hidden": { + "value": false, + "path": "persistence/enabled" + }, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/mongodb/values.yaml b/mongodb/values.yaml new file mode 100755 index 0000000..3501642 --- /dev/null +++ b/mongodb/values.yaml @@ -0,0 +1,908 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass +## Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride +# namespaceOverride: my-global-namespace + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + 
repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.4.1-debian-10-r39 + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: false + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override mongodb.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## MongoDB architecture. Allowed values: standalone or replicaset +## +architecture: standalone + +## Use StatefulSet instead of Deployment when deploying standalone +## +useStatefulSet: false + +## MongoDB Authentication parameters +## +auth: + ## Enable authentication + ## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ + ## + enabled: true + ## MongoDB root password + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## MongoDB custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run + ## + # username: username + # password: password + # database: database + ## Key used for replica set authentication + ## Ignored when mongodb.architecture=standalone + ## + replicaSetKey: "" + + ## Existing secret with MongoDB credentials + ## NOTE: When it's set the previous parameters are ignored. + ## + # existingSecret: name-of-existing-secret + +## Name of the replica set +## Ignored when mongodb.architecture=standalone +## +replicaSetName: rs0 + +## Enable DNS hostnames in the replica set config +## Ignored when mongodb.architecture=standalone +## Ignored when externalAccess.enabled=true +## +replicaSetHostnames: true + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +enableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +directoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +systemLogVerbosity: 0 +disableSystemLog: false + +## MongoDB configuration file for Primary and Secondary nodes. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## Example: +## configuration: +## # where and how to store data. 
+## storage: +## dbPath: /bitnami/mongodb/data/db +## journal: +## enabled: true +## directoryPerDB: false +## # where to write logging data +## systemLog: +## destination: file +## quiet: false +## logAppend: true +## logRotate: reopen +## path: /opt/bitnami/mongodb/logs/mongodb.log +## verbosity: 0 +## # network interfaces +## net: +## port: 27017 +## unixDomainSocket: +## enabled: true +## pathPrefix: /opt/bitnami/mongodb/tmp +## ipv6: false +## bindIpAll: true +## # replica set options +## #replication: +## #replSetName: replicaset +## #enableMajorityReadConcern: true +## # process management options +## processManagement: +## fork: false +## pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +## # set parameter options +## setParameter: +## enableLocalhostAuthBypass: true +## # security options +## security: +## authorization: disabled +## #keyFile: /opt/bitnami/mongodb/conf/keyfile +## +configuration: "" + +## ConfigMap with MongoDB configuration for Primary and Secondary nodes +## NOTE: When it's set the arbiter.configuration parameter is ignored +## +# existingConfigmap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +initdbScripts: {} + +## Existing ConfigMap with custom init scripts +## +# initdbScriptsConfigMap: + +## Command and args for running the container (set to default if not set). Use array form +## +# command: +# args: + +## Additional command line flags +## Example: +## extraFlags: +## - "--wiredTigerCacheSizeGB=2" +## +extraFlags: [] + +## Additional environment variables to set +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## ConfigMap with extra environment variables +## +# extraEnvVarsCM: + +## Secret with extra environment variables +## +# extraEnvVarsSecret: + +## Annotations to be added to the MongoDB statefulset. Evaluated as a template. +## +annotations: {} + +## Additional labels to be added to the MongoDB statefulset. Evaluated as a template. +## +labels: {} + +## Number of MongoDB replicas to deploy. +## Ignored when mongodb.architecture=standalone +## +replicaCount: 2 + +## StrategyType for MongoDB statefulset +## It can be set to RollingUpdate or Recreate by default. +## +strategyType: RollingUpdate + +## MongoDB should be initialized one by one when building the replicaset for the first time. +## +podManagementPolicy: OrderedReady + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Lables for MongoDB pods. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for MongoDB pods. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## MongoDB pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## MongoDB pods' Security Context. 
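+## Applied at the pod level: the fsGroup below sets the group ownership of mounted data volumes so that the non-root MongoDB container can write to them.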
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + +## MongoDB containers' Security Context (main and metrics container). +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + +## MongoDB containers' resource requests and limits. +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## MongoDB pods' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Liveness probes for MongoDB pods +## +customLivenessProbe: {} + +## Custom Rediness probes MongoDB pods +## +customReadinessProbe: {} + +## Add init containers to the MongoDB pods. +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: {} + +## Add sidecars to the MongoDB pods. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes on MongoDB pods +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## extraVolumes: +## - name: extras +## emptyDir: {} +extraVolumeMounts: [] +extraVolumes: [] + +## MongoDB Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## Ignored when mongodb.architecture=replicaset + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## MongoDB service port + ## + port: 27017 + ## MongoDB service port name + ## + portName: mongodb + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## MongoDB service clusterIP IP + ## + # clusterIP: None + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## External Access to MongoDB nodes configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to MongoDB nodes + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.18.9-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + ## Parameters to configure K8s service(s) used to externally access MongoDB + ## A new service per broker will be created + ## + service: + ## Service type. 
Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 27017 + ## Array of load balancer IPs for each MongoDB node. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each MongoDB nodes. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for MongoDB advertised hostnames. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## +## MongoDB Arbiter parameters. +## +arbiter: + ## Enable deploying the MongoDB Arbiter + ## https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/ + enabled: true + + ## MongoDB configuration file for the Arbiter. For documentation of all options, see: + ## http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configuration: "" + + ## ConfigMap with MongoDB configuration for the Arbiter + ## NOTE: When it's set the arbiter.configuration parameter is ignored + ## + # existingConfigmap: + + ## Command and args for running the container (set to default if not set). Use array form + ## + # command: + # args: + + ## Additional command line flags + ## Example: + ## extraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + extraFlags: [] + + ## Additional environment variables to set + ## E.g: + ## extraEnvVars: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + + ## ConfigMap with extra environment variables + ## + # extraEnvVarsCM: + + ## Secret with extra environment variables + ## + # extraEnvVarsSecret: + + ## Annotations to be added to the Arbiter statefulset. Evaluated as a template. + ## + annotations: {} + + ## Additional to be added to the Arbiter statefulset. Evaluated as a template. + ## + labels: {} + + ## Affinity for pod assignment. Evaluated as a template. + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for pod assignment. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Lables for MongoDB Arbiter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + + ## Annotations for MongoDB Arbiter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## MongoDB Arbiter pods' priority. + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + # priorityClassName: "" + + ## MongoDB Arbiter pods' Security Context. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + + ## MongoDB Arbiter containers' Security Context (only main container). + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MongoDB Arbiter containers' resource requests and limits. + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## MongoDB Arbiter pods' liveness and readiness probes. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + ## Custom Liveness probes for MongoDB Arbiter pods + ## + customLivenessProbe: {} + + ## Custom Rediness probes MongoDB Arbiter pods + ## + customReadinessProbe: {} + + ## Add init containers to the MongoDB Arbiter pods. + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: {} + + ## Add sidecars to the MongoDB Arbiter pods. + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: {} + + ## extraVolumes and extraVolumeMounts allows you to mount other volumes on MongoDB Arbiter pods + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + extraVolumeMounts: [] + extraVolumes: [] + + ## MongoDB Arbiter Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the mongodb.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created, + ## binding the MongoDB ServiceAccount to a role + ## that allows MongoDB pods to query the K8s API + ## + create: false + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component, +## using the values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not to the volumePermissions.securityContext.runAsUser below + ## When runAsUser is set to the special value "auto", the init container will try to chown the + ## data folder to an auto-determined user and group, using the commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift, which has SCCs with dynamic user ids (and 0 is not allowed). + ## You may want to use volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + ## Bitnami MongoDB Prometheus Exporter image + ## ref: https://hub.docker.com/r/bitnami/mongodb-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.11.2-debian-10-r18 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra flags to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + ## + extraFlags: "" + + ## String with additional URI options to the metrics exporter + ## ref: https://docs.mongodb.com/manual/reference/connection-string + ## + extraUri: "" + + ## Metrics exporter container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Prometheus Exporter service configuration + ## + service: + ## Annotations for Prometheus Exporter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + type: ClusterIP + port: 9216 + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify the namespace where Prometheus Operator is running + ## + # namespace: monitoring + + ## Specify the interval at which metrics should be scraped + ## + interval: 30s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + ## Specify the namespace where Prometheus Operator is running + ## + # namespace: monitoring + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + ## + rules: {} diff --git a/qliksense/.helmignore b/qliksense/.helmignore new file mode 100644 index 0000000..3b12c64 --- /dev/null +++ b/qliksense/.helmignore @@ -0,0 +1,2 @@ +README-internal.md +dependencies.yaml diff --git a/qliksense/Chart.yaml b/qliksense/Chart.yaml new 
file mode 100644 index 0000000..647dbe2 --- /dev/null +++ b/qliksense/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +appVersion: "2020.04" +description: Qlik Sense Enterprise on Kubernetes +home: https://www.qlik.com +kubeVersion: '>=1.10.0-0' +name: qliksense +version: 1.54.25 diff --git a/qliksense/README.md b/qliksense/README.md new file mode 100644 index 0000000..67e05a1 --- /dev/null +++ b/qliksense/README.md @@ -0,0 +1,179 @@ +# Qlik Sense Enterprise + +## Introduction + +This chart bootstraps a Qlik Sense Enterprise on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +- [Qlik Sense Enterprise](#qlik_sense_enterprise) + - [Introduction](#introduction) + - [Installing the Chart](#installing-the-chart) + - [Uninstalling the Chart](#uninstalling-the-chart) + - [Global chart Configuration](#global-chart-configuration) + - [Configuration](#configuration) + - [Identity provider, authentication and tenant configuration](#identity-provider-authentication-and-tenant-configuration) + - [Encryption](#encryption) + +## Installing the Chart + +Run these prerequisite steps to download the chart and install the custom resource definitions. Should only be performed once per cluster. + +```console +helm install --name qliksense-init qlik/qliksense-init +``` + +To install the chart with the release name `qliksense`: + +```console +helm install --name qliksense qlik/qsefe +``` + +For a local development install, do the following: +```shell +helm upgrade --install qliksense qlik/qsefe --set devMode.enabled=true,engine.acceptEULA="yes",edge-auth.deployment.oidc.enabled=true +``` + +The command deploys qliksense on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +The command removes all the Kubernetes components associated with the chart and deletes the release. +To uninstall/delete the `qliksense` deployment: + +```console +helm delete qliksense +``` + +To uninstall/delete init chart you cannot have any qliksense charts left in the cluster (all namespaces): + +```console +helm delete qliksense-init +``` + +## Global chart Configuration + +The following table lists the global configurable parameters of the qliksense chart and their default values. These values will propagate to all sub-components that utilize such configurations so they need only be defined once to apply to everything. +For the full list of available options, see `values.yaml`. + +| Parameter | Description | Default | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | +| `global.imageRegistry` | Docker image registry to use to pull all images (e.g. if you have a self-hosted registry with all the images) | `nil` | +| `global.ingressClass` | Global ingress class used for all the qliksense ingresses as well as the nginx-ingress-controller. 
Change this if you have multiple controllers that might otherwise pick up on these ingresses | `"qlik-nginx"` | +| `global.certs.enabled` | enable/disable the usage of a global CA certificate file, replacing all existing trusted CAs | `false` | +| `global.certs.configMap.create` | enable/disable creating a CA certificate ConfigMap | `false` | +| `global.certs.configMap.certs` | The global CA certificate chain - use this to supply a FULL CA certificate chain that your qliksense installation should trust | See` values.yaml` | +| `global.certs.volume.hostPath` | Mounts a CA certificate file or directory from the host node’s filesystem into the Pod | `nil` | +| `global.persistence.storageClass` | Defines the name of a global Persistent Volume Storage Class. This can be a pre-existing SC defined in the cluster | `"-"` | +| `global.persistence.internalStorageClass.enabled` | Enable/disable deploying a storage class as part of the qliksense chart | `false` | +| `global.persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled. | `{}` | + +## Configuration + +The following table lists some of the configurable parameters of the qliksense chart and their default values. For the full list of available options, see `values.yaml`. + +| Parameter | Description | Default | +| ------------------------------------ | --------------------------------------------------------------------------------------------------- | ------------- | +| engine.acceptEULA | Agree to the Qlik sense engine EULA in order to activate it | `"no"` | +| engine.replicaCount | The number of replicas of the Qlik Sense Engine to deploy | `"no"` | +| engine.persistence | Defines the persistence layer of the engine - ReadWriteMany is required for multiple engines | | +| devMode.enabled | activates "devMode" for local development and deploys a mongodb chart (e.g. with minikube) | `false` | +| mongodb.uri | The uri (with credentials) to the mongodb to use. Not used if `devMode` is active. | | +| messaging.nats.auth.users[].user | User is used to authenticate nats-streaming (Should be set in a production cluster) | `natsClient` | +| messaging.nats.auth.users[].password | Password is used to authenticate nats-streaming (Should be set in a production cluster) | `clientPass` | +| messaging.nats.clusterAuth.password | Password is used to authenticate nats-to-nats communication (Should be set in a production cluster) | `clusterPass` | + +### NATS to NATS-Streaming password rotation + +The nats chart supports an array of users (`messaging.nats.auth.users`) used for authenticating NATS-Streaming to NATS. NATS-Streaming will use the first entry in the array to authenticate to NATS. Any additional entries can still be used to authenticate against. + +#### How to rotate + +In this example we have a deployed cluster with a NATS-Streaming that is authenticated using user `user1` with password `password1` from the following config. We want to update this to use `password2`. +```yaml +auth: + users: + - user: user1 + password: password1 +``` + +1) Add new user/password to the first entry in the array, but leave the old entry as *second* in the list. Then `helm update` your release. +```yaml +auth: + users: + - user: user2 + password: password2 + - user: user1 + password: password1 +``` +2) NATS will now have both user/passwords configured, but NATS-Streaming will still be using the original entry to authenticate. 
NATS-Streaming servers will need to be restarted to pickup the new password from the first entry in `messaging.nats.auth.users`. +```sh +kubectl delete pod {Release.Name}-nats-streaming-2 #wait for new pod to become ready +kubectl delete pod {Release.Name}-nats-streaming-1 #wait for new pod to become ready +kubectl delete pod {Release.Name}-nats-streaming-0 #wait for new pod to become ready +``` +3) Finally remove the old user from the `messaging.nats.auth.users` array and `helm update` to remove authentication for the old user. +```yaml +auth: + users: + - user: user2 + password: password2 +``` + +### Identity provider, authentication and tenant configuration + +The following table lists the authentication, tenant and identity provider configurations. You will need to configure an identity provider to be able to login and use QlikSense. + +| Parameter | Description | Default | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `edge-auth.configs.enforceTLS` | When enabled edge-auth will reject non-TLS requests on it's external endpoints | `true` | +| `edge-auth.configs.secureCookies` | Restrict cookies to only be sent over SSL | `false` | +| `edge-auth.configs.sessionTTLSeconds` | The length of time in seconds that a session will live passed the last interaction | `1800` | +| `edge-auth.configs.sessionMaxLifetimeSeconds` | The maximum length of time in seconds that a session can exist | `86400` (one day) | +| `edge-auth.configs.loginStateLifetime` | The length of time between initiating and completing login is allowed to take | `5m` | +| `edge-auth.secrets.cookieKeys` | Array of strings used for signing cookies | `["A secret key"]` | +| `edge-auth.secrets.stringData.tokenAuthPrivateKey` | RSA or EC Private signing key for internal JWTs | Generate EC 384 private key `ssh-keygen -t ecdsa -b 384 -f jwtPrivateKey -N ''` | +| `edge-auth.secrets.stringData.loginStateKey` | The key with which to sign the state parameter (encoded in base64), must be larger than 256 bits | To generate use `openssl rand -base64 32` | +| `identity-providers.secrets.create` | Create the secret resource for identity-providers | `true` | +| `identity-providers.secrets.idpConfigs` | Array of configs for Identity providers | _See following_ | +| `identity-providers.secrets.idpConfigs[].allowedClientIds` | An array of the IDs of allowed API clients, only client tokens with these client IDs will be allowed access, if no value is provided then any client with the correct claims will be allowed access | `nil` | +| `identity-providers.secrets.idpConfigs[].audience` | The audience value that tokens from the IdP will be asserted to be issued for, default is `qlik.api` | `qlik.api` | +| `identity-providers.secrets.idpConfigs[].claimsMapping` | How to map the IdP's `userinfo` to internal fields (_See [Claims Mappings](#claims-mappings)_) | `{sub: "sub", name: "name"}` | +| `identity-providers.secrets.idpConfigs[].claimsMapping.name` | `userinfo` field to be mapped to internal name field | `nil` | +| `identity-providers.secrets.idpConfigs[].claimsMapping.sub` | `userinfo` field to be mapped to internal sub field | `nil` | +| `identity-providers.secrets.idpConfigs[].clientId` | IdP client ID | `foo` | +| 
`identity-providers.secrets.idpConfigs[].clientSecret` | IdP client secret | `bar` | +| `identity-providers.secrets.idpConfigs[].clockToleranceSec` | The clock tolerance in seconds, this is to compensate for clock skew between the IdP and this service, default is 5 | `nil` | +| `identity-providers.secrets.idpConfigs[].discoveryUrl` | IdP discovery URL | `http://localhost:32123/.well-known/openid-configuration` | +| `identity-providers.secrets.idpConfigs[].hostname` | Requests to this hostname will use this IdP | `elastic.example` | +| `identity-providers.secrets.idpConfigs[].issuerConfig` | IdP issuer config | _See following_ | +| `identity-providers.secrets.idpConfigs[].issuerConfig.authorization_endpoint` | IdP authorization_endpoint URI | `nil` | +| `identity-providers.secrets.idpConfigs[].issuerConfig.end_session_endpoint` | IdP end_session_endpoint URI | `nil` | +| `identity-providers.secrets.idpConfigs[].issuerConfig.introspection_endpoint` | IdP introspection_endpoint URI | `nil` | +| `identity-providers.secrets.idpConfigs[].issuerConfig.issuer` | IdP issuer URI | | +| `identity-providers.secrets.idpConfigs[].issuerConfig.jwks_uri` | IdP jwks_uri URI | | +| `identity-providers.secrets.idpConfigs[].issuerConfig.token_endpoint` | IdP token_endpoint URI | | +| `identity-providers.secrets.idpConfigs[].issuerConfig.userinfo_endpoint` | IdP userinfo_endpoint URI | | +| `identity-providers.secrets.idpConfigs[].postLogoutRedirectUri` | URI to redirect to on logout, this only takes effect when `end_session_endpoint` is not configured | | +| `identity-providers.secrets.idpConfigs[].primary` | Boolean denoting if this IdP is the primary one for the hostname. Primary IdPs are those for which will be used for the interactive login, non-primary IdPs can only exchange tokens, default is true | | +| `identity-providers.secrets.idpConfigs[].realm` | realm name to associate with IdP users | `simple` | +| `identity-providers.secrets.idpConfigs[].staticKeys` | An array of public keys. This allows IdP JWT verifier to use a static set (one or more) of public keys to verify external JWTs (identity token) | `[]` | +| `identity-providers.secrets.idpConfigs[].staticKeys[].kid` | The key id | | +| `identity-providers.secrets.idpConfigs[].staticKeys[].pem` | The pem format key | | + +### Encryption + +An encryption backend should be configured before starting to use QlikSense. Currently the system supports running with encryption disabled, but the system does not currently support enabling encryption on an already running QlikSense install. + +**This means if encryption is disabled and then enabled, any data added to the system before encryption was enabled may become inaccessible.** + +At this time [Vault](https://www.vaultproject.io/) is the only support encryption backend. + +| Parameter | Description | Default | +| ---------------------------------------------- | -----------------------------------------------------------------------|---------| +| `encryption.enabled` | Should encryption be enabled | true | +| `encryption.backend.type` | Backend to use. Set this to `vault` | | +| `encryption.backend.uri` | URI where vault is located | | +| `encryption.backend.auth.type` | type of auth to use when communicating with Vault. 
Set this to `token` | | +| `encryption.backend.auth.token` | Token to use when communicating with Vault | | +| `encryption.backend.auth.tokenRenew` | Should encryption auto-renew the token | | +| `encryption.backend.auth.tokenRenewFrequency` | How often should the token be auto-renewed | | +| `encryption.backend.auth.tokenRenewTTL` | How long should the renewed token be good for | | diff --git a/qliksense/charts/api-keys/.helmignore b/qliksense/charts/api-keys/.helmignore new file mode 100644 index 0000000..4c7bf9b --- /dev/null +++ b/qliksense/charts/api-keys/.helmignore @@ -0,0 +1,2 @@ +dependencies.yaml +dev-values.yaml diff --git a/qliksense/charts/api-keys/Chart.yaml b/qliksense/charts/api-keys/Chart.yaml new file mode 100644 index 0000000..f7a91f1 --- /dev/null +++ b/qliksense/charts/api-keys/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +description: A Service to retrieve and store public keys and api keys metadata +home: https://www.qlik.com +keywords: +- api-keys +name: api-keys +sources: +- https://github.com/qlik-trial/api-keys +version: 3.0.2 diff --git a/qliksense/charts/api-keys/README.md b/qliksense/charts/api-keys/README.md new file mode 100644 index 0000000..eb16052 --- /dev/null +++ b/qliksense/charts/api-keys/README.md @@ -0,0 +1,85 @@ +# Api-keys + +[api-keys](https://github.com/qlik-trial/api-keys) retrieves and stores public keys and api keys metadata. + +## Introduction + +This chart bootstraps an api-keys deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `api-keys`: + +```console +helm install --name api-keys qlik/api-keys +``` + +The command deploys `api-keys` on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `api-keys` deployment: + +```console +helm delete api-keys +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the `api-keys` chart and their default values. + +| Key | Kind | Default | Description | +| --- | --- | --- | --- | +| logLevel | config | "verbose" | Log level | +| environment | config | "qseok" | The environment name | +| region | config | "example" | Deployed region | +| natsEnabled | config | true | Toggle to enable NATS / Streaming messaging | +| mongoSsl | config | nil (default: false) | Enables/disables ssl for the MongoDB connection (`true` or `false`). Can be overridden using ssl query parameter in the URI (?ssl=true or ?ssl=false). | +| mongoSslValidate | config | nil (default: false) | Validate mongo server certificate against CA. Untrusted certificates will be rejected. | +| mongoCheckServerIdentity | config | nil (default: false) | Enforces that mongo server certificate CN matches mongo URI hostname/IP address. 
| +| keysUri | config | "http://{{ .Release.Name }}-keys:8080/v1/" | uri for keys service resource | +| jwksEndpoint | config | "http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal" | URI where the JWKS to validate JWTs is located | +| UsersUri | config | "http://{{ .Release.Name }}-users:8080/v1" | uri for users resource | +| tokenAuthUri | config | "http://{{ .Release.Name }}-edge-auth:8080/v1" | internal-tokens URL | +| ingressAuthUrl | config | http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth |The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | +| natsUri | config | "nats://{{ .Release.Name }}-nats-client:4222" | Address of NATS server | +| natsStreamingClusterId | config | "{{ .Release.Name }}-nats-streaming-cluster" | A unique NATS client id for this instance of the service | +| rollbarEnabled | config| nil | Enable rollbar to track server errors (nil default to false | +| rollbarToken | config| nil | The rollbar token | +| mongodbUri | secret | "mongodb://{{ .Release.Name }}-mongodb:27017" | A mongo db connection uri | +| redisUri | secret | "{{ .Release.Name }}-redis-master:6379" | Full Redis URI (port included)| +| redisPassword | secret | nil | Custom password for authentication (needs `usePassword` set to `true`| +| tokenAuthPrivateKeyId | secret | The key id that matches the kid in the JWKS | +| tokenAuthPrivateKey | secret | PEM formated private key to sign service identity JWT | +| replicas | deployment | 1 | number of users replicas | + + +| Parameter | Description | Default | +|-------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------| +| `mongodb.enabled` | enable Mongodb as a chart dependency | `false` | +| `messaging.enabled` | enable messaging chart as a chart dependency | `false` | +| `redis.enabled` | Enable Redis as chart's dependency | `false` | | +| `redis.encryption` | Whether to use TLS while connecting to Redis | `false` | +| `redis.usePassword` | Disable password authentication | `true` | +| `redis.cluster.enabled` | Use master-secondary topology | `false` | +| `redis.master.statefulset.updateStrategy` | Update strategy for Redis StatefulSet | `RollingUpdate` | +| `redis.master.resources.requests.cpu` | Redis master CPU reservation | `100m` | +| `redis.master.resources.requests.memory` | Redis master memory reservation | `256Mi` | +| `redis.master.limits.requests.cpu` | Redis master CPU reservation | `400m` | +| `redis.master.limits.requests.memory` | Redis master memory reservation | `5Gi` | +| `redis.slave.resources.requests.cpu` | Redis master CPU reservation | `100m` | +| `redis.slave.resources.requests.memory` | Redis master memory reservation | `256Mi` | +| `redis.slave.limits.requests.cpu` | Redis master CPU reservation | `400m` | +| `redis.slave.limits.requests.memory` | Redis master memory reservation + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +helm install --name api-keys -f values.yaml qlik/api-keys +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/api-keys/charts/messaging/.helmignore b/qliksense/charts/api-keys/charts/messaging/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/api-keys/charts/messaging/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/Chart.yaml new file mode 100644 index 0000000..8f4c1e1 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +description: | + Messaging system services. NATS and NATS Streaming are supported. Other services can communicate with each other and orchestrate their work using the services provided by this chart. +home: https://www.qlik.com +keywords: +- messaging +- queue +- nats +- nats-streaming +name: messaging +sources: +- https://github.com/nats-io/gnatsd +- https://github.com/nats-io/nats-streaming-server +- https://github.com/helm/charts/tree/master/stable/nats +- https://github.com/nats-io/prometheus-nats-exporter +- https://github.com/qlik-trial/nats-prom-exporter +version: 2.0.29 diff --git a/qliksense/charts/api-keys/charts/messaging/README.md b/qliksense/charts/api-keys/charts/messaging/README.md new file mode 100644 index 0000000..9627063 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/README.md @@ -0,0 +1,288 @@ +# messaging + +This chart provides **messaging system** (a.k.a. message queue, message bus, etc.) capabilities for services. +Currently, [NATS](https://www.nats.io) and [NATS Streaming](https://nats.io/documentation/streaming/nats-streaming-intro/) +are included in this chart, but in the future, other messaging systems such as RabbitMQ can also be added.
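+As a rough orientation (a minimal sketch, not taken verbatim from this chart's documented examples), a parent chart that declares `messaging` as a dependency would typically toggle the individual systems from its own values file via the `nats.enabled` and `nats-streaming.enabled` parameters described under [Configuration](#configuration):
+
+```yaml
+# Hypothetical values.yaml fragment of a parent chart that depends on the messaging chart.
+# Keys are scoped under the subchart name, as is standard for Helm dependencies.
+messaging:
+  nats:
+    enabled: true            # deploy the core NATS servers (the default)
+  nats-streaming:
+    enabled: true            # additionally deploy NATS Streaming on top of NATS (off by default)
+```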
+ +## Installing the Chart + +To install the chart with the release name `messaging`: + +```console +helm install --name messaging qlik/messaging +``` + +## Uninstalling the Chart + +To uninstall/delete the `messaging` deployment: + +```console +helm delete messaging +``` + +## Configuration + +### NATS + +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------------- | ------------------------------------- | +| `nats.enabled` | enable NATS messaging system | `true` | +| `nats.image.registry` | NATS image registry | `qliktech-docker.jfrog.io` | +| `nats.image.repository` | NATS Image name | `qnatsd` | +| `nats.image.tag` | NATS Image tag | `0.3.1` | +| `nats.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats.image.pullSecrets` | specify image pull secrets | `artifactory-docker-secret` | +| `nats.replicaCount` | number of nats replicas | `1` | +| `nats.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats.auth.enabled` | enable authentication for nats clients | `true` | +| `nats.auth.user` | username for nats client authentication | `nats_client` | +| `nats.auth.password` | password for nats client authentication | `T0pS3cr3t` | +| `auth.users` | Client authentication users | `[]` See [Rotation](#how-to-rotate) | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `nats.auth.jwtUsers` | array of jwt authenticated users | See [Authentication](#authentication) | +| `nats.clusterAuth.enabled` | enable authentication for nats clustering | `false` | +| `nats.clusterAuth.user` | username for nats clustering authentication | `nats_cluster` | +| `nats.clusterAuth.password` | password for nats clustering authentication | random string | +| `nats.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats.client.service.type` | nats-client service type | `ClusterIP` | +| `nats.client.service.port` | nats-client service port | `4222` | +| `nats.cluster.service.type` | nats-cluster service type | `ClusterIP` | +| `nats.cluster.service.port` | nats-cluster service port | `6222` | +| `nats.monitoring.service.type` | nats-monitoring service type | `ClusterIP` | +| `nats.monitoring.service.port` | nats-monitoring service port | `8222` | +| `nats.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats.resources` | CPU and memory requests and limits for nats | `{}` | +| `extraArgs` | Optional flags for NATS | See [values.yaml](./values.yaml) | + +### NATS Streaming + +| Parameter | Description | Default | +| ------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | +| `nats-streaming.enabled` | enable NATS messaging system | `false` | +| `nats-streaming.image.registry` | NATS streaming image registry | `qliktech-docker.jfrog.io` | +| `nats-streaming.image.repository` | NATS streaming image name | `nats-streaming` | +| `nats-streaming.image.tag` | NATS Streaming image tag | `0.14.1` | +| `nats-streaming.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats-streaming.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats-streaming.replicaCount` | 
number of nats replicas | `3` | +| `nats-streaming.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats-streaming.auth.enabled` | enable authentication for nats clients | `true` | +| `nats-streaming.auth.user` | username for nats client authentication | `nats_client` | +| `nats-streaming.auth.password` | password for nats client authentication | `nil` (Uses Secret below for password) | +| `nats-streaming.auth.secretName` | secretName for nats client authentication | `{{ .Release.Name }}-nats-secret` | +| `nats-streaming.auth.secretKey` | secretKey for nats client authentication | `client-password` | +| `nats-streaming.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats-streaming.monitoring.service.type` | nats-streaming-monitoring service type | `ClusterIP` | +| `nats-streaming.monitoring.service.port` | nats-streaming-monitoring service port | `8222` | +| `nats-streaming.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats-streaming.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats-streaming.resources` | CPU and memory requests and limits for nats | `{}` | +| `nats-streaming.clusterID` | nats streaming cluster name id | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats-streaming.natsSvc` | external nats server url | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats-streaming.hbInterval` | Interval at which server sends heartbeat to a client | `10s` | +| `nats-streaming.hbTimeout` | How long server waits for a heartbeat response | `10s` | +| `nats-streaming.hbFailCount` | Number of failed heartbeats before server closes the client connection | `5` | +| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` | +| `cluster_raft_logging` | Used for raft related debugging | `false` | +| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` | +| `store` | Storage options (Support values are `memory` and `file`) | `file` | +| `nats-streaming.persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `nats-streaming.persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `nats-streaming.persistence.size` | Persistence volume size | `nil` | +| `nats-streaming.persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `nats-streaming.persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +### Network Policy for NATS and NATS Streaming + +| Parameter | Description | Default | +| -------------------------------------- | ---------------------------------------------------------------- | --------------------- | +| `networkPolicy.nats.enabled` | enable custom network policy for NATS messaging system | `false` | +| `networkPolicy.nats-streaming.enabled` | enable custom network policy for NATS Streaming messaging system | `false` | +| `networkPolicy.keys.release` | keys service release name for egress rules | `{{ .Release.Name }}` | + +## Requirements + +### Network Plugin to enable Network Policies in Kubernetes cluster + +This chart include options to enable [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the created +`nats` and `nats-streaming` clusters. 
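+For example (a minimal sketch using only the `networkPolicy.*` parameters documented above; adjust names to your own release), both policies can be switched on from the values file:
+
+```yaml
+# Hypothetical values override enabling the optional NetworkPolicy resources.
+networkPolicy:
+  nats:
+    enabled: true            # ingress to NATS is then limited to pods labelled <release>-nats-client=true
+  nats-streaming:
+    enabled: true
+  keys:
+    release: my-release      # release name of the keys service, used for the egress rules
+```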
+ +Network policies are implemented by the network plugin, so the Kubernetes cluster must be configured with a networking solution which supports NetworkPolicy - +simply creating the resource without a controller to implement it will have no effect. + +For local development, please refer to [Setting Up a Minikube Cluster - Configuring Network Plugin to support Network Policies](https://github.com/qlik-trial/elastic-charts/blob/master/docs/prerequisites/minikube-cluster.md#configuring-network-plugin-to-support-network-policies) +for detailed instructions. + +### Secrets + +For deploying this chart to **stage**/**prod**, you need the following secrets written to **vault**. + +*The passwords should not start with a number!* + +| Secret | Key | Purpose | +| -------------------------------------------------------------- | ------- | ----------------------------------- | +| `/secret/{environment}/messaging/{region}/natsClientPassword` | `value` | password for client authentication | +| `/secret/{environment}/messaging/{region}/natsClusterPassword` | `value` | password for cluster authentication | + +## Connecting to NATS / NATS Streaming + +### From the command line: +#### Port-forward NATS Client Service: +```sh + > kubectl port-forward messaging-nats-0 4222 +``` +#### Connect via `telnet`: +```sh + > telnet localhost 4222 +``` +#### Connect with no auth: +```sh + CONNECT {} +``` +#### Connect with auth: +```sh + CONNECT {"user":"my-user","pass":"T0pS3cr3t"} +``` +#### Subscribing to channel, publishing to a channel, and receiving the published message: +```sh + SUB foo 1 + +OK + PUB foo 11 + Hello World + +OK + MSG foo 1 11 + Hello World +``` + +### Using [go-nats](https://github.com/nats-io/go-nats/) and [go-nats-streaming](https://github.com/nats-io/go-nats-streaming) clients: +```golang +package main + +import ( + "log" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming" +) + +func main() { + nc, err := nats.Connect("nats://nats_client:asdf@localhost:4222") + if err != nil { + log.Fatal(err) + } + sc, err := stan.Connect("messaging-nats-streaming-cluster", "client-123", stan.NatsConn(nc)) + if err != nil { + log.Fatal(err) + } + sc.Publish("hello", []byte("msg1")) + + sc.Subscribe("hello", func(m *stan.Msg) { + log.Printf("[Received] %+v", m) + }, stan.StartWithLastReceived()) + + sc.Publish("hello", []byte("msg2")) + + select{} +} +``` + +### With Network Policies enabled + +To connect to `NATS` as a client with Network Policies enabled , the pod in which the service client is in must have the label +`{{ .Release.Name }}-nats-client=true`. + +Otherwise, if enabled, the `ingress` `Network Policy` for `NATS` will block incoming traffic from any pod without the appropriate label. + +`Network Policy` is enabled in `stage` and `production` environments. + +## Authentication + +It's important to know that when using NATS Streaming, a NATS connection is also required and that it is the NATS connection that handles authentication and authorization not the NATS Streaming connnection. + +### NATS to NATS-Streaming password rotation + +The nats chart supports an array of users (`nats.auth.users`) used for authenticating NATS-Streaming to NATS. NATS-Streaming will use the first entry in the array to authenticate to NATS. Any additional entries can still be used to authenticate against. + +#### How to rotate + +In this example we have a deployed cluster with a NATS-Streaming that is authenticated using user `user1` with password `password1` from the following config. 
We want to update this to use `password2`. +```yaml +auth: + users: + - user: user1 + password: password1 +``` + +1) Add the new user/password as the first entry in the array, but leave the old entry as *second* in the list. Then `helm upgrade` your release. +```yaml +auth: + users: + - user: user2 + password: password2 + - user: user1 + password: password1 +``` +2) NATS will now have both user/password pairs configured, but NATS-Streaming will still be using the original entry to authenticate. NATS-Streaming servers will need to be restarted to pick up the new password from the first entry in `nats.auth.users`. +```sh +kubectl delete pod {Release.Name}-nats-streaming-2 #wait for new pod to become ready +kubectl delete pod {Release.Name}-nats-streaming-1 #wait for new pod to become ready +kubectl delete pod {Release.Name}-nats-streaming-0 #wait for new pod to become ready +``` +3) Finally, remove the old user from the `nats.auth.users` array and `helm upgrade` to remove authentication for the old user. +```yaml +auth: + users: + - user: user2 + password: password2 +``` + +### JWT Authentication + +NATS has been configured to allow authentication using service-to-service (S2S) JWTs, but in order to be authenticated, a service must be whitelisted. +The `nats.auth.jwtUsers` value can be used to provide a whitelist of users that should be authenticated using an S2S JWT. +**Note**: when using an S2S JWT, both the NATS username and the JWT `subject` must match. + +Adding a new service to the whitelist is as simple as updating the `nats.auth.jwtUsers` value as such: +```yaml +nats: + auth: + jwtUsers: + - user: "my-service" + - user: "my-other-service" + ...etc +``` + +### Authorization + +The above method of adding a JWT authentication whitelist also allows for setting authorization rules. +NATS [authorization rules](https://nats.io/documentation/managing_the_server/authorization/) can be configured on a per-subject basis. + +The following is an example of adding publish/subscribe authorization rules: +```yaml +nats: + auth: + jwtUsers: + - user: "my-service" + stanPermissions: + publish: + - "events.mysubject.>" # service can publish to any subject that starts with `events.mysubject.` + - "system-events.mysubject" # service can publish to the `system-events.mysubject` subject + subscribe: + - "events.somesubject" # service can subscribe to the `events.somesubject` subject + natsPermissions: + publish: + - "events.mysubject1" # service can publish to the `events.mysubject1` subject + subscribe: + - "events.somesubject1" # service can subscribe to the `events.somesubject1` subject +``` +Wildcard support works as follows: + +The dot character `.` is the token separator. + +The asterisk character `*` is a token wildcard match. +`e.g. foo.* matches foo.bar, foo.baz, but not foo.bar.baz.` + +The greater-than symbol `>` is a full wildcard match. +`e.g.
diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/Chart.yaml
new file mode 100644
index 0000000..32bfb9d
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: |
+  Service that monitors NATS/NATS-Streaming message delivery metrics
+home: https://www.qlik.com
+name: message-delivery-monitor
+sources:
+- https://github.com/qlik-trial/message-delivery-monitor
+version: 0.1.0
diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/README.md b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/README.md
new file mode 100644
index 0000000..a770466
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/README.md
@@ -0,0 +1,70 @@
+# message-delivery-monitor
+
+[message-delivery-monitor](https://github.com/qlik-trial/message-delivery-monitor) is responsible for measuring message delivery and latency for NATS/NATS-Streaming.
+
+## Introduction
+
+This chart bootstraps a message-delivery-monitor deployment on a [Kubernetes](http://kubernetes.io) cluster using the
+[Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/message-delivery-monitor
+```
+
+The command deploys message-delivery-monitor on the Kubernetes cluster in the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
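+
+For example, to point the monitor at an authenticated NATS deployment at install time (the parameter names come from the [configuration](#configuration) table below; the values shown are placeholders):
+
+```console
+helm install --name my-release \
+  --set nats.auth.enabled=true,nats.auth.user=my-user,nats.auth.password=T0pS3cr3t \
+  qlik/message-delivery-monitor
+```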
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the message-delivery-monitor chart and their default values.
+
+| Parameter                        | Description                                                    | Default                                        |
+| -------------------------------- | -------------------------------------------------------------- | ---------------------------------------------- |
+| `image.registry`                 | Image registry                                                  | `ghcr.io`                                       |
+| `image.repository`               | Image repository                                                | `qlik-download/message-delivery-monitor`        |
+| `image.tag`                      | Image version                                                   | `0.1.1`                                         |
+| `image.pullPolicy`               | Image pull policy\*                                             | `IfNotPresent`                                  |
+| `imagePullSecrets`               | A list of secret names for accessing private image registries   | `[{name: "artifactory-docker-secret"}]`         |
+| `logLevel`                       | Level of logging                                                | `info`                                          |
+| `nats.server`                    | NATS server address                                             | `nats://{{ .Release.Name }}-nats-client:4222`   |
+| `nats.auth.enabled`              | Enable authentication to NATS                                   | `false`                                         |
+| `nats.auth.user`                 | Username to authenticate to NATS                                | `nil`                                           |
+| `nats.auth.password`             | Password to authenticate to NATS                                | `nil`                                           |
+| `nats.auth.secretName`           | Name of a Kubernetes secret to read the user/password from      | `nil`                                           |
+| `nats.auth.secretClientUser`     | Key in that secret holding the username                         | `nil`                                           |
+| `nats.auth.secretClientPassword` | Key in that secret holding the password                         | `nil`                                           |
+| `stan.clusterID`                 | NATS Streaming cluster ID                                       | `{{ .Release.Name }}-nats-streaming-cluster`    |
+| `stan.monitorChannel`            | NATS Streaming channel to monitor on                            | `monitor-channel`                               |
+| `resources`                      | CPU/Memory resource requests/limits                             | `{}`                                            |
+| `service.type`                   | Service type                                                    | `ClusterIP`                                     |
+| `service.port`                   | message-delivery-monitor listen port                            | `8080`                                          |
+| `metrics.prometheus.enabled`     | Whether Prometheus metrics are enabled                          | `true`                                          |
+
+(\*) If setting `image.tag` to `latest`, it is recommended to change `image.pullPolicy` to `Always`.
+
+### Setting Parameters
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
+For example,
+
+```console
+helm install --name my-release -f values.yaml qlik/message-delivery-monitor
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl
new file mode 100644
index 0000000..1ac7ca5
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl
@@ -0,0 +1,47 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "message-delivery-monitor.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If the release name contains the chart name it will be used as the full name.
+*/}} +{{- define "message-delivery-monitor.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper message-delivery-monitor image name +*/}} +{{- define "message-delivery-monitor.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml new file mode 100644 index 0000000..a48a06f --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} + {{- range $key, $val := .Values.podLabels }} + {{- if tpl ($val) $}} + {{ tpl ($key) $ }}: {{ tpl ($val) $ | quote }} + {{- end }} + {{- end}} + spec: + containers: + - name: {{ template "message-delivery-monitor.name" . }} + image: {{ template "message-delivery-monitor.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + env: + {{- if .Values.nats.auth.enabled }} + - name: NATS_USER + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientUser }} + {{- else }} + value: {{ .Values.nats.auth.user }} + {{- end }} + - name: NATS_PASSWORD + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.nats.auth.password }} + {{- end }} + {{- end }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: NATS_ADDR + value: {{ tpl (.Values.nats.server) . | quote }} + - name: NATS_STREAMING_CLUSTER_ID + value: {{ tpl (.Values.stan.clusterID) . 
| quote }} + - name: NATS_STREAMING_MONITORING_ENDPOINT + value: {{ .Values.stan.monitor_channel | quote }} + livenessProbe: + httpGet: + path: /metrics + port: {{ .Values.service.port }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + selector: + matchLabels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/service.yaml b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/service.yaml new file mode 100644 index 0000000..b45beeb --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/templates/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.metrics.prometheus.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "message-delivery-monitor.name" . }} + selector: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/values.yaml b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/values.yaml new file mode 100644 index 0000000..b0ee34d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/message-delivery-monitor/values.yaml @@ -0,0 +1,90 @@ +## Default values for the message-delivery-monitor Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## Sets service log level +logLevel: info + +## NATS configuration +## +nats: + ## Comma seperated list of NATS servers + server: "nats://{{ .Release.Name }}-nats-client:4222" + ## For localdev use this configuration instead + # servers: "nats://messaging-nats-client:4222" + + auth: + enabled: false + ## NATS client authentication user + ## user: + + ## password: + + # secretName: "{{ .Release.Name }}-message-delivery-monitor-secret" + # seceretClientUser: "client-user" + # seceretClientPassword: "client-password" + +## NATS Streaming configuration +## +stan: + ## NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + ## For localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + + ## Channels to send monitoring messages on + monitorChannel: "monitor-channel" + +image: + registry: ghcr.io + repository: qlik-download/message-delivery-monitor + tag: 0.1.1 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +## Secrets for pulling images from a private Docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. 
+## +replicaCount: 1 + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: + ## Pod label required to allow communication with NATS + "{{ .Release.Name }}-nats-client": "true" + ## Pod label required to allow communication with NATS Streaming Monitoring endpoint + "{{ .Release.Name }}-nats-streaming-admin": "true" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services +## +service: + type: ClusterIP + port: 8080 + +## Metrics configuration +## +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/Chart.yaml new file mode 100644 index 0000000..869289d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +appVersion: 0.6.0 +description: A NATS Streaming cluster setup +home: https://nats.io/ +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +name: nats-streaming +version: 0.4.0 diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/README.md b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/README.md new file mode 100644 index 0000000..bbb2ccb --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/README.md @@ -0,0 +1,137 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that will form a clusters can connect to. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster on the `nats-io` namespace. Then, to +install a NATS Streaming cluster the URL to the NATS cluster can be +specified as follows (using `my-release` for a name label for the +cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod which is +configured to be in bootstrapping mode, which will start as the leader +of the Raft group as soon as it joins. 
+ +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that in case the bootstrapping Pod fails then it will not be +recreated and instead one of the extra follower Pods will take over +the leadership. The follower Pods are part of a Deployment so those +in case of failure they will be recreated. + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` | +| `cluster_raft_logging` | Used for raft related debugging | `false` | +| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` | +| `store` | Storage options (Support values are `memory` and `file`) | `file` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| 
`antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `file.compactEnabled` | Enable compaction | true | +| `file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `file.crc` | Enable file CRC-32 checksum | true | +| `file.sync` | Enable File.Sync on Flush | true | +| `file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.size` | Persistence volume size | `nil` | +| `persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. 
Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/NOTES.txt b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..69ba8e6 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . -}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + + +{{/* Return nats-streaming storage class name */}} +{{- define "nats-streaming.StorageClassName" -}} +{{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.persistence.storageClass }} + {{- end -}} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/pvc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/pvc.yaml new file mode 100644 index 0000000..dacfc5d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nats-streaming.fullname" . }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- include "nats-streaming.StorageClassName" . 
| nindent 2 }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/sc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..6b08ff8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.storageClass }} +{{ toYaml .Values.persistence.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/statefulset.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..d270f30 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,256 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . 
}} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + {{- if .Values.clustered }} + "-clustered", + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- end }} + + {{- if .Values.ftGroup }} + "--ft_group", "{{.Values.ftGroup}}", + {{- end}} + + "--store", "{{ .Values.store }}", + {{- if eq .Values.store "file" }} + {{- if .Values.clustered }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + {{- else }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . 
}}/{{ .Release.Name }}-nats-streaming-0/data", + {{- end }} + + {{- if .Values.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.file.bufferSize }}", + {{- if .Values.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.file.crcPoly }}", + {{- end }} + {{- if .Values.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.file.sliceMaxAge }}", + {{- if ne .Values.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.file.parallelRecovery }}", + {{- end}} + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if .Values.persistence.enabled }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "nats-streaming.fullname" . 
}} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/values.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/values.yaml new file mode 100644 index 0000000..1c32da9 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats-streaming/values.yaml @@ -0,0 +1,328 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Run NATS Streaming in clustered mode (incompatible with ftGroup value) +# https://github.com/nats-io/nats-streaming-server#clustering +clustered: false + +# Use for raft related debugging +cluster_raft_logging: false + +# Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) +# https://github.com/nats-io/nats-streaming-server#fault-tolerance +ftGroup: ~ + +store: "file" + +file: + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. 
+ ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + +persistence: + # If false, emptyDir will be used as a volume. + enabled: false + + ## Persistence volume default size + # size: 10Gi + + ## nats-streaming Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + internalStorageClass: + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/Chart.yaml new file mode 100644 index 0000000..07f566c --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +home: https://nats.io/ +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png +keywords: +- nats +- messaging +- addressing +- discovery +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: nats +sources: +- https://github.com/bitnami/bitnami-docker-nats +version: 2.4.1 diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/README.md b/qliksense/charts/api-keys/charts/messaging/charts/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/NOTES.txt b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
+{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/client-svc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/cluster-svc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/configmap.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/configmap.yaml new file mode 100644 index 0000000..072826b --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/configmap.yaml @@ -0,0 +1,114 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + { + user: {{ .user | quote }}, + {{- if .password }} + password: {{ .password | quote }}, + {{- end }} + {{- if .permissions }} + permissions: {{ toJson .permissions | replace "\\u003e" ">"}} + {{- end }} + } + {{- end }} + {{- if .Values.auth.monitor.enabled }} + { + user: {{ .Values.auth.monitor.user | quote }}, + password: {{ .Values.auth.monitor.password | quote }}, + permissions: {{ toJson .Values.auth.monitor.permissions | replace "\\u003e" ">"}} + } + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/headless-svc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . }} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/ingress.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/monitoring-svc.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/networkpolicy.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/statefulset.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/statefulset.yaml new file mode 100644 index 0000000..3c7d71a --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/statefulset.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) $ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/tls-secret.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/charts/nats/values.yaml b/qliksense/charts/api-keys/charts/messaging/charts/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/charts/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
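+## For example (illustrative values, not the chart defaults), rolling updates
+## that only touch Pods with an ordinal >= 2 could be requested with:
+##   statefulset:
+##     updateStrategy: RollingUpdate
+##     rollingUpdatePartition: 2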
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
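+    ## For example, an internal load balancer can usually be requested with a
+    ## cloud-provider specific annotation (the AWS key below is one assumption;
+    ## check your provider's documentation):
+    ##   annotations:
+    ##     service.beta.kubernetes.io/aws-load-balancer-internal: "true"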
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
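+    ## Such a secret can be created ahead of time, for example (the secret and
+    ## credential names below are placeholders):
+    ##   kubectl create secret docker-registry my-registry-secret \
+    ##     --docker-server=docker.io --docker-username=<user> --docker-password=<password>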
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+  ## Metrics exporter port
+  port: 7777
+  ## Metrics exporter annotations
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "7777"
+  ## Metrics exporter flags
+  args:
+    - -connz
+    - -routez
+    - -subz
+    - -varz
+
+sidecars:
+## Add sidecars to the pod.
+## e.g.
+# - name: your-image-name
+  # image: your-image
+  # imagePullPolicy: Always
+  # ports:
+  #   - name: portname
+  #     containerPort: 1234
diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/Chart.yaml
new file mode 100644
index 0000000..66a9006
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: >
+  Service that monitors NATS/NATS-Streaming message delivery metrics
+name: message-delivery-monitor
+version: 0.1.0
+home: https://www.qlik.com
+sources:
+  - https://github.com/qlik-trial/message-delivery-monitor
diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/README.md b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/README.md
new file mode 100644
index 0000000..a770466
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/README.md
@@ -0,0 +1,70 @@
+# message-delivery-monitor
+
+[message-delivery-monitor](https://github.com/qlik-trial/message-delivery-monitor) is responsible for measuring delivery and latency of NATS/NATS-Streaming.
+
+## Introduction
+
+This chart bootstraps a message-delivery-monitor deployment on a [Kubernetes](http://kubernetes.io) cluster using the
+[Helm](https://helm.sh) package manager.
+
+## Installing the chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/message-delivery-monitor
+```
+
+The command deploys message-delivery-monitor on the Kubernetes cluster in the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the message-delivery-monitor chart and their default values.
+
+| Parameter | Description | Default |
+| -------------------------------- | ------------------------------------------------------------------ | --------------------------------------------------- |
+| `image.registry` | Image registry | `qliktech-docker.jfrog.io` |
+| `image.repository` | Image repository | `message-delivery-monitor` |
+| `image.tag` | Image version | `0.1.0` |
+| `image.pullPolicy` | Image pull policy\* | `IfNotPresent` |
+| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` |
+| `logLevel` | Level of logging | `info` |
+| `nats.server` | NATS server address | `nats://{{ .Release.Name }}-nats-client:4222` |
+| `nats.auth.enabled` | Enable authentication to NATS | `false` |
+| `nats.auth.user` | Username to authenticate to NATS | `nil` |
+| `nats.auth.password` | Password to authenticate to NATS | `nil` |
+| `nats.auth.secretName` | Read the user/password from the K8s secret with this name | `nil` |
+| `nats.auth.secretClientUser` | Key in the K8s secret from which to read the username | `nil` |
+| `nats.auth.secretClientPassword` | Key in the K8s secret from which to read the password | `nil` |
+| `stan.clusterID` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` |
+| `stan.monitorChannel` | NATS Streaming channel to monitor on | `monitor-channel` |
+| `resources` | CPU/Memory resource requests/limits | {} |
+| `service.type` | Service type | `ClusterIP` |
+| `service.port` | message-delivery-monitor listen port | `8080` |
+| `metrics.prometheus.enabled` | Whether Prometheus metrics are enabled | `true` |
+
+(\*) If setting `image.tag` to `latest`, it is recommended to change `image.pullPolicy` to `Always`
+
+### Setting Parameters
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
+For example,
+
+```console
+helm install --name my-release -f values.yaml qlik/message-delivery-monitor
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/_helper.tpl b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/_helper.tpl
new file mode 100644
index 0000000..1ac7ca5
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/_helper.tpl
@@ -0,0 +1,47 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "message-delivery-monitor.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If the release name contains the chart name, it will be used as the full name.
+*/}} +{{- define "message-delivery-monitor.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper message-delivery-monitor image name +*/}} +{{- define "message-delivery-monitor.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/deployment.yaml b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/deployment.yaml new file mode 100644 index 0000000..a48a06f --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} + {{- range $key, $val := .Values.podLabels }} + {{- if tpl ($val) $}} + {{ tpl ($key) $ }}: {{ tpl ($val) $ | quote }} + {{- end }} + {{- end}} + spec: + containers: + - name: {{ template "message-delivery-monitor.name" . }} + image: {{ template "message-delivery-monitor.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + env: + {{- if .Values.nats.auth.enabled }} + - name: NATS_USER + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientUser }} + {{- else }} + value: {{ .Values.nats.auth.user }} + {{- end }} + - name: NATS_PASSWORD + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.nats.auth.password }} + {{- end }} + {{- end }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: NATS_ADDR + value: {{ tpl (.Values.nats.server) . | quote }} + - name: NATS_STREAMING_CLUSTER_ID + value: {{ tpl (.Values.stan.clusterID) . 
| quote }} + - name: NATS_STREAMING_MONITORING_ENDPOINT + value: {{ .Values.stan.monitor_channel | quote }} + livenessProbe: + httpGet: + path: /metrics + port: {{ .Values.service.port }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + selector: + matchLabels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/service.yaml b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/service.yaml new file mode 100644 index 0000000..b45beeb --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/templates/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.metrics.prometheus.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "message-delivery-monitor.name" . }} + selector: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/values.yaml b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/values.yaml new file mode 100644 index 0000000..b0ee34d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/message-delivery-monitor/values.yaml @@ -0,0 +1,90 @@ +## Default values for the message-delivery-monitor Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## Sets service log level +logLevel: info + +## NATS configuration +## +nats: + ## Comma seperated list of NATS servers + server: "nats://{{ .Release.Name }}-nats-client:4222" + ## For localdev use this configuration instead + # servers: "nats://messaging-nats-client:4222" + + auth: + enabled: false + ## NATS client authentication user + ## user: + + ## password: + + # secretName: "{{ .Release.Name }}-message-delivery-monitor-secret" + # seceretClientUser: "client-user" + # seceretClientPassword: "client-password" + +## NATS Streaming configuration +## +stan: + ## NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + ## For localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + + ## Channels to send monitoring messages on + monitorChannel: "monitor-channel" + +image: + registry: ghcr.io + repository: qlik-download/message-delivery-monitor + tag: 0.1.1 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +## Secrets for pulling images from a private Docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. 
+## +replicaCount: 1 + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: + ## Pod label required to allow communication with NATS + "{{ .Release.Name }}-nats-client": "true" + ## Pod label required to allow communication with NATS Streaming Monitoring endpoint + "{{ .Release.Name }}-nats-streaming-admin": "true" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services +## +service: + type: ClusterIP + port: 8080 + +## Metrics configuration +## +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/Chart.yaml new file mode 100644 index 0000000..af41960 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +description: A NATS Streaming cluster setup +name: nats-streaming +version: 0.4.0 +appVersion: 0.6.0 +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +home: https://nats.io/ diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/README.md b/qliksense/charts/api-keys/charts/messaging/nats-streaming/README.md new file mode 100644 index 0000000..bbb2ccb --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/README.md @@ -0,0 +1,137 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that will form a clusters can connect to. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster on the `nats-io` namespace. Then, to +install a NATS Streaming cluster the URL to the NATS cluster can be +specified as follows (using `my-release` for a name label for the +cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod which is +configured to be in bootstrapping mode, which will start as the leader +of the Raft group as soon as it joins. + +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that in case the bootstrapping Pod fails then it will not be +recreated and instead one of the extra follower Pods will take over +the leadership. 
The follower Pods are part of a Deployment, so they will be recreated in case of failure.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
+
+## Configuration
+
+| Parameter | Description | Default |
+| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- |
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `image.registry` | NATS Streaming image registry | `docker.io` |
+| `image.repository` | NATS Streaming Image name | `nats-streaming` |
+| `image.tag` | NATS Streaming Image tag | `{VERSION}` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` |
+| `auth.enabled` | Switch to enable/disable client authentication | `true` |
+| `auth.user` | Client authentication user | `nats_cluster` |
+| `auth.password` | Client authentication password | `random alphanumeric string (10)` |
+| `auth.token` | Client authentication token | `nil` |
+| `auth.secretName` | Client authentication secret name | `nil` |
+| `auth.secretKey` | Client authentication secret key | `nil` |
+| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` |
+| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` |
+| `maxChannels` | Max # of channels | `100` |
+| `maxSubs` | Max # of subscriptions per channel | `1000` |
+| `maxMsgs` | Max # of messages per channel | `"1000000"` |
+| `maxBytes` | Max messages total size per channel | `900mb` |
+| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` |
+| `debug` | Enable debugging | `false` |
+| `trace` | Enable detailed tracing | `false` |
+| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` |
+| `cluster_raft_logging` | Used for Raft-related debugging | `false` |
+| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` |
+| `store` | Storage options (Supported values are `memory` and `file`) | `file` |
+| `replicaCount` | Number of NATS Streaming nodes | `3` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `statefulset.updateStrategy` | StatefulSet update strategy | `RollingUpdate` |
+| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` |
+| `podLabels` | Additional labels to be added to pods | {} |
+| `podAnnotations` | Annotations to be added to pods | {} |
+| `nodeSelector` | Node labels for pod assignment | `nil` |
+| `schedulerName` | Name of an alternate scheduler | `nil` |
+| `antiAffinity` | Anti-affinity for pod assignment | `soft` |
+| `tolerations` | Toleration labels for pod assignment | `nil` |
+| `resources` | CPU/Memory resource requests/limits | {} |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful 
after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `file.compactEnabled` | Enable compaction | true | +| `file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `file.crc` | Enable file CRC-32 checksum | true | +| `file.sync` | Enable File.Sync on Flush | true | +| `file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.size` | Persistence volume size | `nil` | +| `persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/NOTES.txt b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . 
}}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..69ba8e6 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . 
-}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + + +{{/* Return nats-streaming storage class name */}} +{{- define "nats-streaming.StorageClassName" -}} +{{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.persistence.storageClass }} + {{- end -}} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . 
}} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/pvc.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/pvc.yaml new file mode 100644 index 0000000..dacfc5d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nats-streaming.fullname" . }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- include "nats-streaming.StorageClassName" . | nindent 2 }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/sc.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..6b08ff8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.storageClass }} +{{ toYaml .Values.persistence.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/statefulset.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..d270f30 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,256 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . 
}}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + {{- if .Values.clustered }} + "-clustered", + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- end }} + + {{- if .Values.ftGroup }} + "--ft_group", "{{.Values.ftGroup}}", + {{- end}} + + "--store", "{{ .Values.store }}", + {{- if eq .Values.store "file" }} + {{- if .Values.clustered }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . 
}}/$(POD_NAME)/data", + {{- else }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/{{ .Release.Name }}-nats-streaming-0/data", + {{- end }} + + {{- if .Values.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.file.bufferSize }}", + {{- if .Values.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.file.crcPoly }}", + {{- end }} + {{- if .Values.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.file.sliceMaxAge }}", + {{- if ne .Values.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.file.parallelRecovery }}", + {{- end}} + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if .Values.persistence.enabled }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "nats-streaming.fullname" . 
}} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats-streaming/values.yaml b/qliksense/charts/api-keys/charts/messaging/nats-streaming/values.yaml new file mode 100644 index 0000000..1c32da9 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats-streaming/values.yaml @@ -0,0 +1,328 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Run NATS Streaming in clustered mode (incompatible with ftGroup value) +# https://github.com/nats-io/nats-streaming-server#clustering +clustered: false + +# Use for raft related debugging +cluster_raft_logging: false + +# Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) +# https://github.com/nats-io/nats-streaming-server#fault-tolerance +ftGroup: ~ + +store: "file" + +file: + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. 
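+  ## The settings below are passed to the server as the matching --file_*
+  ## flags by templates/statefulset.yaml. As an illustration only (example
+  ## values, not tuned recommendations), an override in a custom values file
+  ## might look like:
+  ##   file:
+  ##     bufferSize: "4194304"    # 4MB write buffer
+  ##     compactFrag: 40          # compact once 40% of a file is fragmented
+  ##     compactInterval: 600     # at most one compaction every 10 minutes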
+ ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + +persistence: + # If false, emptyDir will be used as a volume. + enabled: false + + ## Persistence volume default size + # size: 10Gi + + ## nats-streaming Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + internalStorageClass: + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
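+  ## For example (illustrative secret name and registry), such a secret can
+  ## be created ahead of time with:
+  ##   kubectl create secret docker-registry myRegistryKeySecretName \
+  ##     --docker-server=docker.io --docker-username=<user> --docker-password=<password>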
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/api-keys/charts/messaging/nats/Chart.yaml b/qliksense/charts/api-keys/charts/messaging/nats/Chart.yaml new file mode 100644 index 0000000..f8596f4 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 2.4.1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addressing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/qliksense/charts/api-keys/charts/messaging/nats/README.md b/qliksense/charts/api-keys/charts/messaging/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
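+
+Each of these can be overridden at install time, either with `--set` flags or with a custom values file passed via `-f` (both approaches are shown in the examples after the table). For instance, a minimal values file enabling client authentication might look like:
+
+```yaml
+auth:
+  enabled: true
+  user: my-user
+  password: T0pS3cr3t
+```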
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/NOTES.txt b/qliksense/charts/api-keys/charts/messaging/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
+{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/messaging/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/client-svc.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/cluster-svc.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/configmap.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/configmap.yaml new file mode 100644 index 0000000..072826b --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/configmap.yaml @@ -0,0 +1,114 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + { + user: {{ .user | quote }}, + {{- if .password }} + password: {{ .password | quote }}, + {{- end }} + {{- if .permissions }} + permissions: {{ toJson .permissions | replace "\\u003e" ">"}} + {{- end }} + } + {{- end }} + {{- if .Values.auth.monitor.enabled }} + { + user: {{ .Values.auth.monitor.user | quote }}, + password: {{ .Values.auth.monitor.password | quote }}, + permissions: {{ toJson .Values.auth.monitor.permissions | replace "\\u003e" ">"}} + } + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/headless-svc.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/ingress.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/monitoring-svc.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/networkpolicy.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/statefulset.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/statefulset.yaml new file mode 100644 index 0000000..3c7d71a --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/statefulset.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/templates/tls-secret.yaml b/qliksense/charts/api-keys/charts/messaging/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/nats/values.yaml b/qliksense/charts/api-keys/charts/messaging/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/api-keys/charts/messaging/requirements.yaml b/qliksense/charts/api-keys/charts/messaging/requirements.yaml new file mode 100644 index 0000000..87ba686 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/requirements.yaml @@ -0,0 +1,19 @@ +dependencies: + - name: message-delivery-monitor + version: 0.1.0 + repository: "file://./message-delivery-monitor" + # message-delivery-monitor.monitor.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.message-delivery-monitor.enabled,message-delivery-monitor.enabled + - name: nats + version: 2.4.1 + repository: "file://./nats" + # messaging.nats.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats.enabled,nats.enabled + - name: nats-streaming + version: 0.4.0 + repository: "file://./nats-streaming" + # messaging.nats-streaming.enabled is used by services that depend on the messaging chart to enable or disable nats streaming + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats-streaming.enabled,nats-streaming.enabled diff --git a/qliksense/charts/api-keys/charts/messaging/templates/_helper.tpl b/qliksense/charts/api-keys/charts/messaging/templates/_helper.tpl new file mode 100644 index 0000000..d03e4d7 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/templates/_helper.tpl @@ -0,0 +1,38 @@ +{{- define "messaging.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "messaging.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "messaging.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.name" -}} +{{- "nats" -}} +{{- end -}} + +{{- define "nats.fullname" -}} +{{- $name := "nats" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming.name" -}} +{{- "nats-streaming" -}} +{{- end -}} + +{{- define "nats-streaming.fullname" -}} +{{- $name := "nats-streaming" -}} 
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/templates/message-delivery-monitor-secret.yaml b/qliksense/charts/api-keys/charts/messaging/templates/message-delivery-monitor-secret.yaml new file mode 100644 index 0000000..f17e47d --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/templates/message-delivery-monitor-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name }}-message-delivery-monitor-secret +data: + {{ if .Values.nats.auth.monitor.enabled }} + client-user: {{ print .Values.nats.auth.monitor.user | b64enc }} + client-password: {{ print .Values.nats.auth.monitor.password | b64enc }} + {{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/templates/nats-secret.yaml b/qliksense/charts/api-keys/charts/messaging/templates/nats-secret.yaml new file mode 100644 index 0000000..92ffbe4 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/templates/nats-secret.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name }}-nats-secret +data: + {{ if .Values.nats.auth.enabled }} + {{ if .Values.nats.auth.user }} + client-user: {{ print .Values.nats.auth.user | b64enc }} + client-password: {{ print .Values.nats.auth.password | b64enc }} + {{ else if .Values.nats.auth.users }} + client-user: {{ print (index .Values.nats.auth.users 0).user | b64enc }} + client-password: {{ print (index .Values.nats.auth.users 0).password | b64enc }} + {{- end -}} + {{- end -}} diff --git a/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats-streaming.yaml b/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats-streaming.yaml new file mode 100644 index 0000000..cd855c0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats-streaming.yaml @@ -0,0 +1,51 @@ +{{- if and (index .Values "nats-streaming" "enabled") (index .Values "networkPolicy" "nats-streaming" "enabled") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats-streaming.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ index .Values "nats-streaming" "monitoring" "service" "port" }} + from: + - podSelector: + matchLabels: + {{ template "nats-streaming.fullname" . }}-admin: "true" + - ports: + - port: {{ index .Values "nats-streaming" "metrics" "port" }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . 
}}" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats.yaml b/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats.yaml new file mode 100644 index 0000000..df645c6 --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/templates/networkpolicy-nats.yaml @@ -0,0 +1,51 @@ +{{- if and (.Values.nats.enabled) (.Values.networkPolicy.nats.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ .Values.nats.client.service.port }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + - ports: + - port: {{ .Values.nats.metrics.port }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "keys" + release: {{ tpl ( .Values.networkPolicy.keys.release ) . | quote }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/messaging/values.yaml b/qliksense/charts/api-keys/charts/messaging/values.yaml new file mode 100644 index 0000000..3cf8fbd --- /dev/null +++ b/qliksense/charts/api-keys/charts/messaging/values.yaml @@ -0,0 +1,474 @@ +## Default values for the messaging Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. 
+ +## NATS configuration +## +nats: + ## Enables NATS chart by default + enabled: true + + securityContext: + enabled: false + + ## Image pull policy for NATS chart + image: + registry: ghcr.io + repository: qlik-download/qnatsd + tag: 0.3.1 + pullPolicy: IfNotPresent + pullSecrets: + - name: artifactory-docker-secret + + ## Number of NATS nodes + replicaCount: 1 + + ## NATS statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS svc used for client connections + ## ref: https://github.com/nats-io/gnatsd#running + ## + client: + service: + type: ClusterIP + port: 4222 + + clientAdvertise: "{{.Release.Name}}-nats-client:4222" + + ## Kubernetes svc used for clustering + ## ref: https://github.com/nats-io/gnatsd#clustering + ## + cluster: + service: + type: ClusterIP + port: 6222 + # noAdvertise: false + + ## NATS svc used for monitoring + ## ref: https://github.com/nats-io/gnatsd#monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + ## Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## Client Authentication + auth: + enabled: true + + monitor: + enabled: false + user: "delivery-monitor" + password: password + permissions: + publish: + - "monitor-channel" + - "_STAN.pub.*.monitor-channel" + - "_STAN.discover.>" + - "_STAN.close.>" + - "_STAN.discover.*.ping" + - "_STAN.sub.>" + - "_STAN.unsub.>" + - "_STAN.subclose.>" + - "_INBOX.>" + subscribe: + - "monitor-channel" + - "_STAN.acks.>" + - "_INBOX.>" + + users: + - user: "nats_client" + password: T0pS3cr3t + + ## Configuration of users that are authenticated used JWTs + ## Users can be configured with permissions to allow or deny publish/subscribe access to subjects + ## ref: https://nats.io/documentation/managing_the_server/authorization/ + ## + jwtUsers: + - user: "audit" + stanPermissions: + subscribe: + - "system-events.odag.request" + - "system-events.engine.app" + - "system-events.user-session" + - "system-events.spaces" + - "system-events.licenses" + - "system-events.generic-links" + - "system-events.api-keys" + - "system-events.user-identity" + - "system-events.web-security" + - user: "chronos-worker" + stanPermissions: + publish: + - "chronos-worker.>" + - user: "data-engineering-exporter" + stanPermissions: + subscribe: + - "system-events.>" + - user: "edge-auth" + stanPermissions: + publish: + - "system-events.user-session" + - "system-events.user-identity" + subscribe: + - "system-events.users" + - "system-events.user-session" + - "system-events.identity-providers" + - "private.idp-sync" + - user: "engine" + stanPermissions: + publish: + - "com.qlik.app" + - "com.qlik.engine.session" + - "system-events.engine.app" + - "system-events.engine.session" + - user: "identity-providers" + stanPermissions: + publish: + - "private.idp-sync" + - "system-events.identity-providers" + - user: "invite" + stanPermissions: + subscribe: + - "system-events.users" + publish: + - "system-events.invite" + - user: "nl-parser" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "odag" + stanPermissions: + publish: + - "system-events.odag.request" + - "odag.>" + subscribe: + - "odag.>" + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-data-reload" + stanPermissions: + publish: + - "reload" + - "system-events.reloadResults" + subscribe: + - "reload" + - user: "resource-library" + stanPermissions: + publish: + - "system-events.resource-library" + - user: "tenants" + stanPermissions: + publish: + - 
"system-events.tenants" + - "system-events.web-integrations" + - user: "users" + stanPermissions: + publish: + - "system-events.users" + - user: "api-keys" + stanPermissions: + publish: + - "system-events.api-keys" + - user: "collections" + stanPermissions: + publish: + - "system-events.items" + subscribe: + - "system-events.engine.app" + - user: "licenses" + stanPermissions: + publish: + - "system-events.licenses" + subscribe: + - "system-events.licenses" + - user: "spaces" + stanPermissions: + publish: + - "system-events.spaces" + subscribe: + - "system-events.tenants" + - user: "precedents" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "eventing" + stanPermissions: + publish: + - "system-events.notification-request" + subscribe: + - "system-events.engine.app" + - user: "qix-sessions" + stanPermissions: + subscribe: + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-datafiles" + stanPermissions: + subscribe: + - "system-events.engine.app" + - "system-events.spaces" + - user: "sharing" + stanPermissions: + subscribe: + - "system-events.engine.app" + publish: + - "system-events.notification-request" + - "system-events.sharing" + - user: "subscriptions" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "notification-prep" + stanPermissions: + subscribe: + - "system-events.notification-request" + publish: + - "system-events.transport-request" + - user: "web-notifications" + stanPermissions: + subscribe: + - "system-events.transport-request" + publish: + - "system-events.web-notifications" + - user: "generic-links" + stanPermissions: + publish: + - "system-events.generic-links" + - user: "data-connections" + stanPermissions: + subscribe: + - "system-events.spaces" + - user: "transport" + stanPermissions: + subscribe: + - "system-events.transport-request" + publish: + - "system-events.transport-response" + - user: "web-security" + stanPermissions: + publish: + - "system-events.web-security" + - user: "reload-tasks" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "reporting" + stanPermissions: + publish: + - "reporting.>" + + extraArgs: + - --jwt_users_file=/opt/bitnami/nats/users.json + - --jwt_auth_url=http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## for localdev use this configuration instead + # - --jwt_auth_url=http://keys:8080/v1/keys/qlik.api.internal + + ## Cluster Authentication + clusterAuth: + enabled: false + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +## NATS Streaming configuration +## +nats-streaming: + enabled: true + + securityContext: + enabled: false + + ## NATS Streaming image + image: + registry: ghcr.io + repository: qlik-download/nats-streaming + tag: 0.14.2 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + + ## NATS Streaming replicas + replicaCount: 3 + + ## NATS Streaming statefulset configurations + # statefulset: + # updateStrategy: RollingUpdate + + ## NATS Streaming extra options for liveness and readiness probes + readinessProbe: + enabled: true + initialDelaySeconds: 30 + + ## NATS Streaming svc used for monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + # Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## NATS Streaming cluster id + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + + ## NATS server + natsSvc: "nats://{{ .Release.Name }}-nats-client:4222" + + ## NATS server client Authentication + auth: + enabled: true + secretName: "{{ .Release.Name }}-nats-secret" + secretClientUser: "client-user" + secretClientPassword: "client-password" + + ## Use for general debugging. Enabling this will negatively affect performance. + debug: true + + # Interval at which server sends heartbeat to a client + hbInterval: 10s + + # How long server waits for a heartbeat response + hbTimeout: 10s + + # Number of failed heartbeats before server closes the client connection + hbFailCount: 5 + + # Run NATS Streaming in clustered mode (incompatible with ftGroup value) + # https://github.com/nats-io/nats-streaming-server#clustering + clustered: true + + # Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) + # https://github.com/nats-io/nats-streaming-server#fault-tolerance + # ftGroup: "myGroupName" + + persistence: + ## If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + ## Normally the storage class should be created outside this helm chart + ## If we want to deploy a storage class as part of the helm chart + ## - Provide a storageClassName above. + ## - set enabled true + ## - provide a storage class definition. + + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. 
+ ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + maxAge: "2h" + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -channelz + - -serverz + + +## NATS and NATS Streaming Network Policy +## +networkPolicy: + ## NATS + nats: + enabled: false + ## NATS Streaminng + nats-streaming: + enabled: false + ## Keys + keys: + ## Set keys release name for egress rules + release: "{{ .Release.Name }}" + +message-delivery-monitor: + enabled: false + + nats: + auth: + enabled: true + secretName: "{{ .Release.Name }}-message-delivery-monitor-secret" + secretClientUser: "client-user" + secretClientPassword: "client-password" diff --git a/qliksense/charts/api-keys/charts/mongodb/.helmignore b/qliksense/charts/api-keys/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/api-keys/charts/mongodb/Chart.yaml b/qliksense/charts/api-keys/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. 
+home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/api-keys/charts/mongodb/OWNERS b/qliksense/charts/api-keys/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/api-keys/charts/mongodb/README.md b/qliksense/charts/api-keys/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alhpanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alhpanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `configmap` | MongoDB configuration file to be used | `nil` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \ + stable/mongodb +``` + +The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/mongodb +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Replication + +You can start the MongoDB chart in replica set mode with the following command: + +```bash +$ helm install --name my-release stable/mongodb --set replication.enabled=true +``` +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available MongoDB deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/mongodb +``` + +To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set. + +```console +$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3 +``` + +Some characteristics of this chart are: + +* Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes. +* The number of secondary and arbiter nodes can be scaled out independently. +* Easy to move an application from using a standalone MongoDB server to use a replica set. + +## Initialize a fresh instance + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +The allowed extensions are `.sh`, and `.js`. + +## Persistence + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. 
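+
+For example, the volume size and storage class can be overridden at install time via the `persistence.size` and `persistence.storageClass` parameters listed in the table above (the storage class name `standard` below is only a placeholder for one available in your cluster):
+
+```bash
+$ helm install --name my-release \
+  --set persistence.size=20Gi \
+  --set persistence.storageClass=standard \
+  stable/mongodb
+```
+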
The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/api-keys/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/api-keys/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/NOTES.txt b/qliksense/charts/api-keys/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/configmap.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/secrets.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/api-keys/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/api-keys/charts/mongodb/values-production.yaml b/qliksense/charts/api-keys/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/api-keys/charts/mongodb/values.yaml b/qliksense/charts/api-keys/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/api-keys/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
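+  ## A hedged illustration (not an upstream default): this values.yaml ships with
+  ## replicaSet.enabled set to false; a minimal override that turns replication on
+  ## only needs the keys documented in the replicaSet block below, e.g.
+  ##   replicaSet:
+  ##     enabled: true
+  ##     key: my-replica-set-key
+  ## If no key is supplied, templates/secrets.yaml falls back to a random
+  ## randAlphaNum value for mongodb-replica-set-key.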
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/api-keys/charts/qlikcommon/.helmignore b/qliksense/charts/api-keys/charts/qlikcommon/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/api-keys/charts/qlikcommon/Chart.yaml b/qliksense/charts/api-keys/charts/qlikcommon/Chart.yaml new file mode 100644 index 0000000..484b110 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 1.0.14 +description: Qlik resource contract chartbuilding components and helpers +home: https://github.com/qlik-trial/resource-contract +maintainers: +- email: boris.kuschel@qlik.com + name: bkuschel +name: qlikcommon +version: 1.2.4 diff --git a/qliksense/charts/api-keys/charts/qlikcommon/README.md b/qliksense/charts/api-keys/charts/qlikcommon/README.md new file mode 100644 index 0000000..664b529 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/README.md @@ -0,0 +1,837 @@ +# Qlik Common + +This chart is based off of the Common helper chart hosts in the kubernetes incubator +helm chart repo. Documentation below. + +## Common: The Helm Helper Chart + +This chart is designed to make it easier for you to build and maintain Helm +charts. + +It provides utilities that reflect best practices of Kubernetes chart development, +making it faster for you to write charts. + +## Tips + +A few tips for working with Common: + +- Be careful when using functions that generate random data (like `common.fullname.unique`). + They may trigger unwanted upgrades or have other side effects. + +In this document, we use `RELEASE-NAME` as the name of the release. + +## Resource Kinds + +Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`. +We define some of the most common kinds in a way that lets you easily work with +them. + +The resource kind templates are designed to make it much faster for you to +define _basic_ versions of these resources. They allow you to extend and modify +just what you need, without having to copy around lots of boilerplate. + +To make use of these templates you must define a template that will extend the +base template (though it can be empty). The name of this template is then passed +to the base template, for example: + +```yaml +{{- template "common.service" (list . "mychart.service") -}} +{{- define "mychart.service" -}} +## Define overrides for your Service resource here, e.g. 
+# metadata: +# labels: +# custom: label +# spec: +# ports: +# - port: 8080 +{{- end -}} +``` + +Note that the `common.service` template defines two parameters: + + - The root context (usually `.`) + - A template name containing the service definition overrides + +A limitation of the Go template library is that a template can only take a +single argument. The `list` function is used to workaround this by constructing +a list or array of arguments that is passed to the template. + +The `common.service` template is responsible for rendering the templates with +the root context and merging any overrides. As you can see, this makes it very +easy to create a basic `Service` resource without having to copy around the +standard metadata and labels. + +Each implemented base resource is described in greater detail below. + +### `common.service` + +The `common.service` template creates a basic `Service` resource with the +following defaults: + +- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type` +- Named port `http` configured on port 80 +- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource + +Example template: + +```yaml +{{- template "common.service" (list . "mychart.mail.service") -}} +{{- define "mychart.mail.service" -}} +metadata: + name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: mail +spec: + ports: # composes the `ports` section of the service definition. + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: # this is appended to the default selector + protocol: mail +{{- end -}} +--- +{{ template "common.service" (list . "mychart.web.service") -}} +{{- define "mychart.web.service" -}} +metadata: + name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: www +spec: + ports: # composes the `ports` section of the service definition. + - name: www + port: 80 + targetPort: 8080 +{{- end -}} +``` + +The above template defines _two_ services: a web service and a mail service. + +The most important part of a service definition is the `ports` object, which +defines the ports that this service will listen on. Most of the time, +`selector` is computed for you. But you can replace it or add to it. + +The output of the example above is: + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: mail + release: release-name + name: release-name-service-mail +spec: + ports: + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: + app: service + release: release-name + protocol: mail + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: www + release: release-name + name: release-name-service-www +spec: + ports: + - name: www + port: 80 + targetPort: 8080 + type: ClusterIP +``` + +## `common.deployment` + +The `common.deployment` template defines a basic `Deployment`. Underneath the +hood, it uses `common.container` (see next section). + +By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}` +and `release: {{ .Release.Name | quote }` as this is also used as the selector. 
The +standard set of labels are not used as some of these can change during upgrades, +which causes the replica sets and pods to not correctly match. + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + replicas: {{ .Values.replicaCount }} +{{- end -}} +``` + +## `common.container` + +The `common.container` template creates a basic `Container` spec to be used +within a `Deployment` or `ReplicaSet`. It holds the following defaults: + +- The name is set to `main` +- Uses `.Values.image` to describe the image to run, with the following spec: + ```yaml + image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + ``` +- Exposes the named port `http` as port 80 +- Lays out the compute resources using `.Values.resources` + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "mychart.deployment.container") | indent 8}} +{{- end -}} +{{- define "mychart.deployment.container" -}} +## Define overrides for your Container here, e.g. +livenessProbe: + httpGet: + path: / + port: 80 +readinessProbe: + httpGet: + path: / + port: 80 +{{- end -}} +``` + +The above example creates a `Deployment` resource which makes use of the +`common.container` template to populate the PodSpec's container list. The usage +of this template is similar to the other resources, you must define and +reference a template that contains overrides for the container object. + +The most important part of a container definition is the image you want to run. +As mentioned above, this is derived from `.Values.image` by default. It is a +best practice to define the image, tag and pull policy in your charts' values as +this makes it easy for an operator to change the image registry, or use a +specific tag or version. Another example of configuration that should be exposed +to chart operators is the container's required compute resources, as this is +also very specific to an operators environment. An example `values.yaml` for +your chart could look like: + +```yaml +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +The output of running the above values through the earlier template is: + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: deployment + chart: deployment-0.1.0 + heritage: Tiller + release: release-name + name: release-name-deployment +spec: + template: + metadata: + labels: + app: deployment + spec: + containers: + - image: nginx:stable + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 80 + name: deployment + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: 80 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +## `common.configmap` + +The `common.configmap` template creates an empty `ConfigMap` resource that you +can override with your configuration. + +Example use: + +```yaml +{{- template "common.configmap" (list . 
"mychart.configmap") -}} +{{- define "mychart.configmap" -}} +data: + zeus: cat + athena: cat + julius: cat + one: |- + {{ .Files.Get "file1.txt" }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: cat + julius: cat + one: This is a file. + zeus: cat +kind: ConfigMap +metadata: + labels: + app: configmap + chart: configmap-0.1.0 + heritage: Tiller + release: release-name + name: release-name-configmap +``` + +## `common.secret` + +The `common.secret` template creates an empty `Secret` resource that you +can override with your secrets. + +Example use: + +```yaml +{{- template "common.secret" (list . "mychart.secret") -}} +{{- define "mychart.secret" -}} +data: + zeus: {{ print "cat" | b64enc }} + athena: {{ print "cat" | b64enc }} + julius: {{ print "cat" | b64enc }} + one: |- + {{ .Files.Get "file1.txt" | b64enc }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: Y2F0 + julius: Y2F0 + one: VGhpcyBpcyBhIGZpbGUuCg== + zeus: Y2F0 +kind: Secret +metadata: + labels: + app: secret + chart: secret-0.1.0 + heritage: Tiller + release: release-name + name: release-name-secret +type: Opaque +``` + +## `common.ingress` + +The `common.ingress` template is designed to give you a well-defined `Ingress` +resource, that can be configured using `.Values.ingress`. An example values file +that can be used to configure the `Ingress` resource is: + +```yaml +ingress: + hosts: + - chart-example.local + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + tls: + - secretName: chart-example-tls + hosts: + - chart-example.local +``` + +Example use: + +```yaml +{{- template "common.ingress" (list . "mychart.ingress") -}} +{{- define "mychart.ingress" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: + app: ingress + chart: ingress-0.1.0 + heritage: Tiller + release: release-name + name: release-name-ingress +spec: + rules: + - host: chart-example.local + http: + paths: + - backend: + serviceName: release-name-ingress + servicePort: 80 + path: / + tls: + - hosts: + - chart-example.local + secretName: chart-example-tls +``` + +## `common.persistentvolumeclaim` + +`common.persistentvolumeclaim` can be used to easily add a +`PersistentVolumeClaim` resource to your chart that can be configured using +`.Values.persistence`: + +| Value | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------- | +| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead | +| persistence.storageClass | `StorageClass` name | +| persistence.accessMode | Access mode for persistent volume | +| persistence.size | Size of persistent volume | +| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim | + +An example values file that can be used to configure the +`PersistentVolumeClaim` resource is: + +```yaml +persistence: + enabled: true + storageClass: fast + accessMode: ReadWriteOnce + size: 8Gi +``` + +Example use: + +```yaml +{{- template "common.persistentvolumeclaim" (list . 
"mychart.persistentvolumeclaim") -}} +{{- define "mychart.persistentvolumeclaim" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: persistentvolumeclaim + chart: persistentvolumeclaim-0.1.0 + heritage: Tiller + release: release-name + name: release-name-persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "fast" +``` + +## Partial API Objects + +When writing Kubernetes resources, you may find the following helpers useful to +construct parts of the spec. + +### EnvVar + +Use the EnvVar helpers within a container spec to simplify specifying key-value +environment variables or referencing secrets as values. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + containers: + - {{ template "common.container" (list . "mychart.deployment.container") }} +{{- end -}} +{{- define "mychart.deployment.container" -}} +{{- $fullname := include "common.fullname" . -}} +env: +- {{ template "common.envvar.value" (list "ZEUS" "cat") }} +- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + containers: + - env: + - name: ZEUS + value: cat + - name: ATHENA + valueFrom: + secretKeyRef: + key: athena + name: secret-name +... +``` + +### Volume + +Use the Volume helpers within a `Deployment` spec to help define ConfigMap and +PersistentVolumeClaim volumes. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + volumes: + - {{ template "common.volume.configMap" (list "config" "configmap-name") }} + - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + volumes: + - configMap: + name: configmap-name + name: config + - name: data + persistentVolumeClaim: + claimName: pvc-name +... +``` + +The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object: + +| Value | Description | +| ------------------------- | ----------------------------------------------------- | +| persistence.enabled | If false, creates an `emptyDir` instead | +| persistence.existingClaim | If set, uses this instead of the passed in claim name | + +## Utilities + +### `common.fullname` + +The `common.fullname` template generates a name suitable for the `name:` field +in Kubernetes metadata. It is used like this: + +```yaml +name: {{ template "common.fullname" . }} +``` + +The following different values can influence it: + +```yaml +# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +fullnameOverride: "some-name" + +# This adds a prefix +fullnamePrefix: "pre-" +# This appends a suffix +fullnameSuffix: "-suf" + +# Global versions of the above +global: + fullnamePrefix: "pp-" + fullnameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for release "happy-panda" and chart "wordpress" +name: happy-panda-wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. 
Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.fullname" -}} + {{ template "common.fullname" . }}-my-stuff +{{- end -}} +``` + +### `common.fullname.unique` + +The `common.fullname.unique` variant of fullname appends a unique seven-character +sequence to the end of the common name field. + +This takes all of the same parameters as `common.fullname` + +Example template: + +```yaml +uniqueName: {{ template "common.fullname.unique" . }} +``` + +Example output: + +```yaml +uniqueName: release-name-fullname-jl0dbwx +``` + +It is also impacted by the prefix and suffix definitions, as well as by +`.Values.fullnameOverride` + +Note that the effective maximum length of this function is 63 characters, not 54. + +### `common.name` + +The `common.name` template generates a name suitable for the `app` label. It is used like this: + +```yaml +app: {{ template "common.name" . }} +``` + +The following different values can influence it: + +```yaml +# By default, name uses '{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +nameOverride: "some-name" + +# This adds a prefix +namePrefix: "pre-" +# This appends a suffix +nameSuffix: "-suf" + +# Global versions of the above +global: + namePrefix: "pp-" + nameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for chart "wordpress" +name: wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.name" -}} + {{ template "common.name" . }}-my-stuff +{{- end -}} +``` + +### `common.metadata` + +The `common.metadata` helper generates the `metadata:` section of a Kubernetes +resource. + +This takes three objects: + - .top: top context + - .fullnameOverride: override the fullname with this name + - .metadata + - .labels: key/value list of labels + - .annotations: key/value list of annotations + - .hook: name(s) of hook(s) + +It generates standard labels, annotations, hooks, and a name field. + +Example template: + +```yaml +{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }} +--- +{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }} +``` + +Example values: + +```yaml +bio: + name: example + labels: + first: matt + last: butcher + nick: technosophos + annotations: + format: bio + destination: archive + hook: pre-install + +pet: + fullnameOverride: Zeus + +``` + +Example output: + +```yaml +metadata: + name: release-name-metadata + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + first: "matt" + last: "butcher" + nick: "technosophos" + annotations: + "destination": "archive" + "format": "bio" + "helm.sh/hook": "pre-install" +--- +metadata: + name: Zeus + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + annotations: +``` + +Most of the common templates that define a resource type (e.g. `common.configmap` +or `common.job`) use this to generate the metadata, which means they inherit +the same `labels`, `annotations`, `nameOverride`, and `hook` fields. + +### `common.labelize` + +`common.labelize` turns a map into a set of labels. 
+ +Example template: + +```yaml +{{- $map := dict "first" "1" "second" "2" "third" "3" -}} +{{- template "common.labelize" $map -}} +``` + +Example output: + +```yaml +first: "1" +second: "2" +third: "3" +``` + +### `common.labels.standard` + +`common.labels.standard` prints the standard set of labels. + +Example usage: + +``` +{{ template "common.labels.standard" . }} +``` + +Example output: + +```yaml +app: labelizer +heritage: "Tiller" +release: "RELEASE-NAME" +chart: labelizer-0.1.0 +``` + +### `common.hook` + +The `common.hook` template is a convenience for defining hooks. + +Example template: + +```yaml +{{ template "common.hook" "pre-install,post-install" }} +``` + +Example output: + +```yaml +"helm.sh/hook": "pre-install,post-install" +``` + +### `common.chartref` + +The `common.chartref` helper prints the chart name and version, escaped to be +legal in a Kubernetes label field. + +Example template: + +```yaml +chartref: {{ template "common.chartref" . }} +``` + +For the chart `foo` with version `1.2.3-beta.55+1234`, this will render: + +```yaml +chartref: foo-1.2.3-beta.55_1234 +``` + +(Note that `+` is an illegal character in label values) diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_certificates.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_certificates.tpl new file mode 100644 index 0000000..d385098 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_certificates.tpl @@ -0,0 +1,32 @@ +{{- define "common.ca-certificates.volume" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +{{- if .Values.global.certs.volume }} +- name: ca-certificates + {{- if .Values.global.certs.volume.hostPath }} + hostPath: + path: {{ .Values.global.certs.volume.hostPath }} + type: Directory + {{- end }} + {{- if .Values.global.certs.volume.existingVolumeClaim }} + persistentVolumeClaim: + claimName: {{ .Values.global.certs.volume.existingVolumeClaim }} + {{- end }} +{{- else }} +- name: ca-certificates + persistentVolumeClaim: + claimName: {{ .Release.Name }}-certs-pvc +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.ca-certificates.volumeMount" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +- name: ca-certificates + mountPath: {{ default "/etc/ssl/certs" .Values.certs.mountPath | quote }} + readOnly: true +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_chartref.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_chartref.tpl new file mode 100644 index 0000000..e6c1486 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_chartref.tpl @@ -0,0 +1,14 @@ +{{- /* +common.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. + +Example output: + + zookeeper-1.2.3 + wordpress-3.2.1_20170219 + +*/ -}} +{{- define "common.chartref" -}} + {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_configmap.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_configmap.yaml new file mode 100644 index 0000000..f04def2 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_configmap.yaml @@ -0,0 +1,32 @@ +{{- define "common.configmap.tpl" -}} +apiVersion: v1 +kind: ConfigMap +{{ template "common.metadata.configs" . }} +data: + {{- $root := . 
-}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.configs }} + {{- range $key, $value := $container.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.configs -}} + {{- range $key, $value := .Values.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.configmap" -}} +{{- template "common.util.merge" (append . "common.configmap.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_container.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_container.yaml new file mode 100644 index 0000000..4c51b35 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_container.yaml @@ -0,0 +1,98 @@ +{{- define "common.container.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ include "common.name" . }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +{{- if or .Values.configs .Values.secrets }} +env: +{{- if .Values.configs.data.natsUri }} + - name: NATS_CLIENT_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +{{- end }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +ports: +{{- $port := .Values.service.port }} +{{- if .container }}{{- if .container.port }} + {{- $port = .container.port }} +{{- end }}{{- end }} +- containerPort: {{ $port }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . 
}} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /health + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +readinessProbe: + httpGet: + path: /ready + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.container" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . "common.container.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_deployment.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_deployment.yaml new file mode 100644 index 0000000..0a46af0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_deployment.yaml @@ -0,0 +1,93 @@ +{{- define "common.deployment.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: Deployment +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.deployment.replicas}} + replicas: {{ .Values.deployment.replicas }} +{{- end}} + template: + metadata: + annotations: + checksum/configs: {{ (print (include "common.configmap.tpl" .)) | sha256sum }} + checksum/secrets: {{ (print (include "common.secret.tpl" .)) | sha256sum }} +{{- if .Values.deployment }}{{- if .Values.deployment.annotations }} +{{ include "common.annote" (dict "annotations" .Values.deployment.annotations "root" . ) | indent 8 }} +{{- end }}{{- end }} + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment }}{{- if .Values.deployment.labels }} +{{ include "common.labelize" .Values.deployment.labels | indent 8 }} +{{- end }}{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.natsUri }} + {{ tpl .Values.configs.data.natsUri . | regexFind "//.*:" | trimAll ":" | trimAll "/" }}: "true" +{{- end }}{{- end }}{{- end }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: +{{- if contains $name .Release.Name }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- else }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) 
"-secrets") (include "common.fullname" .)) }} + - {{ template "common.volume.secret" (list (printf "%s-secrets" (.Release.Name)) (printf "%s" (.Release.Name))) }} +{{- end }} + +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.persistentVolumeClaim }} +{{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} +{{- if kindIs "map" $claim }} +{{- if eq $name "default" }} + - {{ template "common.volume.pvc" (list (include "common.fullname" $root) (include "common.fullname" $root) $claim) }} +{{- else }} + - {{ template "common.volume.pvc" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $claim) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.hostPath }} +{{- range $name, $hostPath:= .Values.persistence.hostPath }} +{{- if kindIs "map" $hostPath }} + - {{ template "common.volume.hostpath" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $hostPath) }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.emptyDir }} +{{- range $name, $dir:= .Values.persistence.emptyDir }} +{{- if kindIs "map" $dir }} +{{- if $dir.create }} + - {{ template "common.volume.emptydir" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $root.Values.persistence) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{ include "common.ca-certificates.volume" . | nindent 6 }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} +{{- if .Values.deployment }}{{- if .Values.deployment.initContainer }} + initContainers: + - +{{ include "common.initContainer.tpl" (set . "container" .Values.deployment.initContainer ) | indent 8 }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.deployment.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.deployment" -}} +{{- $top := first . -}} +{{- if and $top.Values.deployment }} +{{- template "common.util.merge" (append . "common.deployment.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_envvar.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_envvar.tpl new file mode 100644 index 0000000..39a997a --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_envvar.tpl @@ -0,0 +1,32 @@ +{{- define "common.envvar.value" -}} + {{- $name := index . 0 -}} + {{- $value := index . 1 -}} + + name: {{ $name }} + value: {{ default "" $value | quote }} +{{- end -}} + +{{- define "common.envvar.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + {{- $configMapKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + configMapKeyRef: + name: {{ $configMapName }}-configs + key: {{ $configMapKey }} +{{- end -}} + +{{- define "common.envvar.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + {{- $secretKey := index . 
2 -}} + + name: {{ $name }} + valueFrom: + secretKeyRef: + name: {{ $secretName }}-secrets + key: {{ $secretKey }} +{{- end -}} + diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_fullname.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_fullname.tpl new file mode 100644 index 0000000..0f6bc77 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_fullname.tpl @@ -0,0 +1,42 @@ +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.fullname" . -}}"' +*/ -}} +{{- define "common.fullname" -}} + {{- $global := default (dict) .Values.global -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +common.fullname.unique adds a random suffix to the unique name. + +This takes the same parameters as common.fullname + +*/ -}} +{{- define "common.fullname.unique" -}} + {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_hpa.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_hpa.yaml new file mode 100644 index 0000000..be4215d --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_hpa.yaml @@ -0,0 +1,31 @@ +{{- define "common.hpa.tpl" -}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +{{ template "common.metadata" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "common.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ if .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpa.targetAverageUtilizationMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end -}} +{{- define "common.hpa" -}} +{{- $top := first . -}} +{{- if and $top.Values.hpa }} +{{- template "common.util.merge" (append . "common.hpa.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_image.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_image.tpl new file mode 100644 index 0000000..6a2335a --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_image.tpl @@ -0,0 +1,21 @@ +{{/* Return the proper collections image name */}} +{{- define "common.image" -}} + {{/* docker.io is the default registry - e.g. 
"qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{ $image := .Values.image }} + {{- if .container }}{{- if .container.image }} + {{ $image = .container.image }} + {{- end -}}{{- end -}} + {{- $registry := default "docker.io" (default .Values.image.registry $image.registry) -}} + {{- $repository := $image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default .Values.image.tag $image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_ingress.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_ingress.yaml new file mode 100644 index 0000000..ab9a75d --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_ingress.yaml @@ -0,0 +1,49 @@ +{{- define "common.ingress.tpl" -}} +apiVersion: extensions/v1beta1 +kind: Ingress +{{ template "common.metadata" . }} + annotations: + kubernetes.io/ingress.class: {{ template "common.ingress.class" . }} + {{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.ingressAuthUrl }} + nginx.ingress.kubernetes.io/auth-url: {{ tpl .Values.configs.data.ingressAuthUrl . | quote }} + {{- end }}{{- end }}{{- end }} + {{- if .Values.ingress}}{{- if .Values.ingress.annotations }} + {{ include "common.annote" (dict "annotations" .Values.ingress.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + {{- if .Values.ingress }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "common.fullname" $ }} + servicePort: 80 + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} + {{- end }} +{{- define "common.ingress" -}} +{{- $top := first . -}} +{{- if and $top.Values.ingress }} +{{- template "common.util.merge" (append . "common.ingress.tpl") -}} +{{- end -}} +{{- end -}} + +{{- define "common.ingress.class" -}} + {{- $ingressClass := "nginx" }} + {{- if .Values.ingress }}{{- if .Values.ingress.class }} + {{- $ingressClass = .Values.ingress.class -}} + {{- end -}}{{- end -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_initContainer.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_initContainer.yaml new file mode 100644 index 0000000..3b12f55 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_initContainer.yaml @@ -0,0 +1,74 @@ +{{- define "common.initContainer.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ .container.name }} +image: {{ include "common.image" (set . 
"container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +env: + - name: SERVICE_NAME + value: {{ .Chart.Name }} +{{- if or .container.configs .container.secrets }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.initContainer" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . "common.initContainer.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata.yaml new file mode 100644 index 0000000..83c42d5 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata.yaml @@ -0,0 +1,35 @@ +{{- /* +common.metadata creates a standard metadata header. +It creates a 'metadata:' section with name and labels. +*/ -}} +{{ define "common.metadata" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.configs" -}} +metadata: + name: {{ template "common.fullname" . }}-configs + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.secrets" -}} +metadata: + name: {{ template "common.fullname" . }}-secrets + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . 
| indent 4 -}} +{{- end -}} + +{{ define "common.metadata.workload" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_annotations.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_annotations.tpl new file mode 100644 index 0000000..ed28474 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_annotations.tpl @@ -0,0 +1,23 @@ +{{- /* +common.hook defines a hook. + +This is to be used in a 'metadata.annotations' section. + +This should be called as 'template "common.metadata.hook" "post-install"' + +Any valid hook may be passed in. Separate multiple hooks with a ",". +*/ -}} +{{- define "common.hook" -}} +"helm.sh/hook": {{printf "%s" . | quote}} +{{- end -}} + +{{- define "common.annote" -}} +{{ $root := .root}} +{{- range $k, $v := .annotations }} +{{- if kindIs "string" $v }} +{{ $k | quote }}: {{ tpl $v $root | quote }} +{{- else -}} +{{ $k | quote }}: {{ $v }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_labels.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_labels.tpl new file mode 100644 index 0000000..15fe00c --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_metadata_labels.tpl @@ -0,0 +1,28 @@ +{{- /* +common.labelize takes a dict or map and generates labels. + +Values will be quoted. Keys will not. + +Example output: + + first: "Matt" + last: "Butcher" + +*/ -}} +{{- define "common.labelize" -}} +{{- range $k, $v := . }} +{{ $k }}: {{ $v | quote }} +{{- end -}} +{{- end -}} + +{{- /* +common.labels.standard prints the standard Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "common.labels.standard" -}} +app: {{ template "common.name" . }} +chart: {{ template "common.chartref" . }} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_name.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_name.tpl new file mode 100644 index 0000000..1d42fb0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_name.tpl @@ -0,0 +1,29 @@ +{{- /* +name defines a template for the name of the chart. It should be used for the `app` label. +This is common practice in many Kubernetes manifests, and is not Helm-specific. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.name" . 
-}}"' +*/ -}} +{{- define "common.name"}} + {{- $global := default (dict) .Values.global -}} + {{- $base := default .Chart.Name .Values.nameOverride -}} + {{- $gpre := default "" $global.namePrefix -}} + {{- $pre := default "" .Values.namePrefix -}} + {{- $suf := default "" .Values.nameSuffix -}} + {{- $gsuf := default "" $global.nameSuffix -}} + {{- $name := print $gpre $pre $base $suf $gsuf -}} + {{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_networkpolicy.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_networkpolicy.yaml new file mode 100644 index 0000000..e0c4922 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- define "common.networkpolicy.tpl" -}} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +{{ template "common.metadata" . }} +spec: + podSelector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} + policyTypes: + - Egress + egress: + - to: +{{- if .Values.configs }}{{- if .Values.configs.data }} +{{- if .Values.configs.data.natsUri }} + - podSelector: + matchLabels: + app: "nats" + release: {{ .Values.natsRelease | default .Release.Name | quote }} + - podSelector: + matchLabels: + app: "nats-streaming" + release: {{ .Values.natsRelease | default .Release.Name | quote }} +{{- end }} +{{- if or .Values.configs.data.tokenAuthUri .Values.configs.data.ingressAuthUrl }} + - podSelector: + matchLabels: + app: "edge-auth" + release: {{ .Values.edgeAuthRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.keysUri }} + - podSelector: + matchLabels: + app: "keys" + release: {{ .Values.keysRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.pdsUri }} + - podSelector: + matchLabels: + app: "policy-decisions" + release: {{ .Values.pdsRelease | default .Release.Name | quote }} +{{- end }} +{{- end }}{{- end }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP +{{- end }} +{{- define "common.networkpolicy" -}} +{{- template "common.util.merge" (append . "common.networkpolicy.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaim.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaim.yaml new file mode 100644 index 0000000..4c2ed62 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaim.yaml @@ -0,0 +1,47 @@ +{{- define "common.persistentvolumeclaim.tpl" -}} +{{- $persistence := default .Values.persistence .claim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +{{ template "common.metadata" . 
}} +spec: + accessModes: + - {{ $persistence.accessMode | quote }} + resources: + requests: + storage: {{ $persistence.size | quote }} +{{- if $persistence.matchLabels }} + selector: + matchLabels: +{{- include "common.labelize" $persistence.matchLabels | indent 6 -}} +{{- end -}} +{{- if $persistence.storageClass }} +{{- if (eq "-" $persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ $persistence.storageClass }}" +{{- end }} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} + storageClassName: "" + {{- else -}} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- define "common.persistentvolumeclaim" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if $top.Values.persistence.enabled -}}{{- if $top.Values.persistence.persistentVolumeClaim -}} + {{- if not $top.claim -}} + {{- $top = set $top "claim" $top.Values.persistence.persistentVolumeClaim.default -}} + {{- end -}} + {{- if not $top.claim.existingClaim -}} + {{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}} + {{- end -}} +{{- end -}}{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaims.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaims.yaml new file mode 100644 index 0000000..2cb894b --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_persistentvolumeclaims.yaml @@ -0,0 +1,27 @@ +{{- define "common.persistentvolumeclaims" -}} +{{- $root := . -}} +{{- if .Values.persistence -}}{{- if .Values.persistence.enabled -}} + {{- if .Values.persistence.persistentVolumeClaim -}} + {{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} + {{- if kindIs "map" $claim }} + {{- if eq $name "default" }} + {{- $root = set $root "claim" $claim -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- else -}} + {{- $values := set $root.Values "fullnameOverride" (printf "%s-%s" (include "common.fullname" $root) $name) -}} + {{- $root = set (set $root "claim" $claim) "Values" $values -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- end -}} + {{- end -}} + {{- printf "\n" -}}{{- printf "\n" -}} + {{- printf "---" -}} + {{- printf "\n" -}} + {{- $_:= unset $root.Values "fullnameOverride" -}} + {{- end -}} + {{- end -}} +{{- end -}}{{- end -}} +{{- end -}} + +## No override templates are needed for the case of defining multiple PVCs +{{- define "mychart.persistentvolumeclaim" -}} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_podSecurityPolicy.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_podSecurityPolicy.yaml new file mode 100644 index 0000000..c06f607 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_podSecurityPolicy.yaml @@ -0,0 +1,55 @@ +{{- define "common.podsecuritypolicy.tpl" -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +{{ template "common.metadata" . 
}} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end -}} +{{- define "common.podsecuritypolicy" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}}{{- if ne ($top.Values.podSecurityPolicy | default false) false -}} +{{- template "common.util.merge" (append . "common.podsecuritypolicy.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_role.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_role.yaml new file mode 100644 index 0000000..cf1d6f6 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_role.yaml @@ -0,0 +1,23 @@ +{{- define "common.role.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +{{ template "common.metadata" . }} +rules: +{{- if .Values.podSecurityPolicy | default false }} +- apiGroups: + - policy + resourceNames: + - {{ template "common.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} +{{- end -}} +{{- define "common.role" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.role.tpl") -}} +{{- end -}} +{{- end -}} + diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_rolebinding.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_rolebinding.yaml new file mode 100644 index 0000000..021e896 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_rolebinding.yaml @@ -0,0 +1,19 @@ +{{- define "common.rolebinding.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +{{ template "common.metadata" . }} +roleRef: + kind: Role + name: {{ template "common.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} +{{- define "common.rolebinding" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . 
"common.rolebinding.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_secret.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_secret.yaml new file mode 100644 index 0000000..45ec55f --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_secret.yaml @@ -0,0 +1,45 @@ +{{- define "common.secret.tpl" -}} +apiVersion: v1 +kind: Secret +{{ template "common.metadata.secrets" . }} +type: Opaque +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.secrets }} + {{- range $key, $value := $container.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- range $key, $value := $container.secrets.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets -}} + {{- if .Values.secrets.stringData -}} + {{- range $key, $value := .Values.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets.data -}} + {{- range $key, $value := .Values.secrets.data }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.secret" -}} +{{- template "common.util.merge" (append . "common.secret.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_service.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_service.yaml new file mode 100644 index 0000000..fb4a9e8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_service.yaml @@ -0,0 +1,25 @@ +{{- define "common.service.tpl" -}} +apiVersion: v1 +kind: Service +{{ template "common.metadata" . }} + annotations: + {{- if .Values.service }}{{- if .Values.service.annotations }} + {{ include "common.annote" (dict "annotations" .Values.service.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ template "common.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- end -}} +{{- define "common.service" -}} +{{- $top := first . -}} +{{- if and $top.Values.service}} +{{- template "common.util.merge" (append . "common.service.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_serviceaccount.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_serviceaccount.yaml new file mode 100644 index 0000000..534a4bf --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- define "common.serviceaccount.tpl" -}} +apiVersion: v1 +kind: ServiceAccount +{{ template "common.metadata" . }} +{{- end -}} +{{- define "common.serviceaccount" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . 
"common.serviceaccount.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_statefulset.yaml b/qliksense/charts/api-keys/charts/qlikcommon/templates/_statefulset.yaml new file mode 100644 index 0000000..04155e3 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_statefulset.yaml @@ -0,0 +1,44 @@ +{{- define "common.statefulset.tpl" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: statefulset +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.statefulset.replicas}} + replicas: {{ .Values.statefulset.replicas }} +{{- end}} + template: + metadata: + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- if .Values.persistence }} + - {{ template "common.volume.pvc" (list (include "common.fullname" .) (include "common.fullname" .) .Values.persistence) }} +{{- end }} +{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.statefulset.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.statefulset" -}} +{{- $top := first . -}} +{{- if and $top.Values.statefulset }} +{{- template "common.util.merge" (append . "common.statefulset.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_transformers.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_transformers.tpl new file mode 100644 index 0000000..f42e742 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_transformers.tpl @@ -0,0 +1,41 @@ +{{- define "common.transformers" -}} +{{- $fullname := include "common.fullname" . -}} +{{- $root := . 
-}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $release := .Release.Name -}} +{{- $commonSecretList := list "mongodbUri" "redisUri" "redisPassword" -}} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end -}}{{- end -}} +{{- if $secrets -}} +{{- range $key, $value := $secrets.stringData }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- range $key, $value := $secrets.data }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- end }} +{{ $configs := .Values.configs}} +{{- if .container }}{{- if .container.configs }} +{{ $configs = .container.configs}} +{{- end -}}{{- end -}} +{{- if $configs -}} +{{- range $key, $value := $configs.data }} +- {{ template "common.envvar.configmap" (list (print $key | snakecase | upper) $fullname $key ) }} +{{- end }} +{{- range $key, $value := $configs }} +{{- if ne $key "data" }} +- {{ template "common.envvar.value" (list (print $key | snakecase | upper) $value ) }} +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_util.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_util.tpl new file mode 100644 index 0000000..6abeec0 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_util.tpl @@ -0,0 +1,15 @@ +{{- /* +common.util.merge will merge two YAML templates and output the result. + +This takes an array of three values: +- the top context +- the template name of the overrides (destination) +- the template name of the base (source) + +*/ -}} +{{- define "common.util.merge" -}} +{{- $top := first . -}} +{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} +{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}} +{{- regexReplaceAll ".*: null|.*: nil" (toYaml (merge $overrides $tpl)) "${1}" -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/api-keys/charts/qlikcommon/templates/_volume.tpl b/qliksense/charts/api-keys/charts/qlikcommon/templates/_volume.tpl new file mode 100644 index 0000000..360e239 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/templates/_volume.tpl @@ -0,0 +1,62 @@ +{{- define "common.volume.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + + name: {{ $name }} + configMap: + name: {{ $configMapName }}-configs +{{- end -}} + +{{- define "common.volume.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + + name: {{ $name }} + secret: + secretName: {{ $secretName }}-secrets +{{- end -}} + +{{- define "common.volume.pvc" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $claim := index . 
2 -}} + + name: {{ $name }} + {{- if $claim }} + persistentVolumeClaim: + claimName: {{ $claim.existingClaim | default $claimName }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.emptydir" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + {{- if $persistence.emptyDir }} + name: {{ $name }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.hostpath" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + name: {{ $name }} + hostPath: + path: {{ $persistence.path }} + type: {{ $persistence.type }} +{{- end -}} + + +{{- define "common.volume.mount" -}} +{{- $volume := index . 0 -}} +{{- $mountPath := index . 1 -}} +- name: {{ $volume }} + mountPath: {{ default "/tmp" $mountPath.mountPath | quote }} + readOnly: {{ default false $mountPath.readOnly }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/qlikcommon/values.yaml b/qliksense/charts/api-keys/charts/qlikcommon/values.yaml new file mode 100644 index 0000000..b7cf514 --- /dev/null +++ b/qliksense/charts/api-keys/charts/qlikcommon/values.yaml @@ -0,0 +1,4 @@ +# Default values for commons. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value diff --git a/qliksense/charts/api-keys/charts/redis/.helmignore b/qliksense/charts/api-keys/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/api-keys/charts/redis/Chart.yaml b/qliksense/charts/api-keys/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/api-keys/charts/redis/README.md b/qliksense/charts/api-keys/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. 
This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the 
container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | 
+| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` |
+| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` |
+| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` |
+| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` |
+| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` |
+| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` |
+| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` |
+| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` |
+| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
+| `sentinel.readinessProbe.enabled` | Turn on and off readiness probe (redis sentinel pod) | `true` |
+| `sentinel.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis sentinel pod) | `5` |
+| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` |
+| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` |
+| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` |
+| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` |
+| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` |
+| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` |
+| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` |
+| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` |
+| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
+| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` |
+| `sysctlImage.command` | sysctlImage command to execute | [] |
+| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` |
+| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` |
+| `sysctlImage.tag` | sysctlImage Init container tag | `buster` |
+| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` |
+| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
+| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} |
+| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+  --set password=secretpassword \
+  stable/redis
+```
+
+The above command sets the Redis server password to `secretpassword`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml stable/redis
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using the chart defaults causes pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information.
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is released, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file with parameters oriented to a production configuration, in contrast to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Number of slaves:
+```diff
+- cluster.slaveCount: 2
++ cluster.slaveCount: 3
+```
+
+- Enable NetworkPolicy:
+```diff
+- networkPolicy.enabled: false
++ networkPolicy.enabled: true
+```
+
+- Start a side-car prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Cluster topologies
+
+#### Default: Master-Slave
+
+When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed:
+
+ - Redis Master service: Points to the master, where read-write operations can be performed.
+ - Redis Slave service: Points to the slaves, where only read operations are allowed.
+
+In case the master crashes, the slaves will wait until the master node is respawned by the Kubernetes Controller Manager.
+
+#### Master-Slave with Sentinel
+
+When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master if the current one fails. In addition to this, only one service is exposed:
+
+ - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <name of your master set>
+```
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+### Using password file
+To use a password file for Redis, you need to create a secret containing the password.
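+
+A minimal sketch of such a secret could look like the following (the secret name `redis-password-file` matches the example values further down; the password value is a placeholder to replace with your own):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: redis-password-file
+type: Opaque
+stringData:
+  # the key becomes the mounted file name, so it must be redis-password
+  redis-password: replace-with-your-password
+```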
+
+> *NOTE*: It is important that the file containing the password is called `redis-password`.
+
+And then deploy the Helm Chart using the secret name as a parameter:
+
+```console
+usePassword=true
+usePasswordFile=true
+existingSecret=redis-password-file
+sentinel.enabled=true
+metrics.enabled=true
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using a configuration similar to the one described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+### Host Kernel Settings
+Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
+```
+sysctlImage:
+  enabled: true
+  mountHostSys: true
+  command:
+    - /bin/sh
+    - -c
+    - |-
+      install_packages procps
+      sysctl -w net.core.somaxconn=10000
+      echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
+```
+
+Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example:
+
+```yaml
+securityContext:
+  sysctls:
+  - name: net.core.somaxconn
+    value: "10000"
+```
+
+Note that this will not disable transparent huge pages.
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis
+```
+
+## NetworkPolicy
+
+To enable network policy for Redis, install
+[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin),
+and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting
+the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+    kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+
+With NetworkPolicy enabled, only pods with the generated client label will be
+able to connect to Redis. This label will be displayed in the output
+after a successful install.
+
+With `networkPolicy.ingressNSMatchLabels`, pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in the matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true`, the following fields should be set:
+
+```
+networkPolicy:
+  enabled: true
+  ingressNSMatchLabels:
+    redis: external
+  ingressNSPodMatchLabels:
+    redis-client: true
+```
+
+## Upgrading an existing Release to a new major version
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
+incompatible breaking change that requires manual action.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases:
+* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+* Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true`, the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the Redis Master StatefulSet definition, so `helm upgrade` does not work out of the box. As an alternative, do one of the following:
+
+ - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+   ```
+   helm install my-release stable/redis --set persistence.existingClaim=<name-of-cloned-pvc>
+   ```
+
+ - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be run to upgrade the release:
+
+   ```
+   helm delete --purge <release-name>
+   helm install <release-name> stable/redis
+   ```
+
+Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as is done for the master.
+
+Some values have changed as well:
+
+ - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves)
+ - `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves)
+
+By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`.
+
+### To 6.0.0
+
+Previous versions of the chart used an init container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true` flag.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command`
+must be specified.
+
+#### Breaking changes
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### To 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels`
+which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently
+added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` cannot be upgraded if `spec.selector` is not explicitly set.
+
+Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+In order to upgrade, delete the Redis StatefulSet before upgrading:
+```bash
+$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master
+```
+And edit the Redis slave (and metrics if enabled) deployment:
+```bash
+kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## Notable changes
+
+### 9.0.0
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### 7.0.0
+In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that the slaves moved from a Deployment to a StatefulSet. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
+
+This version also allows enabling Redis Sentinel containers inside the Redis pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you first need to query the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel).
diff --git a/qliksense/charts/api-keys/charts/redis/ci/default-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/default-values.yaml
new file mode 100644
index 0000000..fc2ba60
--- /dev/null
+++ b/qliksense/charts/api-keys/charts/redis/ci/default-values.yaml
@@ -0,0 +1 @@
+# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
diff --git a/qliksense/charts/api-keys/charts/redis/ci/dev-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/api-keys/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/api-keys/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/api-keys/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/api-keys/charts/redis/ci/production-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/api-keys/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/api-keys/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/api-keys/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/api-keys/charts/redis/templates/NOTES.txt b/qliksense/charts/api-keys/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. 
+ +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/_helpers.tpl b/qliksense/charts/api-keys/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/templates/configmap.yaml b/qliksense/charts/api-keys/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/headless-svc.yaml b/qliksense/charts/api-keys/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/health-configmap.yaml b/qliksense/charts/api-keys/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ 
"$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/api-keys/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/api-keys/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/api-keys/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/api-keys/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/api-keys/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/psp.yaml b/qliksense/charts/api-keys/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-role.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/api-keys/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/api-keys/charts/redis/templates/secret.yaml b/qliksense/charts/api-keys/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/api-keys/charts/redis/values-production.yaml b/qliksense/charts/api-keys/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create.
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just example rules, please adapt them to your needs. + ## Make sure to constrain the rules to the current redis service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 <= 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/api-keys/charts/redis/values.schema.json b/qliksense/charts/api-keys/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + 
"type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/api-keys/charts/redis/values.yaml b/qliksense/charts/api-keys/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/api-keys/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional), for example as illustrated below.
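+ ## Hypothetical illustration (not part of the upstream chart defaults): to only admit traffic from + ## pods labelled redis-client=true running in namespaces labelled redis-access=true, the selectors + ## could be set to: + ## ingressNSMatchLabels: + ## redis-access: "true" + ## ingressNSPodMatchLabels: + ## redis-client: "true"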
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+
+  ## Extra arguments for Metrics exporter, for example:
+  ## extraArgs:
+  ##   check-keys: myKey,myOtherKey
+  # extraArgs: {}
+
+  ## Metrics exporter pod Annotation and Labels
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9121"
+  # podLabels: {}
+
+  # Enable this if you're using https://github.com/coreos/prometheus-operator
+  serviceMonitor:
+    enabled: false
+    ## Specify a namespace if needed
+    # namespace: monitoring
+    # fallback to the prometheus default unless specified
+    # interval: 10s
+    ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
+    ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
+    ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
+    selector:
+      prometheus: kube-prometheus
+
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    rules: []
+    ## These are just example rules, please adapt them to your needs.
+    ## Make sure to constrain the rules to the current Redis service.
+    # - alert: RedisDown
+    #   expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
+    #   for: 2m
+    #   labels:
+    #     severity: error
+    #   annotations:
+    #     summary: Redis instance {{ "{{ $instance }}" }} down
+    #     description: Redis instance {{ "{{ $instance }}" }} is down.
+    # - alert: RedisMemoryHigh
+    #   expr: >
+    #     redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
+    #     /
+    #     redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
+    #     > 90
+    #   for: 2m
+    #   labels:
+    #     severity: error
+    #   annotations:
+    #     summary: Redis instance {{ "{{ $instance }}" }} is using too much memory
+    #     description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+    # - alert: RedisKeyEviction
+    #   expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
+    #   for: 1s
+    #   labels:
+    #     severity: error
+    #   annotations:
+    #     summary: Redis instance {{ "{{ $instance }}" }} has evicted keys
+    #     description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+
+
+  ## Metrics exporter pod priorityClassName
+  # priorityClassName: {}
+  service:
+    type: ClusterIP
+    ## Use loadBalancerIP to request a specific static IP,
+    ## otherwise leave blank
+    # loadBalancerIP:
+    annotations: {}
+    labels: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/api-keys/requirements.yaml b/qliksense/charts/api-keys/requirements.yaml new file mode 100644 index 0000000..9de46b7 --- /dev/null +++ b/qliksense/charts/api-keys/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: + - name: qlikcommon + version: "1.2.4" + repository: "@qlik" + condition: global.component-common-imports + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: global.component-common-imports,mongodb.enabled + - name: redis + version: 10.5.6 + repository: "@stable" + condition: global.component-common-imports,redis.enabled + - name: messaging + version: 2.0.29 + repository: "@qlik" + condition: global.component-common-imports,messaging.enabled diff --git a/qliksense/charts/api-keys/templates/manifest.yaml b/qliksense/charts/api-keys/templates/manifest.yaml new file mode 100644 index 0000000..1282bb3 --- /dev/null +++ b/qliksense/charts/api-keys/templates/manifest.yaml @@ -0,0 +1,69 @@ +{{- template "common.configmap" (list . "api-keys.configmap") -}} +{{- define "api-keys.configmap" -}} +{{- end }} + +--- +{{ template "common.secret" (list . "api-keys.secret") -}} +{{- define "api-keys.secret" -}} +{{- end }} + +--- +{{ template "common.ingress" (list . "api-keys.ingress") -}} +{{- define "api-keys.ingress" -}} +spec: + rules: + - http: + paths: + - path: /api/v1/api-keys + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} +{{- end }} + +--- +{{ template "common.service" (list . "api-keys.service") -}} +{{- define "api-keys.service" -}} +{{- end }} + +--- +{{ template "common.deployment" (list . "api-keys.deployment") -}} +{{- define "api-keys.deployment" -}} +spec: + template: + spec: + dnsConfig: + options: + - name: timeout + value: "1" + containers: + - +{{ include "common.container" (list (set . 
"container" .Values.deployment.container) "api-keys.deployment.container") | indent 8 }} +{{- end }} + +{{- define "api-keys.deployment.container" -}} +{{- if .Values.deployment }}{{- if .Values.deployment.container }}{{- if .Values.deployment.container.command }} +command: ["{{ .Values.deployment.container.command }}"] +{{- end }}{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /live + port: http +readinessProbe: + httpGet: + path: /ready + port: http +ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP +{{- end }} + +--- +{{ template "common.hpa" (list . "api-keys.hpa") -}} +{{- define "api-keys.hpa" -}} +{{- end }} + +--- +{{ template "common.persistentvolumeclaims" . -}} + +--- diff --git a/qliksense/charts/api-keys/values.yaml b/qliksense/charts/api-keys/values.yaml new file mode 100644 index 0000000..bfd5714 --- /dev/null +++ b/qliksense/charts/api-keys/values.yaml @@ -0,0 +1,158 @@ +## Default values for Api-keys Service Helm Chart. +image: + ## Default registry where this repository should be pulled from. + ## Will be overridden by `global.imageRegistry` if set + registry: ghcr.io + ## Api-keys image name. + repository: qlik-download/api-keys + ## Api-keys image version. + ## ref: https://hub.docker.com/r/qlik/tenants/tags/ + tag: 2.1.1 + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', + ## else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + pullPolicy: IfNotPresent + ## Secrets for pulling images from a private docker registry. + ## + pullSecrets: + - name: artifactory-docker-secret + +configs: + environment: "qseok" + region: "example" + natsEnabled: "true" + data: + ## Log level (silly|debug|verbose|info|warn|error) + logLevel: "verbose" + # Users service URL + usersUri: "http://{{ .Release.Name }}-users:8080/v1" + # Keys service URL + keysUri: "http://{{ .Release.Name }}-keys:8080/v1" + # Endpoint to retrieve the JWKS + jwksEndpoint: "http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal" + # Address of NATS server + natsUri: nats://{{ .Release.Name }}-nats-client:4222 + # NATS Streaming cluster ID + natsStreamingClusterId: "{{ .Release.Name }}-nats-streaming-cluster" + # Token auth URL + tokenAuthUri: "http://{{ .Release.Name }}-edge-auth:8080/v1" + # Ingress auth URL + ingressAuthUrl: "http://{{ .Release.Name }}-edge-auth.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth" + # Full Redis URI (port included) + redisUri: "{{ .Release.Name }}-redis-master:6379" + +secrets: + stringData: + ## Specify a custom mongo uri. Not needed when the local mongo is enabled. + mongodbUri: "mongodb://{{ .Release.Name }}-mongodb:27017/{{ .Release.Name }}?ssl=false" + ## Service identity token authentication keys + tokenAuthPrivateKeyId: GR4BOyUiWkDpR1SGrquOymiuKSMdrYE4uuDBIkYnjyo + tokenAuthPrivateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDBAgM63vuS1TMBem4AFn7sCi8+yRFT1ogvNZ9h22+MR66WtfblZ1Lgn + WGJYZyv24pegBwYFK4EEACKhZANiAAQjqSWY/fQeXhAsQl0O97l5OI6/IgQJdL5Z + B8WVwyr6YWojzZLqk47nvrjeTnSXlotyyyGcqrdGkoYxoO4E0smOIbdapfIAqFg/ + g/gl8QlHzIOoq640br3FqDaeXMp0rgY= + -----END EC PRIVATE KEY----- + +## Service configuration. 
+## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 8080 + ## Metrics configuration + ## Prometheus configuration + ## The annotations for prometheus scraping are included + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.port }}" + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + ## Annotations to be added to the ingress. + ## + annotations: + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/(.*) /$1 break; + more_clear_input_headers "X-Forwarded-Host" "X-Forwarded-For" "X-Forwarded-Proto" "X-Original-URI" "X-Original-URL"; + + ## Currently templates a "main" container +deployment: + ## Number of replicas. + ## + replicas: 1 + +## Subcharts +## MongoDB configuration +mongodb: + ## Enables a local mongo chart + enabled: false + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 + # (tag: 4.0.3-debian-9) + tag: 3.6.12 + + ## disable password for local dev mode + usePassword: false + +## Messaging chart configuration +messaging: + ## Set messaging.enabled to true for localdev and CI builds + enabled: false + nats: + enabled: true + replicaCount: 1 + auth: + enabled: false + clusterAuth: + enabled: false + nats-streaming: + enabled: true + replicaCount: 3 + auth: + enabled: false + +## Redis configuration +redis: + enabled: false + ## Image pull policy for Redis chart + image: + pullPolicy: IfNotPresent + ## Disable password authentication by default (for local development for example) + usePassword: false + ## Disable master-secondary topology by default (for local development for example) + cluster: + enabled: false + ## master node configurations + master: + securityContext: + enabled: false + statefulset: + ## Updating all Pods in a StatefulSet, in reverse ordinal order, while respecting the StatefulSet guarantees + updateStrategy: RollingUpdate + slave: + securityContext: + enabled: false + ## metrics configurations + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + + +# Included if certs are required to be mounted into the pod +certs: + mountPath: "/etc/ssl/certs" diff --git a/qliksense/charts/audit/.helmignore b/qliksense/charts/audit/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/audit/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/audit/Chart.yaml b/qliksense/charts/audit/Chart.yaml new file mode 100644 index 0000000..445da0f --- /dev/null +++ b/qliksense/charts/audit/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +appVersion: 1.16.2 +description: Service to process and persist audit events +home: https://www.qlik.com +keywords: +- resource_contract +name: audit +sources: +- https://github.com/qlik-trial/audit +version: 3.3.5 diff --git a/qliksense/charts/audit/README.md b/qliksense/charts/audit/README.md new file mode 100644 index 0000000..268ac53 --- /dev/null +++ b/qliksense/charts/audit/README.md @@ -0,0 +1,115 @@ +# audit + +[audit](https://github.com/qlik-trial/audit) is the service responsible for the persistence of audit events published through NATS streaming. 
+
+## Introduction
+
+This chart bootstraps an audit deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/audit
+```
+
+## Installing the chart locally without external dependencies
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/audit \
+  --set configs.tokenAuthEnabled=false \
+  --set messaging.enabled=true \
+  --set mongodb.enabled=true
+```
+
+The command deploys audit on the Kubernetes cluster in the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Installing the chart locally with external dependencies
+
+Install the `edge-auth`, `keys`, `messaging`, `elastic-infra`, `policy-decisions`, and `feature-flags` charts.
+
+To install the chart with the release name `my-release`:
+
+```console
+helm upgrade --install my-release qlik/audit \
+  --set configs.data.pdsUri=http://policy-decisions:5080 \
+  --set configs.data.featureFlagsUri=http://feature-flags:8080 \
+  --set configs.data.keysUri=http://keys:8080/v1/keys/qlik.api.internal \
+  --set configs.data.tokenAuthUri=http://edge-auth:8080/v1/internal-tokens \
+  --set configs.data.natsUri="nats://messaging-nats-client:4222" \
+  --set configs.data.natsStreamingClusterId="messaging-nats-streaming-cluster" \
+  --set configs.data.logLevel=debug \
+  --set secrets.stringData.mongodbUri=mongodb://elastic-infra-mongodb:27017/audit
+```
+
+To install the chart with archiving enabled, add:
+
+```console
+  --set configs.archiveEnabled=true \
+  --set configs.storageEndpoint=audit-minio:9000 \
+  --set configs.storageSsl=false \
+  --set configs.data.storageKeyID=AKIAIOSFODNN7EXAMPLE \
+  --set configs.data.storageSecretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \
+  --set minio.enabled=true
+```
+
+The command deploys audit on the Kubernetes cluster with the supplied configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the audit chart and their default values.
+
+| Key | Kind | Default | Description |
+| --- | --- | --- | --- |
+| logLevel | config | "info" | Log level |
+| pdsUri | config | "http://{{ .Release.Name }}-policy-decisions:5080" | Policy-decisions service URL |
+| keysUri | config | "http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal" | Endpoint to retrieve the JWKS |
+| natsUri | config | "nats://{{ .Release.Name }}-nats-client:4222" | Address of NATS server |
+| natsStreamingClusterId | config | "{{ .Release.Name }}-nats-streaming-cluster" | NATS Streaming cluster ID |
+| tokenAuthUri | config | "http://{{ .Release.Name }}-edge-auth:8080/v1/internal-tokens" | Token auth endpoint |
+| ingressAuthUrl | config | "http://{{ .Release.Name }}.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth" | Ingress auth URL |
+| featureFlagsUri | config | "http://{{ .Release.Name }}-feature-flags:8080/" | Feature flags service URL |
+| mongodbUri | secret | "mongodb://{{ .Release.Name }}-mongodb:27017" | A mongo db connection uri |
+| storageEndpoint | config | "{{ .Release.Name }}-minio:9000" | Endpoint to S3 storage provider |
+| storageBucket | config | "audit" | Storage bucket name |
+| storageRegion | config | "us-east-1" | Storage region |
+| storageKeyID | secret | "" | Storage access key ID |
+| storageSecretKey | secret | "" | Storage secret access key |
+| tokenAuthPrivateKeyId | secret | | Token auth key ID |
+| tokenAuthPrivateKey | secret | | Token auth private key |
+| terminationGracePeriodSeconds | inline | | Number of seconds to wait during pod termination after sending SIGTERM until SIGKILL |
+| authEnabled | inline | true | JWT validation using keys retrieved from the configured JWKS endpoint |
+| authJwtAud | inline | "qlik.api.internal" | Expected `audience` value within the JWT claims |
+| authJwtIss | inline | "qlik.api.internal" | Expected `issuer` value within the JWT claims |
+| tokenAuthEnabled | inline | true | Enables service identity token authentication |
+| eventTTL | inline | "-1" | Time-to-live for audit events in the database before archiving |
+| natsEnabled | inline | true | Toggle to enable NATS / Streaming messaging |
+| natsChannels | inline | "system-events.*" | List of system events channels to subscribe to |
+| archiveEnabled | inline | false | Toggle to enable archiving |
+| archiveInterval | inline | "3h" | Repeat interval for validating and archiving the data |
+| archiveRetryAfter | inline | "30m" | Timeout before the next attempt after a failed archive operation |
+| storageSsl | inline | true | Toggle to use a secured connection |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install --name my-release -f values.yaml qlik/audit
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
diff --git a/qliksense/charts/audit/charts/messaging/Chart.yaml b/qliksense/charts/audit/charts/messaging/Chart.yaml
new file mode 100644
index 0000000..8ba51d6
--- /dev/null
+++ b/qliksense/charts/audit/charts/messaging/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+description: |
+  Messaging system services. NATS and NATS Streaming are supported. Other services can communicate with each other and orchestrate their work using the services provided by this chart.
+home: https://www.qlik.com +keywords: +- messaging +- queue +- nats +- nats-streaming +name: messaging +sources: +- https://github.com/nats-io/gnatsd +- https://github.com/nats-io/nats-streaming-server +- https://github.com/helm/charts/tree/master/stable/nats +- https://github.com/nats-io/prometheus-nats-exporter +- https://github.com/qlik-trial/nats-prom-exporter +version: 1.3.0 diff --git a/qliksense/charts/audit/charts/messaging/README.md b/qliksense/charts/audit/charts/messaging/README.md new file mode 100644 index 0000000..7659991 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/README.md @@ -0,0 +1,241 @@ +# messaging + +This charts provides **messaging system** (a.k.a. message queue, message bus, etc.) capabilities for services. +Currently, [NATS](https://www.nats.io) and [NATS Streaming](https://nats.io/documentation/streaming/nats-streaming-intro/) +are included in this chart, but in the future, other message systems like RabbitMQ can also be added. + +## Installing the Chart + +To install the chart with the release name `messaging`: + +```console +helm install --name messaging qlik/messaging +``` + +## Uninstalling the Chart + +To uninstall/delete the `messaging` deployment: + +```console +helm delete messaging +``` + +## Configuration + +### NATS + +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------------- | ------------------------------------- | +| `nats.enabled` | enable NATS messaging system | `true` | +| `nats.image.registry` | NATS image registry | `qliktech-docker-snapshot.jfrog.io` | +| `nats.image.repository` | NATS Image name | `qnatsd` | +| `nats.image.tag` | NATS Image tag | `0.2.3` | +| `nats.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats.replicaCount` | number of nats replicas | `1` | +| `nats.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats.auth.enabled` | enable authentication for nats clients | `true` | +| `nats.auth.user` | username for nats client authentication | `nats_client` | +| `nats.auth.password` | password for nats client authentication | `T0pS3cr3t` | +| `nats.auth.jwtUsers` | array of jwt authenticated users | See [Authentication](#authentication) | +| `nats.clusterAuth.enabled` | enable authentication for nats clustering | `false` | +| `nats.clusterAuth.user` | username for nats clustering authentication | `nats_cluster` | +| `nats.clusterAuth.password` | password for nats clustering authentication | random string | +| `nats.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats.client.service.type` | nats-client service type | `ClusterIP` | +| `nats.client.service.port` | nats-client service port | `4222` | +| `nats.cluster.service.type` | nats-cluster service type | `ClusterIP` | +| `nats.cluster.service.port` | nats-cluster service port | `6222` | +| `nats.monitoring.service.type` | nats-monitoring service type | `ClusterIP` | +| `nats.monitoring.service.port` | nats-monitoring service port | `8222` | +| `nats.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats.resources` | CPU and memory requests and limits for nats | `{}` | +| `extraArgs` | Optional flags for NATS | See [values.yaml](./values.yaml) | + +### NATS Streaming + +| Parameter | Description | Default | +| 
------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | +| `nats-streaming.enabled` | enable NATS messaging system | `false` | +| `nats-streaming.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats-streaming.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats-streaming.replicaCount` | number of nats replicas | `3` | +| `nats-streaming.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats-streaming.auth.enabled` | enable authentication for nats clients | `true` | +| `nats-streaming.auth.user` | username for nats client authentication | `nats_client` | +| `nats-streaming.auth.password` | password for nats client authentication | `nil` (Uses Secret below for password) | +| `nats-streaming.auth.secretName` | secretName for nats client authentication | `{{ .Release.Name }}-nats-secret` | +| `nats-streaming.auth.secretKey` | secretKey for nats client authentication | `client-password` | +| `nats-streaming.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats-streaming.monitoring.service.type` | nats-streaming-monitoring service type | `ClusterIP` | +| `nats-streaming.monitoring.service.port` | nats-streaming-monitoring service port | `8222` | +| `nats-streaming.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats-streaming.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats-streaming.resources` | CPU and memory requests and limits for nats | `{}` | +| `nats-streaming.clusterID` | nats streaming cluster name id | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats-streaming.natsSvc` | external nats server url | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats-streaming.hbInterval` | Interval at which server sends heartbeat to a client | `10s` | +| `nats-streaming.hbTimeout` | How long server waits for a heartbeat response | `10s` | +| `nats-streaming.hbFailCount` | Number of failed heartbeats before server closes the client connection | `5` | +| `nats-streaming.persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `nats-streaming.persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `nats-streaming.persistence.volume.size` | Persistence volume size | `nil` | +| `nats-streaming.persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `nats-streaming.persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. 
Only needed if the internal StorageClass is enabled | `{}` | + +### Network Policy for NATS and NATS Streaming + +| Parameter | Description | Default | +| -------------------------------------- | ---------------------------------------------------------------- | --------------------- | +| `networkPolicy.nats.enabled` | enable custom network policy for NATS messaging system | `false` | +| `networkPolicy.nats-streaming.enabled` | enable custom network policy for NATS Streaming messaging system | `false` | +| `networkPolicy.keys.release` | keys service release name for egress rules | `{{ .Release.Name }}` | + +## Requirements + +### Network Plugin to enable Network Policies in Kubernetes cluster + +This chart include options to enable [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the created +`nats` and `nats-streaming` clusters. + +Network policies are implemented by the network plugin, so the Kubernetes cluster must be configured with a networking solution which supports NetworkPolicy - +simply creating the resource without a controller to implement it will have no effect. + +For local development, please refer to [Setting Up a Minikube Cluster - Configuring Network Plugin to support Network Policies](https://github.com/qlik-trial/elastic-charts/blob/master/docs/prerequisites/minikube-cluster.md#configuring-network-plugin-to-support-network-policies) +for detailed instructions. + +### Secrets + +For deploying this chart to **stage**/**prod**, you need the following secrets written to **vault**. + +*The passwords should not start with a number!* + +| Secret | Key | Purpose | +| -------------------------------------------------------------- | ------- | ----------------------------------- | +| `/secret/{environment}/messaging/{region}/natsClientPassword` | `value` | password for client authentication | +| `/secret/{environment}/messaging/{region}/natsClusterPassword` | `value` | password for cluster authentication | + +## Connecting to NATS / NATS Streaming + +### From the command line: +#### Port-forward NATS Client Service: +```sh + > kubectl port-forward messaging-nats-0 4222 +``` +#### Connect via `telnet`: +```sh + > telnet localhost 4222 +``` +#### Connect with no auth: +```sh + CONNECT {} +``` +#### Connect with auth: +```sh + CONNECT {"user":"my-user","pass":"T0pS3cr3t"} +``` +#### Subscribing to channel, publishing to a channel, and receiving the published message: +```sh + SUB foo 1 + +OK + PUB foo 11 + Hello World + +OK + MSG foo 1 11 + Hello World +``` + +### Using [go-nats](https://github.com/nats-io/go-nats/) and [go-nats-streaming](https://github.com/nats-io/go-nats-streaming) clients: +```golang +package main + +import ( + "log" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming" +) + +func main() { + nc, err := nats.Connect("nats://nats_client:asdf@localhost:4222") + if err != nil { + log.Fatal(err) + } + sc, err := stan.Connect("messaging-nats-streaming-cluster", "client-123", stan.NatsConn(nc)) + if err != nil { + log.Fatal(err) + } + sc.Publish("hello", []byte("msg1")) + + sc.Subscribe("hello", func(m *stan.Msg) { + log.Printf("[Received] %+v", m) + }, stan.StartWithLastReceived()) + + sc.Publish("hello", []byte("msg2")) + + select{} +} +``` + +### With Network Policies enabled + +To connect to `NATS` as a client with Network Policies enabled , the pod in which the service client is in must have the label +`{{ .Release.Name }}-nats-client=true`. 
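+
+For example (a minimal sketch, assuming the chart was installed with the release name `messaging` as in the install example above, and using a hypothetical `my-service` client Deployment), the label is added to the client workload's pod template:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-service                 # hypothetical client workload
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: my-service
+  template:
+    metadata:
+      labels:
+        app: my-service
+        # Label checked by the NATS ingress Network Policy; the key is
+        # "<release name>-nats-client", so "messaging" is assumed here.
+        messaging-nats-client: "true"
+    spec:
+      containers:
+        - name: my-service
+          image: my-service:latest # placeholder image
+```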
+
+Otherwise, if enabled, the `ingress` `Network Policy` for `NATS` will block incoming traffic from any pod without the appropriate label.
+
+`Network Policy` is enabled in `stage` and `production` environments.
+
+## Authentication
+
+It's important to know that when using NATS Streaming, a NATS connection is also required, and it is the NATS connection that handles authentication and authorization, not the NATS Streaming connection.
+
+### JWT Authentication
+
+NATS has been configured to allow authentication using service-to-service (S2S) JWTs, but in order to be authenticated, a service must be whitelisted.
+The `nats.auth.jwtUsers` value can be used to provide a whitelist of users that should be authenticated using an S2S JWT.
+**Note**: when using an S2S JWT, both the NATS username and the JWT `subject` must match.
+
+Adding a new service to the whitelist is as simple as updating the `nats.auth.jwtUsers` value:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+      - user: "my-other-service"
+      # ...etc
+```
+
+### Authorization
+
+The above method of adding a JWT authentication whitelist also allows for setting authorization rules.
+NATS [authorization rules](https://nats.io/documentation/managing_the_server/authorization/) can be configured on a per-subject basis.
+
+The following is an example of adding publish/subscribe authorization rules:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+        stanPermissions:
+          publish:
+            - "events.mysubject.>" # service can publish to any subject that starts with `events.mysubject.`
+            - "system-events.mysubject" # service can publish to the `system-events.mysubject` subject
+          subscribe:
+            - "events.somesubject" # service can subscribe to the `events.somesubject` subject
+        natsPermissions:
+          publish:
+            - "events.mysubject1" # service can publish to the `events.mysubject1` subject
+          subscribe:
+            - "events.somesubject1" # service can subscribe to the `events.somesubject1` subject
+```
+Wildcard support works as follows:
+
+The dot character `.` is the token separator.
+
+The asterisk character `*` is a token wildcard match.
+`e.g. foo.* matches foo.bar, foo.baz, but not foo.bar.baz.`
+
+The greater-than symbol `>` is a full wildcard match.
+`e.g. foo.> matches foo.bar, foo.baz, foo.bar.baz, foo.bar.1, etc.`
diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/Chart.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/Chart.yaml
new file mode 100644
index 0000000..77e79e4
--- /dev/null
+++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+appVersion: 0.6.0
+description: A NATS Streaming cluster setup
+home: https://nats.io/
+keywords:
+- NATS
+- Messaging
+- publish
+- subscribe
+- streaming
+- cluster
+- persistence
+name: nats-streaming
+version: 0.2.0
diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/README.md b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/README.md
new file mode 100644
index 0000000..c88074b
--- /dev/null
+++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/README.md
@@ -0,0 +1,133 @@
+# NATS Streaming Clustering Helm Chart
+
+Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication.
+
+## Getting started
+
+This chart relies on an already available NATS Service to which the
+NATS Streaming nodes that will form a cluster can connect.
+You can install the NATS Operator and then use it to create a NATS cluster
+via the following:
+
+```console
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml
+```
+
+This will create a NATS cluster in the `nats-io` namespace. Then, to
+install a NATS Streaming cluster, the URL of the NATS cluster can be
+specified as follows (using `my-release` as the release name for the
+cluster):
+
+```console
+$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster
+$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222
+```
+
+This will create three follower nodes plus an extra Pod configured in
+bootstrapping mode, which will start as the leader of the Raft group
+as soon as it joins.
+
+```console
+$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release"
+NAME                                          READY   STATUS    RESTARTS   AGE
+my-release-nats-streaming-cluster-0           1/1     Running   0          30s
+my-release-nats-streaming-cluster-1           1/1     Running   0          23s
+my-release-nats-streaming-cluster-2           1/1     Running   0          17s
+my-release-nats-streaming-cluster-bootstrap   1/1     Running   0          30s
+```
+
+Note that if the bootstrapping Pod fails, it will not be recreated;
+instead, one of the extra follower Pods will take over the leadership.
+The follower Pods are part of a Deployment, so they will be recreated
+in case of failure.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
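+
+The parameters documented in the next section can be supplied either with `--set` flags or a values file. As a quick illustration (a sketch only, reusing the Helm 2 style install from above; the parameter names come from the Configuration table below, and the release name and sizes are placeholders), persistent file storage and client authentication could be enabled like this:
+
+```console
+$ helm install nats-streaming-cluster -n my-release \
+    --set natsSvc=nats://nats.nats-io.svc.cluster.local:4222 \
+    --set persistence.volume.enabled=true \
+    --set persistence.volume.size=10Gi \
+    --set auth.enabled=true \
+    --set auth.user=nats_client \
+    --set auth.password=T0pS3cr3t
+```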
+ +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.volume.size` | Persistence volume size | `nil` | +| `persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/NOTES.txt b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/_helpers.tpl b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . -}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/sc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..75b0519 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.volume.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.volume.storageClass }} +{{ toYaml .Values.persistence.volume.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/statefulset.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..3a9f5b2 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,247 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + value: {{ .Values.auth.user }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretKey }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if 
.Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats-streaming/values.yaml b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/values.yaml new file mode 100644 index 0000000..f7920f3 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats-streaming/values.yaml @@ -0,0 +1,290 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + enabled: false + definition: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/Chart.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/Chart.yaml new file mode 100644 index 0000000..8d73464 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +home: https://nats.io/ +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png +keywords: +- nats +- messaging +- addressing +- discovery +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: nats +sources: +- https://github.com/bitnami/bitnami-docker-nats +version: 2.1.0 diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/README.md b/qliksense/charts/audit/charts/messaging/charts/nats/README.md new file mode 100644 index 0000000..454b795 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/README.md @@ -0,0 +1,191 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod | `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as the user and `T0pS3cr3t` as the password. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you need additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can add them via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file contains a configuration to deploy a scalable and highly available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/NOTES.txt b/qliksense/charts/audit/charts/messaging/charts/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + an alternative, you can also set "auth.enabled=true", providing a valid + password via the "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" .
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
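+
+   To quickly check that the exporter is responding (this assumes curl is
+   available wherever the port-forward from step 3 is running):
+
+     curl -s http://127.0.0.1:{{ .Values.metrics.port }}/metrics | head
+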
+{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/_helpers.tpl b/qliksense/charts/audit/charts/messaging/charts/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/client-svc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/cluster-svc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/configmap.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/configmap.yaml new file mode 100644 index 0000000..3566286 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/configmap.yaml @@ -0,0 +1,87 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . 
}} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + noAdvertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/headless-svc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/ingress.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/monitoring-svc.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/networkpolicy.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/statefulset.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/statefulset.yaml new file mode 100644 index 0000000..a4a8283 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/statefulset.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/templates/tls-secret.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/charts/nats/values.yaml b/qliksense/charts/audit/charts/messaging/charts/nats/values.yaml new file mode 100644 index 0000000..604d117 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/charts/nats/values.yaml @@ -0,0 +1,302 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/Chart.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/Chart.yaml new file mode 100644 index 0000000..97d2db3 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +description: A NATS Streaming cluster setup +name: nats-streaming +version: 0.2.0 +appVersion: 0.6.0 +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +home: https://nats.io/ diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/README.md b/qliksense/charts/audit/charts/messaging/nats-streaming/README.md new file mode 100644 index 0000000..c88074b --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/README.md @@ -0,0 +1,133 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that will form a cluster can connect. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster in the `nats-io` namespace. Then, to +install a NATS Streaming cluster, the URL of the NATS cluster can be +specified as follows (using `my-release` as the release name for the +cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod which is +configured to be in bootstrapping mode and which will start as the leader +of the Raft group as soon as it joins. + +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that if the bootstrapping Pod fails, it will not be +recreated; instead, one of the extra follower Pods will take over +the leadership. The follower Pods are part of a Deployment, so they +will be recreated in case of failure.
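+
+For instance, once the Pods are up you can ask any node for its view of the
+cluster through the monitoring endpoint (port `8222` by default in this chart).
+The sketch below simply reuses the labels and release name from the example
+above and assumes `curl` is available locally; in clustered mode the
+`/streaming/serverz` response typically reports the server state and its Raft
+role, which lets you confirm which node currently acts as the leader.
+
+```console
+$ export POD_NAME=$(kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" -o jsonpath="{.items[0].metadata.name}")
+$ kubectl port-forward $POD_NAME 8222:8222 &
+$ curl -s http://127.0.0.1:8222/streaming/serverz
+```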
+ +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.volume.size` | Persistence volume size | `nil` | +| `persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/NOTES.txt b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . 
}}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/_helpers.tpl b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . 
-}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/sc.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..75b0519 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.volume.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.volume.storageClass }} +{{ toYaml .Values.persistence.volume.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/templates/statefulset.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..3a9f5b2 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,247 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + value: {{ .Values.auth.user }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretKey }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if 
.Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats-streaming/values.yaml b/qliksense/charts/audit/charts/messaging/nats-streaming/values.yaml new file mode 100644 index 0000000..f7920f3 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats-streaming/values.yaml @@ -0,0 +1,290 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. 
+# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + enabled: false + definition: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
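+## For example (illustrative only), a partitioned rolling update that leaves pods
+## with an ordinal below 2 on the old revision would look like:
+##   statefulset:
+##     updateStrategy: RollingUpdate
+##     rollingUpdatePartition: 2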
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. 
+ # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/audit/charts/messaging/nats/Chart.yaml b/qliksense/charts/audit/charts/messaging/nats/Chart.yaml new file mode 100644 index 0000000..a016895 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 2.1.0 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addressing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/qliksense/charts/audit/charts/messaging/nats/README.md b/qliksense/charts/audit/charts/messaging/nats/README.md new file mode 100644 index 0000000..454b795 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/README.md @@ -0,0 +1,191 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+
+| Parameter | Description | Default |
+| --------- | ----------- | ------- |
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `image.registry` | NATS image registry | `docker.io` |
+| `image.repository` | NATS Image name | `bitnami/nats` |
+| `image.tag` | NATS Image tag | `{VERSION}` |
+| `image.pullPolicy` | Image pull policy | `Always` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` |
+| `auth.enabled` | Switch to enable/disable client authentication | `true` |
+| `auth.user` | Client authentication user | `nats_cluster` |
+| `auth.password` | Client authentication password | `random alphanumeric string (10)` |
+| `auth.token` | Client authentication token | `nil` |
+| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` |
+| `clusterAuth.user` | Cluster authentication user | `nats_cluster` |
+| `clusterAuth.password` | Cluster authentication password | `random alphanumeric string (10)` |
+| `clusterAuth.token` | Cluster authentication token | `nil` |
+| `debug.enabled` | Switch to enable/disable debug on logging | `false` |
+| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` |
+| `debug.logtime` | Switch to enable/disable logtime on logging | `false` |
+| `maxConnections` | Max. number of client connections | `nil` |
+| `maxControlLine` | Max. protocol control line | `nil` |
+| `maxPayload` | Max. payload | `nil` |
+| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` |
+| `replicaCount` | Number of NATS nodes | `1` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` |
+| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` |
+| `podLabels` | Additional labels to be added to pods | {} |
+| `podAnnotations` | Annotations to be added to pods | {} |
+| `nodeSelector` | Node labels for pod assignment | `nil` |
+| `schedulerName` | Name of an alternate scheduler | `nil` |
+| `antiAffinity` | Anti-affinity for pod assignment | `soft` |
+| `tolerations` | Toleration labels for pod assignment | `nil` |
+| `resources` | CPU/Memory resource requests/limits | {} |
+| `extraArgs` | Optional flags for NATS | `[]` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod | `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/NOTES.txt b/qliksense/charts/audit/charts/messaging/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
+{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/_helpers.tpl b/qliksense/charts/audit/charts/messaging/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/client-svc.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/cluster-svc.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/configmap.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/configmap.yaml new file mode 100644 index 0000000..3566286 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/configmap.yaml @@ -0,0 +1,87 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . 
}} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + noAdvertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/headless-svc.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/ingress.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/monitoring-svc.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/networkpolicy.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . 
}}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/statefulset.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/statefulset.yaml new file mode 100644 index 0000000..a4a8283 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/statefulset.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/audit/charts/messaging/nats/templates/tls-secret.yaml b/qliksense/charts/audit/charts/messaging/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/nats/values.yaml b/qliksense/charts/audit/charts/messaging/nats/values.yaml new file mode 100644 index 0000000..604d117 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/nats/values.yaml @@ -0,0 +1,302 @@ +## Global Docker image registry +## Please note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistryKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy; can be set to RollingUpdate or OnDelete (the default below).
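+## For example, a staged rollout could be requested with the two keys shown below (the values are illustrative, not the chart defaults): +# statefulset: +#   updateStrategy: RollingUpdate +#   rollingUpdatePartition: 2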
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
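+  ## For example, an internal load balancer is typically requested through a provider-specific annotation; the key below is an AWS example shown purely as an illustration: +  # type: LoadBalancer +  # annotations: +  #   service.beta.kubernetes.io/aws-load-balancer-internal: "true"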
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
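+  ## For example, such a secret could be created ahead of time with a command along these lines (registry, credentials and secret name are placeholders): +  ##   kubectl create secret docker-registry myRegistryKeySecretName --docker-server=docker.io --docker-username=<user> --docker-password=<password>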
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/audit/charts/messaging/requirements.yaml b/qliksense/charts/audit/charts/messaging/requirements.yaml new file mode 100644 index 0000000..515b43f --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/requirements.yaml @@ -0,0 +1,13 @@ +dependencies: + - name: nats + version: 2.1.0 + repository: "file://./nats" + # messaging.nats.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats.enabled,nats.enabled + - name: nats-streaming + version: 0.2.0 + repository: "file://./nats-streaming" + # messaging.nats-streaming.enabled is used by services that depend on the messaging chart to enable or disable nats streaming + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats-streaming.enabled,nats-streaming.enabled diff --git a/qliksense/charts/audit/charts/messaging/templates/_helper.tpl b/qliksense/charts/audit/charts/messaging/templates/_helper.tpl new file mode 100644 index 0000000..d03e4d7 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/templates/_helper.tpl @@ -0,0 +1,38 @@ +{{- define "messaging.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "messaging.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "messaging.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.name" -}} +{{- "nats" -}} +{{- end -}} + +{{- define "nats.fullname" -}} +{{- $name := "nats" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming.name" -}} +{{- "nats-streaming" -}} +{{- end -}} + +{{- define "nats-streaming.fullname" -}} +{{- $name := "nats-streaming" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/messaging/templates/nats-secret.yaml b/qliksense/charts/audit/charts/messaging/templates/nats-secret.yaml new file mode 100644 index 0000000..e58ebdf --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/templates/nats-secret.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name 
}}-nats-secret +data: + {{ if and .Values.nats.enabled .Values.nats.auth.password }} + client-password: {{ print .Values.nats.auth.password | b64enc }} + {{- end }} + {{ if .Values.nats.auth.token }} + client-token: {{ print .Values.nats.auth.token | b64enc }} + {{- end }} + + {{ if .Values.nats.clusterAuth.password }} + cluster-password: {{ print .Values.nats.clusterAuth.password | b64enc }} + {{- end }} + {{ if .Values.nats.clusterAuth.token }} + cluster-token: {{ print .Values.nats.clusterAuth.token | b64enc }} + {{- end }} diff --git a/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats-streaming.yaml b/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats-streaming.yaml new file mode 100644 index 0000000..cd855c0 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats-streaming.yaml @@ -0,0 +1,51 @@ +{{- if and (index .Values "nats-streaming" "enabled") (index .Values "networkPolicy" "nats-streaming" "enabled") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats-streaming.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ index .Values "nats-streaming" "monitoring" "service" "port" }} + from: + - podSelector: + matchLabels: + {{ template "nats-streaming.fullname" . }}-admin: "true" + - ports: + - port: {{ index .Values "nats-streaming" "metrics" "port" }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats.yaml b/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats.yaml new file mode 100644 index 0000000..df645c6 --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/templates/networkpolicy-nats.yaml @@ -0,0 +1,51 @@ +{{- if and (.Values.nats.enabled) (.Values.networkPolicy.nats.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ .Values.nats.client.service.port }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" .
}}-client: "true" + - ports: + - port: {{ .Values.nats.metrics.port }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "keys" + release: {{ tpl ( .Values.networkPolicy.keys.release ) . | quote }} +{{- end }} diff --git a/qliksense/charts/audit/charts/messaging/values.yaml b/qliksense/charts/audit/charts/messaging/values.yaml new file mode 100644 index 0000000..ac988ad --- /dev/null +++ b/qliksense/charts/audit/charts/messaging/values.yaml @@ -0,0 +1,320 @@ +## Default values for the messaging Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## NATS configuration +## +nats: + ## Enables NATS chart by default + enabled: true + + securityContext: + enabled: false + + ## Image pull policy for NATS chart + image: + registry: ghcr.io + repository: qlik-download/qnatsd + tag: 0.2.3 + pullPolicy: IfNotPresent + pullSecrets: + - name: artifactory-registry-secret + + ## Number of NATS nodes + replicaCount: 1 + + ## NATS statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS svc used for client connections + ## ref: https://github.com/nats-io/gnatsd#running + ## + client: + service: + type: ClusterIP + port: 4222 + + ## Kubernetes svc used for clustering + ## ref: https://github.com/nats-io/gnatsd#clustering + ## + cluster: + service: + type: ClusterIP + port: 6222 + noAdvertise: true + + ## NATS svc used for monitoring + ## ref: https://github.com/nats-io/gnatsd#monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + ## Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## Client Authentication + auth: + enabled: true + user: "nats_client" + password: "T0pS3cr3t" + + ## Configuration of users that are authenticated used JWTs + ## Users can be configured with permissions to allow or deny publish/subscribe access to subjects + ## ref: https://nats.io/documentation/managing_the_server/authorization/ + ## + jwtUsers: + - user: "audit" + stanPermissions: + subscribe: + - "com.qlik.app" + - "com.qlik.engine.session" + - "system-events.engine.app" + - "system-events.engine.session" + - "system-events.user-session" + - user: "chronos-worker" + stanPermissions: + publish: + - "chronos-worker.>" + - user: "data-engineering-exporter" + stanPermissions: + subscribe: + - "system-events.>" + - user: "edge-auth" + stanPermissions: + publish: + - "system-events.user-session" + subscribe: + - "system-events.users" + - "system-events.user-session" + - "private.idp-sync" + - user: "engine" + stanPermissions: + publish: + - "com.qlik.app" + - "com.qlik.engine.session" + - "system-events.engine.app" + - "system-events.engine.session" + - user: "identity-providers" + stanPermissions: + publish: + - "private.idp-sync" + - user: "odag" + stanPermissions: + publish: + - "odag.>" + subscribe: + - "odag.>" + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-data-reload" + stanPermissions: + publish: + - "reload" + - "system-events.reloadResults" + subscribe: + - "reload" + - user: "resource-library" + stanPermissions: + publish: + - "system-events.resource-library" + - user: "tenants" + stanPermissions: + publish: + - "system-events.tenants" + - user: "users" + stanPermissions: + publish: + - "system-events.users" + - user: "collections" + stanPermissions: 
+ subscribe: + - "system-events.engine.app" + + + extraArgs: + - --jwt_users_file=/opt/bitnami/nats/users.json + - --jwt_auth_url=http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## for localdev use this configuration instead + # - --jwt_auth_url=http://keys:8080/v1/keys/qlik.api.internal + + ## Cluster Authentication + clusterAuth: + enabled: false + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-16 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-registry-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +## NATS Streaming configuration +## +nats-streaming: + enabled: true + + securityContext: + enabled: false + + ## NATS Streaming image + image: + pullSecrets: + - name: artifactory-registry-secret + + ## NATS Streaming replicas + replicaCount: 3 + + ## NATS Streaming statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS Streaming extra options for liveness and readiness probes + readinessProbe: + enabled: true + initialDelaySeconds: 30 + + ## NATS Streaming svc used for monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + # Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## NATS Streaming cluster id + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + + ## NATS server + natsSvc: "nats://{{ .Release.Name }}-nats-client:4222" + + ## NATS server client Authentication + auth: + enabled: true + user: nats_client + secretName: "{{ .Release.Name }}-nats-secret" + secretKey: "client-password" + + ## Use for general debugging. Enabling this will negatively affect performance. + debug: true + + # Interval at which server sends heartbeat to a client + hbInterval: 10s + + # How long server waits for a heartbeat response + hbTimeout: 10s + + # Number of failed heartbeats before server closes the client connection + hbFailCount: 5 + + + persistence: + volume: + ## If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + ## Normally the storage class should be created outside this helm chart + ## If we want to deploy a storage class as part of the helm chart + ## - Provide a storageClassName above. + ## - set enabled true + ## - provide a storage class definition. + + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. 
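+    ## For example, an illustrative definition using only the fields hinted at below could be: +    # definition: +    #   provisioner: kubernetes.io/no-provisioner +    #   reclaimPolicy: Retain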
+ ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + maxAge: "2h" + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-16 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-registry-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -channelz + + +## NATS and NATS Streaming Network Policy +## +networkPolicy: + ## NATS + nats: + enabled: false + ## NATS Streaminng + nats-streaming: + enabled: false + ## Keys + keys: + ## Set keys release name for egress rules + release: "{{ .Release.Name }}" diff --git a/qliksense/charts/audit/charts/minio/.helmignore b/qliksense/charts/audit/charts/minio/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/audit/charts/minio/Chart.yaml b/qliksense/charts/audit/charts/minio/Chart.yaml new file mode 100644 index 0000000..b0e6fc4 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: RELEASE.2018-09-01T00-38-25Z +description: Minio is a high performance distributed object storage server, designed + for large-scale private cloud infrastructure. +home: https://minio.io +icon: https://www.minio.io/img/logo_160x160.png +keywords: +- storage +- object-storage +- S3 +maintainers: +- email: hello@acale.ph + name: Acaleph +- email: dev@minio.io + name: Minio +name: minio +sources: +- https://github.com/minio/minio +version: 1.6.5 diff --git a/qliksense/charts/audit/charts/minio/README.md b/qliksense/charts/audit/charts/minio/README.md new file mode 100644 index 0000000..665b7d0 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/README.md @@ -0,0 +1,227 @@ +Minio +===== + +[Minio](https://minio.io) is a lightweight, AWS S3 compatible object storage server. It is best suited for storing unstructured data such as photos, videos, log files, backups, VM and container images. 
Size of an object can range from a few KBs to a maximum of 5TB. Minio server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL. + +Minio supports [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide). In distributed mode, you can pool multiple drives (even on different machines) into a single object storage server. + +Introduction +------------ + +This chart bootstraps Minio deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Prerequisites +------------- + +- Kubernetes 1.4+ with Beta APIs enabled for default standalone mode. +- Kubernetes 1.5+ with Beta APIs enabled to run Minio in [distributed mode](#distributed-minio). +- PV provisioner support in the underlying infrastructure. + +Installing the Chart +-------------------- + +Install this chart using: + +```bash +$ helm install stable/minio +``` + +The command deploys Minio on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Release name + +An instance of a chart running in a Kubernetes cluster is called a release. Each release is identified by a unique name within the cluster. Helm automatically assigns a unique release name after installing the chart. You can also set your preferred name by: + +```bash +$ helm install --name my-release stable/minio +``` + +### Access and Secret keys + +By default a pre-generated access and secret key will be used. To override the default keys, pass the access and secret keys as arguments to helm install. + +```bash +$ helm install --set accessKey=myaccesskey,secretKey=mysecretkey \ + stable/minio +``` + +### Updating Minio configuration via Helm + +[ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) allows injecting containers with configuration data even while a Helm release is deployed. + +To update your Minio server configuration while it is deployed in a release, you need to + +1. Check all the configurable values in the Minio chart using `helm inspect values stable/minio`. +2. Override the `minio_server_config` settings in a YAML formatted file, and then pass that file like this `helm upgrade -f config.yaml stable/minio`. +3. Restart the Minio server(s) for the changes to take effect. + +You can also check the history of upgrades to a release using `helm history my-release`. Replace `my-release` with the actual release name. + +Uninstalling the Chart +---------------------- + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +Configuration +------------- + +The following table lists the configurable parameters of the Minio chart and their default values. + +| Parameter | Description | Default | +|----------------------------|-------------------------------------|---------------------------------------------------------| +| `image.repository` | Image repository | `minio/minio` | +| `image.tag` | Minio image tag. Possible values listed [here](https://hub.docker.com/r/minio/minio/tags/).| `RELEASE.2018-09-01T00-38-25Z`| +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `mcImage.repository` | Client image repository | `minio/mc` | +| `mcImage.tag` | mc image tag. 
Possible values listed [here](https://hub.docker.com/r/minio/mc/tags/).| `RELEASE.2018-08-18T02-13-04Z`| +| `mcImage.pullPolicy` | mc Image pull policy | `IfNotPresent` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.hosts` | Ingress accepted hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `mode` | Minio server mode (`standalone` or `distributed`)| `standalone` | +| `replicas` | Number of nodes (applicable only for Minio distributed mode). Should be 4 <= x <= 32 | `4` | +| `accessKey` | Default access key (5 to 20 characters) | `AKIAIOSFODNN7EXAMPLE` | +| `secretKey` | Default secret key (8 to 40 characters) | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` | +| `configPath` | Default config file location | `~/.minio` | +| `mountPath` | Default mount location for persistent drive| `/export` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed| `9000` | +| `service.externalIPs` | service external IP addresses | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `persistence.enabled` | Use persistent volume to store data | `true` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim`| Use an existing PVC to persist data | `nil` | +| `persistence.storageClass` | Storage class name of PVC | `nil` | +| `persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` | +| `persistence.subPath` | Mount a sub directory of the persistent volume if set | `""` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `priorityClassName` | Pod priority settings | `""` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `defaultBucket.enabled` | If set to true, a bucket will be created after minio install | `false` | +| `defaultBucket.name` | Bucket name | `bucket` | +| `defaultBucket.policy` | Bucket policy | `none` | +| `defaultBucket.purge` | Purge the bucket if already exists | `false` | +| `azuregateway.enabled` | Use minio as an [azure gateway](https://docs.minio.io/docs/minio-gateway-for-azure)| `false` | +| `gcsgateway.enabled` | Use minio as a [Google Cloud Storage gateway](https://docs.minio.io/docs/minio-gateway-for-gcs)| `false` | +| `gcsgateway.gcsKeyJson` | credential json file of service account key | `""` | +| `gcsgateway.projectId` | Google cloud project id | `""` | +| `nasgateway.enabled` | Use minio as a [NAS gateway](https://docs.minio.io/docs/minio-gateway-for-nas) | `false` | +| `nasgateway.replicas` | Number of NAS gateway instances to be run in parallel on a PV | `4` | + +Some of the parameters above map to the env variables defined in the [Minio DockerHub image](https://hub.docker.com/r/minio/minio/). + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set persistence.size=100Gi \ + stable/minio +``` + +The above command deploys Minio server with a 100Gi backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. 
For example, + +```bash +$ helm install --name my-release -f values.yaml stable/minio +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +Distributed Minio +----------- + +This chart provisions a Minio server in standalone mode, by default. To provision Minio server in [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide), set the `mode` field to `distributed`, + +```bash +$ helm install --set mode=distributed stable/minio +``` + +This provisions Minio server in distributed mode with 4 nodes. To change the number of nodes in your distributed Minio server, set the `replicas` field, + +```bash +$ helm install --set mode=distributed,replicas=8 stable/minio +``` + +This provisions Minio server in distributed mode with 8 nodes. Note that the `replicas` value should be an integer between 4 and 16 (inclusive). + +### StatefulSet [limitations](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/#limitations) applicable to distributed Minio + +1. StatefulSets need persistent storage, so the `persistence.enabled` flag is ignored when `mode` is set to `distributed`. +2. When uninstalling a distributed Minio release, you'll need to manually delete volumes associated with the StatefulSet. + +NAS Gateway +----------- + +### Prerequisites + +Minio in [NAS gateway mode](https://docs.minio.io/docs/minio-gateway-for-nas) can be used to create multiple Minio instances backed by single PV in `ReadWriteMany` mode. Currently few [Kubernetes volume plugins](https://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes) support `ReadWriteMany` mode. To deploy Minio NAS gateway with Helm chart you'll need to have a Persistent Volume running with one of the supported volume plugins. [This document](https://kubernetes.io/docs/user-guide/volumes/#nfs) +outlines steps to create a NFS PV in Kubernetes cluster. + +### Provision NAS Gateway Minio instances + +To provision Minio servers in [NAS gateway mode](https://docs.minio.io/docs/minio-gateway-for-nas), set the `nasgateway.enabled` field to `true`, + +```bash +$ helm install --set nasgateway.enabled=true stable/minio +``` + +This provisions 4 Minio NAS gateway instances backed by single storage. To change the number of instances in your Minio deployment, set the `replicas` field, + +```bash +$ helm install --set nasgateway.enabled=true,nasgateway.replicas=8 stable/minio +``` + +This provisions Minio NAS gateway with 8 instances. + +Persistence +----------- + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +$ helm install --set persistence.enabled=false stable/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +Existing PersistentVolumeClaim +------------------------------ + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. 
Install the chart + +```bash +$ helm install --set persistence.existingClaim=PVC_NAME stable/minio +``` + +NetworkPolicy +------------- + +To enable network policy for Minio, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to Minio. +This label will be displayed in the output of a successful install. diff --git a/qliksense/charts/audit/charts/minio/templates/NOTES.txt b/qliksense/charts/audit/charts/minio/templates/NOTES.txt new file mode 100644 index 0000000..07d264e --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/NOTES.txt @@ -0,0 +1,44 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +Minio can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}-svc.{{ .Release.Namespace }}.svc.cluster.local + +To access Minio from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ template "minio.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + + 2. kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access Minio server on http://localhost:9000. Follow the below steps to connect to Minio server with mc client: + + 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. mc config host add {{ template "minio.fullname" . }}-local http://localhost:9000 {{ .Values.accessKey }} {{ .Values.secretKey }} S3v4 + + 3. mc ls {{ template "minio.fullname" . }}-local + +Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +Minio can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access Minio server on http://:9000. Follow the below steps to connect to Minio server with mc client: + + 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. mc config host add {{ template "minio.fullname" . }}-local http://:{{ .Values.service.port }} {{ .Values.accessKey }} {{ .Values.secretKey }} S3v4 + + 3. mc ls {{ template "minio.fullname" . }}-local + +Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . 
}}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/_helper_create_bucket.txt b/qliksense/charts/audit/charts/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..582c7cd --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +# connectToMinio +# Use a check-sleep-check loop to wait for Minio service to be available +connectToMinio() { + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/accesskey) ; SECRET=$(cat /config/secretkey) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to Minio server: http://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="mc config host add myminio http://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(/usr/bin/mc ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + /usr/bin/mc rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + /usr/bin/mc mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + /usr/bin/mc policy $POLICY myminio/$BUCKET +} + +# Try connecting to Minio instance +connectToMinio +# Create the bucket +createBucket {{ .Values.defaultBucket.name }} {{ .Values.defaultBucket.policy }} {{ .Values.defaultBucket.purge }} diff --git a/qliksense/charts/audit/charts/minio/templates/_helpers.tpl b/qliksense/charts/audit/charts/minio/templates/_helpers.tpl new file mode 100644 index 0000000..c8fe9ba --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
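+For example, a release named "my-release" renders as "my-release-minio", while a release whose name already contains "minio" is used unchanged (always truncated to 63 characters).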
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/minio/templates/configmap.yaml b/qliksense/charts/audit/charts/minio/templates/configmap.yaml new file mode 100644 index 0000000..5cef4c3 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/configmap.yaml @@ -0,0 +1,140 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + config.json: |- + { + "version": "26", + "credential": { + "accessKey": {{ .Values.accessKey | quote }}, + "secretKey": {{ .Values.secretKey | quote }} + }, + "region": {{ .Values.minioConfig.region | quote }}, + "browser": {{ .Values.minioConfig.browser | quote }}, + "worm": {{ .Values.minioConfig.worm | quote }}, + "domain": {{ .Values.minioConfig.domain | quote }}, + "storageclass": { + "standard": {{ .Values.minioConfig.storageClass.standardStorageClass | quote }}, + "rrs": {{ .Values.minioConfig.storageClass.reducedRedundancyStorageClass | quote }} + }, + "cache": { + "drives": {{ .Values.minioConfig.cache.drives }}, + "expiry": {{ .Values.minioConfig.cache.expiry | int }}, + "maxuse": {{ .Values.minioConfig.cache.maxuse | int }}, + "exclude": {{ .Values.minioConfig.cache.exclude }} + }, + "notify": { + "amqp": { + "1": { + "enable": {{ .Values.minioConfig.aqmp.enable }}, + "url": {{ .Values.minioConfig.aqmp.url | quote }}, + "exchange": {{ .Values.minioConfig.aqmp.exchange | quote }}, + "routingKey": {{ .Values.minioConfig.aqmp.routingKey | quote }}, + "exchangeType": {{ .Values.minioConfig.aqmp.exchangeType | quote }}, + "deliveryMode": {{ .Values.minioConfig.aqmp.deliveryMode }}, + "mandatory": {{ .Values.minioConfig.aqmp.mandatory }}, + "immediate": {{ .Values.minioConfig.aqmp.immediate }}, + "durable": {{ .Values.minioConfig.aqmp.durable }}, + "internal": {{ .Values.minioConfig.aqmp.internal }}, + "noWait": {{ .Values.minioConfig.aqmp.noWait }}, + "autoDeleted": {{ .Values.minioConfig.aqmp.autoDeleted }} + } + }, + "nats": { + "1": { + "enable": {{ .Values.minioConfig.nats.enable }}, + "address": {{ .Values.minioConfig.nats.address | quote }}, + "subject": {{ .Values.minioConfig.nats.subject | quote }}, + "username": {{ .Values.minioConfig.nats.username | quote }}, + "password": {{ .Values.minioConfig.nats.password | quote }}, + "token": {{ 
.Values.minioConfig.nats.token | quote }}, + "secure": {{ .Values.minioConfig.nats.secure }}, + "pingInterval": {{ .Values.minioConfig.nats.pingInterval | int64 }}, + "streaming": { + "enable": {{ .Values.minioConfig.nats.enableStreaming }}, + "clusterID": {{ .Values.minioConfig.nats.clusterID | quote }}, + "clientID": {{ .Values.minioConfig.nats.clientID | quote }}, + "async": {{ .Values.minioConfig.nats.async }}, + "maxPubAcksInflight": {{ .Values.minioConfig.nats.maxPubAcksInflight | int }} + } + } + }, + "elasticsearch": { + "1": { + "enable": {{ .Values.minioConfig.elasticsearch.enable }}, + "format": {{ .Values.minioConfig.elasticsearch.format | quote }}, + "url": {{ .Values.minioConfig.elasticsearch.url | quote }}, + "index": {{ .Values.minioConfig.elasticsearch.index | quote }} + } + }, + "redis": { + "1": { + "enable": {{ .Values.minioConfig.redis.enable }}, + "format": {{ .Values.minioConfig.redis.format | quote }}, + "address": {{ .Values.minioConfig.redis.address | quote }}, + "password": {{ .Values.minioConfig.redis.password | quote }}, + "key": {{ .Values.minioConfig.redis.key | quote }} + } + }, + "postgresql": { + "1": { + "enable": {{ .Values.minioConfig.postgresql.enable }}, + "format": {{ .Values.minioConfig.postgresql.format | quote }}, + "connectionString": {{ .Values.minioConfig.postgresql.connectionString | quote }}, + "table": {{ .Values.minioConfig.postgresql.table | quote }}, + "host": {{ .Values.minioConfig.postgresql.host | quote }}, + "port": {{ .Values.minioConfig.postgresql.port | quote }}, + "user": {{ .Values.minioConfig.postgresql.user | quote }}, + "password": {{ .Values.minioConfig.postgresql.password | quote }}, + "database": {{ .Values.minioConfig.postgresql.database | quote }} + } + }, + "kafka": { + "1": { + "enable": {{ .Values.minioConfig.kafka.enable }}, + "brokers": {{ .Values.minioConfig.kafka.brokers }}, + "topic": {{ .Values.minioConfig.kafka.topic | quote }} + } + }, + "webhook": { + "1": { + "enable": {{ .Values.minioConfig.webhook.enable }}, + "endpoint": {{ .Values.minioConfig.webhook.endpoint | quote }} + } + }, + "mysql": { + "1": { + "enable": {{ .Values.minioConfig.mysql.enable }}, + "format": {{ .Values.minioConfig.mysql.format | quote }}, + "dsnString": {{ .Values.minioConfig.mysql.dsnString | quote }}, + "table": {{ .Values.minioConfig.mysql.table | quote }}, + "host": {{ .Values.minioConfig.mysql.host | quote }}, + "port": {{ .Values.minioConfig.mysql.port | quote }}, + "user": {{ .Values.minioConfig.mysql.user | quote }}, + "password": {{ .Values.minioConfig.mysql.password | quote }}, + "database": {{ .Values.minioConfig.mysql.database | quote }} + } + }, + "mqtt": { + "1": { + "enable": {{ .Values.minioConfig.mqtt.enable }}, + "broker": {{ .Values.minioConfig.mqtt.broker | quote }}, + "topic": {{ .Values.minioConfig.mqtt.topic | quote }}, + "qos": {{ .Values.minioConfig.mqtt.qos | int }}, + "clientId": {{ .Values.minioConfig.mqtt.clientId | quote }}, + "username": {{ .Values.minioConfig.mqtt.username | quote }}, + "password": {{ .Values.minioConfig.mqtt.password | quote }}, + "reconnectInterval": {{ .Values.minioConfig.mqtt.reconnectInterval | int }}, + "keepAliveInterval": {{ .Values.minioConfig.mqtt.keepAliveInterval | int }} + } + } + } + } \ No newline at end of file diff --git a/qliksense/charts/audit/charts/minio/templates/deployment.yaml b/qliksense/charts/audit/charts/minio/templates/deployment.yaml new file mode 100644 index 0000000..907c38c --- /dev/null +++ 
b/qliksense/charts/audit/charts/minio/templates/deployment.yaml @@ -0,0 +1,134 @@ +{{- if eq .Values.mode "standalone" }} +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + {{- if .Values.nasgateway.enabled }} + replicas: {{ .Values.nasgateway.replicas }} + {{- end }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.azuregateway.enabled }} + command: [ "/bin/sh", + "-ce", + "cp /tmp/config.json {{ .Values.configPath }} && + /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway azure"] + {{- else }} + {{- if .Values.gcsgateway.enabled }} + command: [ "/bin/sh", + "-ce", + "cp /tmp/config.json {{ .Values.configPath }} && + /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway gcs {{ .Values.gcsgateway.projectId }}"] + {{- else }} + {{- if .Values.nasgateway.enabled }} + command: [ "/bin/sh", + "-ce", + "cp /tmp/config.json {{ .Values.configPath }} && + /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway nas {{ .Values.mountPath }}"] + {{- else }} + command: [ "/bin/sh", + "-ce", + "cp /tmp/config.json {{ .Values.configPath }} && + /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} server {{ .Values.mountPath }}" ] + {{- end }} + {{- end }} + {{- end }} + volumeMounts: + - name: export + mountPath: {{ .Values.mountPath }} + {{- if and .Values.persistence.enabled .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- if .Values.gcsgateway.enabled }} + - name: minio-user + mountPath: "/etc/credentials" + readOnly: true + {{- end }} + - name: minio-server-config + mountPath: "/tmp/config.json" + subPath: config.json + - name: minio-config-dir + mountPath: {{ .Values.configPath }} + ports: + - name: service + containerPort: 9000 + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.fullname" . }} + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.fullname" . }} + key: secretkey + {{- if .Values.gcsgateway.enabled }} + - name: GOOGLE_APPLICATION_CREDENTIALS + valueFrom: + secretKeyRef: + name: {{ template "minio.fullname" . }} + key: gcs_key.json + {{- end }} + livenessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 5 + timeoutSeconds: 1 + readinessProbe: + tcpSocket: + port: 9000 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) 
}} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-server-config + configMap: + name: {{ template "minio.fullname" . }} + - name: minio-user + secret: + secretName: {{ template "minio.fullname" . }} + - name: minio-config-dir + emptyDir: {} +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/ingress.yaml b/qliksense/charts/audit/charts/minio/templates/ingress.yaml new file mode 100644 index 0000000..dddad96 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/ingress.yaml @@ -0,0 +1,39 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/networkpolicy.yaml b/qliksense/charts/audit/charts/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..de57f48 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/networkpolicy.yaml @@ -0,0 +1,25 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/post-install-create-bucket-job.yaml b/qliksense/charts/audit/charts/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..b0b9cd3 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,47 @@ +{{- if .Values.defaultBucket.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + spec: + restartPolicy: OnFailure +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . 
}} + - secret: + name: {{ template "minio.fullname" . }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/pvc.yaml b/qliksense/charts/audit/charts/minio/templates/pvc.yaml new file mode 100644 index 0000000..5bffaf7 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.nasgateway.enabled }} + selector: + matchLabels: + pv: {{ .Values.nasgateway.pv | quote }} +{{- end }} + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/templates/secrets.yaml b/qliksense/charts/audit/charts/minio/templates/secrets.yaml new file mode 100644 index 0000000..f130bf9 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/secrets.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + accesskey: {{ .Values.accessKey | b64enc }} + secretkey: {{ .Values.secretKey | b64enc }} +{{- if .Values.gcsgateway.enabled }} + gcs_key.json: {{ .Values.gcsgateway.gcsKeyJson | b64enc }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/minio/templates/service.yaml b/qliksense/charts/audit/charts/minio/templates/service.yaml new file mode 100644 index 0000000..1ac8c22 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/service.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: service + port: 9000 + targetPort: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/audit/charts/minio/templates/statefulset.yaml b/qliksense/charts/audit/charts/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4fdf80 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/templates/statefulset.yaml @@ -0,0 +1,99 @@ +{{- if eq .Values.mode "distributed" }} +{{ $nodeCount := .Values.replicas | int }} +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + serviceName: {{ template "minio.fullname" . }} + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: [ "/bin/sh", + "-ce", + "cp /tmp/config.json {{ .Values.configPath }} && + /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} server + {{- range $i := until $nodeCount }} + http://{{ template `minio.fullname` $ }}-{{ $i }}.{{ template `minio.fullname` $ }}.{{ $.Release.Namespace }}.svc.cluster.local{{ $.Values.mountPath }} + {{- end }}" ] + volumeMounts: + - name: export + mountPath: {{ .Values.mountPath }} + {{- if and .Values.persistence.enabled .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + - name: minio-server-config + mountPath: "/tmp/config.json" + subPath: config.json + - name: minio-config-dir + mountPath: {{ .Values.configPath }} + ports: + - name: service + containerPort: 9000 + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.fullname" . }} + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.fullname" . }} + key: secretkey + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.fullname" . }} + - name: minio-server-config + configMap: + name: {{ template "minio.fullname" . }} + - name: minio-config-dir + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: export + spec: + accessModes: [ {{ .Values.persistence.accessMode | quote }} ] + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} diff --git a/qliksense/charts/audit/charts/minio/values.yaml b/qliksense/charts/audit/charts/minio/values.yaml new file mode 100644 index 0000000..0a0c428 --- /dev/null +++ b/qliksense/charts/audit/charts/minio/values.yaml @@ -0,0 +1,239 @@ +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: minio/minio + tag: RELEASE.2018-09-01T00-38-25Z + pullPolicy: IfNotPresent + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). +## +mcImage: + repository: minio/mc + tag: RELEASE.2018-08-18T02-13-04Z + pullPolicy: IfNotPresent + +## minio server mode, i.e. standalone or distributed. +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: standalone + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default accesskey, secretkey, Minio config file path, volume mount path and +## number of nodes (only used for Minio distributed mode) +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +accessKey: "AKIAIOSFODNN7EXAMPLE" +secretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" +configPath: "/root/.minio/" +mountPath: "/export" +replicas: 4 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + # storageClass: standard + accessMode: ReadWriteOnce + size: 10Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the Minio service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## + +service: + type: ClusterIP + clusterIP: ~ + port: 9000 + # nodePort: 31311 + # externalIPs: + # - externalIp1 + annotations: {} + # prometheus.io/scrape: 'true' + # prometheus.io/path: '/minio/prometheus/metrics' + # prometheus.io/port: '9000' + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Create a bucket after minio install +## +defaultBucket: + enabled: false + ## If enabled, must be a string with length > 0 + name: bucket + ## Can be one of none|download|upload|public + policy: none + ## Purge if bucket exists already + purge: false + +## Use minio as an Azure Blob gateway; you should disable data persistence so no volume claims are created. +## https://docs.minio.io/docs/minio-gateway-for-azure +azuregateway: + enabled: false + +## Use minio as a GCS (Google Cloud Storage) gateway; you should disable data persistence so no volume claims are created. +## https://docs.minio.io/docs/minio-gateway-for-gcs + +gcsgateway: + enabled: false + # credential json file of service account key + gcsKeyJson: "" + # Google cloud project-id + projectId: "" + +## Use minio on NAS backend +## https://docs.minio.io/docs/minio-gateway-for-nas + +nasgateway: + enabled: false + # Number of parallel instances + replicas: 4 + # For NAS Gateway, you may want to bind the PVC to a specific PV. To ensure that happens, PV to bind to should have + # a label like "pv: ", use value here.
+ pv: ~ + +## https://docs.minio.io/docs/minio-bucket-notification-guide +## https://github.com/minio/minio/blob/master/docs/config +minioConfig: + region: "us-east-1" + browser: "on" + domain: "" + worm: "off" + storageClass: + standardStorageClass: "" + reducedRedundancyStorageClass: "" + cache: + drives: [] + expiry: 90 + maxuse: 80 + exclude: [] + aqmp: + enable: false + url: "" + exchange: "" + routingKey: "" + exchangeType: "" + deliveryMode: 0 + mandatory: false + immediate: false + durable: false + internal: false + noWait: false + autoDeleted: false + nats: + enable: false + address: "" + subject: "" + username: "" + password: "" + token: "" + secure: false + pingInterval: 0 + enableStreaming: false + clusterID: "" + clientID: "" + async: false + maxPubAcksInflight: 0 + elasticsearch: + enable: false + format: "namespace" + url: "" + index: "" + redis: + enable: false + format: "namespace" + address: "" + password: "" + key: "" + postgresql: + enable: false + format: "namespace" + connectionString: "" + table: "" + host: "" + port: "" + user: "" + password: "" + database: "" + kafka: + enable: false + brokers: "null" + topic: "" + webhook: + enable: false + endpoint: "" + mysql: + enable: false + format: "namespace" + dsnString: "" + table: "" + host: "" + port: "" + user: "" + password: "" + database: "" + mqtt: + enable: false + broker: "" + topic: "" + qos: 0 + clientId: "" + username: "" + password: "" + reconnectInterval: 0 + keepAliveInterval: 0 +networkPolicy: + enabled: false + allowExternal: true diff --git a/qliksense/charts/audit/charts/mongodb/.helmignore b/qliksense/charts/audit/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/audit/charts/mongodb/Chart.yaml b/qliksense/charts/audit/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. +home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/audit/charts/mongodb/OWNERS b/qliksense/charts/audit/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/audit/charts/mongodb/README.md b/qliksense/charts/audit/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. 
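+In this patch the chart is vendored at `qliksense/charts/audit/charts/mongodb`, so its parameters are normally set from the enclosing chart's values file rather than by installing it on its own. Below is a minimal sketch of such an override, assuming the standard Helm convention of nesting subchart values under the subchart name; the keys themselves are taken from the configuration table later in this README.
+
+```yaml
+# Hypothetical excerpt of the enclosing chart's values file (not part of this patch).
+# Values placed under the "mongodb" key are passed down to this subchart.
+mongodb:
+  usePassword: true
+  mongodbDatabase: my-database   # same example database name used in this README
+  persistence:
+    enabled: true
+    size: 8Gi                    # chart default, shown here for illustration
+```
+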
+ +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alphanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alphanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` 
| Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `configmap` | MongoDB configuration file to be used | `nil` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \ + stable/mongodb +``` + +The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/mongodb +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Replication + +You can start the MongoDB chart in replica set mode with the following command: + +```bash +$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists of a configuration to deploy a scalable, highly available MongoDB deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+ +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/mongodb +``` + +To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set. + +```console +$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3 +``` + +Some characteristics of this chart are: + +* Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes. +* The number of secondary and arbiter nodes can be scaled out independently. +* Easy to move an application from using a standalone MongoDB server to use a replica set. + +## Initialize a fresh instance + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +The allowed extensions are `.sh`, and `.js`. + +## Persistence + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/audit/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/audit/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/NOTES.txt b/qliksense/charts/audit/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . 
}} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/audit/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/mongodb/templates/configmap.yaml b/qliksense/charts/audit/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/audit/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . 
}} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . 
}}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/audit/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/audit/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/audit/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/mongodb/templates/secrets.yaml b/qliksense/charts/audit/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/audit/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/audit/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/audit/charts/mongodb/values-production.yaml b/qliksense/charts/audit/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. +# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/audit/charts/mongodb/values.yaml b/qliksense/charts/audit/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/audit/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. +# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/audit/charts/qlikcommon/.helmignore b/qliksense/charts/audit/charts/qlikcommon/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/audit/charts/qlikcommon/Chart.yaml b/qliksense/charts/audit/charts/qlikcommon/Chart.yaml new file mode 100644 index 0000000..f2622c4 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 1.0.13 +description: Qlik resource contract chartbuilding components and helpers +home: https://github.com/qlik-trial/resource-contract +maintainers: +- email: boris.kuschel@qlik.com + name: bkuschel +name: qlikcommon +version: 1.0.13 diff --git a/qliksense/charts/audit/charts/qlikcommon/README.md b/qliksense/charts/audit/charts/qlikcommon/README.md new file mode 100644 index 0000000..664b529 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/README.md @@ -0,0 +1,837 @@ +# Qlik Common + +This chart is based off of the Common helper chart hosts in the kubernetes incubator +helm chart repo. Documentation below. + +## Common: The Helm Helper Chart + +This chart is designed to make it easier for you to build and maintain Helm +charts. + +It provides utilities that reflect best practices of Kubernetes chart development, +making it faster for you to write charts. + +## Tips + +A few tips for working with Common: + +- Be careful when using functions that generate random data (like `common.fullname.unique`). + They may trigger unwanted upgrades or have other side effects. + +In this document, we use `RELEASE-NAME` as the name of the release. 
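+
+As a quick illustration of the tip above (a minimal sketch: the chart name `mychart` and the suffixes are made up, since `common.fullname.unique` generates a new random suffix on every render):
+
+```yaml
+# first install renders something like:
+name: RELEASE-NAME-mychart-a1b2c3d
+# a later `helm upgrade` re-renders with a different suffix, so the
+# resource is replaced rather than updated in place -- the "unwanted
+# upgrades" mentioned above:
+name: RELEASE-NAME-mychart-x9y8z7w
+```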
+ +## Resource Kinds + +Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`. +We define some of the most common kinds in a way that lets you easily work with +them. + +The resource kind templates are designed to make it much faster for you to +define _basic_ versions of these resources. They allow you to extend and modify +just what you need, without having to copy around lots of boilerplate. + +To make use of these templates you must define a template that will extend the +base template (though it can be empty). The name of this template is then passed +to the base template, for example: + +```yaml +{{- template "common.service" (list . "mychart.service") -}} +{{- define "mychart.service" -}} +## Define overrides for your Service resource here, e.g. +# metadata: +# labels: +# custom: label +# spec: +# ports: +# - port: 8080 +{{- end -}} +``` + +Note that the `common.service` template defines two parameters: + + - The root context (usually `.`) + - A template name containing the service definition overrides + +A limitation of the Go template library is that a template can only take a +single argument. The `list` function is used to workaround this by constructing +a list or array of arguments that is passed to the template. + +The `common.service` template is responsible for rendering the templates with +the root context and merging any overrides. As you can see, this makes it very +easy to create a basic `Service` resource without having to copy around the +standard metadata and labels. + +Each implemented base resource is described in greater detail below. + +### `common.service` + +The `common.service` template creates a basic `Service` resource with the +following defaults: + +- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type` +- Named port `http` configured on port 80 +- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource + +Example template: + +```yaml +{{- template "common.service" (list . "mychart.mail.service") -}} +{{- define "mychart.mail.service" -}} +metadata: + name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: mail +spec: + ports: # composes the `ports` section of the service definition. + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: # this is appended to the default selector + protocol: mail +{{- end -}} +--- +{{ template "common.service" (list . "mychart.web.service") -}} +{{- define "mychart.web.service" -}} +metadata: + name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: www +spec: + ports: # composes the `ports` section of the service definition. + - name: www + port: 80 + targetPort: 8080 +{{- end -}} +``` + +The above template defines _two_ services: a web service and a mail service. + +The most important part of a service definition is the `ports` object, which +defines the ports that this service will listen on. Most of the time, +`selector` is computed for you. But you can replace it or add to it. 
+ +The output of the example above is: + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: mail + release: release-name + name: release-name-service-mail +spec: + ports: + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: + app: service + release: release-name + protocol: mail + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: www + release: release-name + name: release-name-service-www +spec: + ports: + - name: www + port: 80 + targetPort: 8080 + type: ClusterIP +``` + +## `common.deployment` + +The `common.deployment` template defines a basic `Deployment`. Underneath the +hood, it uses `common.container` (see next section). + +By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}` +and `release: {{ .Release.Name | quote }` as this is also used as the selector. The +standard set of labels are not used as some of these can change during upgrades, +which causes the replica sets and pods to not correctly match. + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + replicas: {{ .Values.replicaCount }} +{{- end -}} +``` + +## `common.container` + +The `common.container` template creates a basic `Container` spec to be used +within a `Deployment` or `ReplicaSet`. It holds the following defaults: + +- The name is set to `main` +- Uses `.Values.image` to describe the image to run, with the following spec: + ```yaml + image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + ``` +- Exposes the named port `http` as port 80 +- Lays out the compute resources using `.Values.resources` + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "mychart.deployment.container") | indent 8}} +{{- end -}} +{{- define "mychart.deployment.container" -}} +## Define overrides for your Container here, e.g. +livenessProbe: + httpGet: + path: / + port: 80 +readinessProbe: + httpGet: + path: / + port: 80 +{{- end -}} +``` + +The above example creates a `Deployment` resource which makes use of the +`common.container` template to populate the PodSpec's container list. The usage +of this template is similar to the other resources, you must define and +reference a template that contains overrides for the container object. + +The most important part of a container definition is the image you want to run. +As mentioned above, this is derived from `.Values.image` by default. It is a +best practice to define the image, tag and pull policy in your charts' values as +this makes it easy for an operator to change the image registry, or use a +specific tag or version. Another example of configuration that should be exposed +to chart operators is the container's required compute resources, as this is +also very specific to an operators environment. 
An example `values.yaml` for +your chart could look like: + +```yaml +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +The output of running the above values through the earlier template is: + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: deployment + chart: deployment-0.1.0 + heritage: Tiller + release: release-name + name: release-name-deployment +spec: + template: + metadata: + labels: + app: deployment + spec: + containers: + - image: nginx:stable + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 80 + name: deployment + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: 80 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +## `common.configmap` + +The `common.configmap` template creates an empty `ConfigMap` resource that you +can override with your configuration. + +Example use: + +```yaml +{{- template "common.configmap" (list . "mychart.configmap") -}} +{{- define "mychart.configmap" -}} +data: + zeus: cat + athena: cat + julius: cat + one: |- + {{ .Files.Get "file1.txt" }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: cat + julius: cat + one: This is a file. + zeus: cat +kind: ConfigMap +metadata: + labels: + app: configmap + chart: configmap-0.1.0 + heritage: Tiller + release: release-name + name: release-name-configmap +``` + +## `common.secret` + +The `common.secret` template creates an empty `Secret` resource that you +can override with your secrets. + +Example use: + +```yaml +{{- template "common.secret" (list . "mychart.secret") -}} +{{- define "mychart.secret" -}} +data: + zeus: {{ print "cat" | b64enc }} + athena: {{ print "cat" | b64enc }} + julius: {{ print "cat" | b64enc }} + one: |- + {{ .Files.Get "file1.txt" | b64enc }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: Y2F0 + julius: Y2F0 + one: VGhpcyBpcyBhIGZpbGUuCg== + zeus: Y2F0 +kind: Secret +metadata: + labels: + app: secret + chart: secret-0.1.0 + heritage: Tiller + release: release-name + name: release-name-secret +type: Opaque +``` + +## `common.ingress` + +The `common.ingress` template is designed to give you a well-defined `Ingress` +resource, that can be configured using `.Values.ingress`. An example values file +that can be used to configure the `Ingress` resource is: + +```yaml +ingress: + hosts: + - chart-example.local + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + tls: + - secretName: chart-example-tls + hosts: + - chart-example.local +``` + +Example use: + +```yaml +{{- template "common.ingress" (list . 
"mychart.ingress") -}} +{{- define "mychart.ingress" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: + app: ingress + chart: ingress-0.1.0 + heritage: Tiller + release: release-name + name: release-name-ingress +spec: + rules: + - host: chart-example.local + http: + paths: + - backend: + serviceName: release-name-ingress + servicePort: 80 + path: / + tls: + - hosts: + - chart-example.local + secretName: chart-example-tls +``` + +## `common.persistentvolumeclaim` + +`common.persistentvolumeclaim` can be used to easily add a +`PersistentVolumeClaim` resource to your chart that can be configured using +`.Values.persistence`: + +| Value | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------- | +| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead | +| persistence.storageClass | `StorageClass` name | +| persistence.accessMode | Access mode for persistent volume | +| persistence.size | Size of persistent volume | +| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim | + +An example values file that can be used to configure the +`PersistentVolumeClaim` resource is: + +```yaml +persistence: + enabled: true + storageClass: fast + accessMode: ReadWriteOnce + size: 8Gi +``` + +Example use: + +```yaml +{{- template "common.persistentvolumeclaim" (list . "mychart.persistentvolumeclaim") -}} +{{- define "mychart.persistentvolumeclaim" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: persistentvolumeclaim + chart: persistentvolumeclaim-0.1.0 + heritage: Tiller + release: release-name + name: release-name-persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "fast" +``` + +## Partial API Objects + +When writing Kubernetes resources, you may find the following helpers useful to +construct parts of the spec. + +### EnvVar + +Use the EnvVar helpers within a container spec to simplify specifying key-value +environment variables or referencing secrets as values. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + containers: + - {{ template "common.container" (list . "mychart.deployment.container") }} +{{- end -}} +{{- define "mychart.deployment.container" -}} +{{- $fullname := include "common.fullname" . -}} +env: +- {{ template "common.envvar.value" (list "ZEUS" "cat") }} +- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + containers: + - env: + - name: ZEUS + value: cat + - name: ATHENA + valueFrom: + secretKeyRef: + key: athena + name: secret-name +... +``` + +### Volume + +Use the Volume helpers within a `Deployment` spec to help define ConfigMap and +PersistentVolumeClaim volumes. + +Example Use: + +```yaml +{{- template "common.deployment" (list . 
"mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + volumes: + - {{ template "common.volume.configMap" (list "config" "configmap-name") }} + - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + volumes: + - configMap: + name: configmap-name + name: config + - name: data + persistentVolumeClaim: + claimName: pvc-name +... +``` + +The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object: + +| Value | Description | +| ------------------------- | ----------------------------------------------------- | +| persistence.enabled | If false, creates an `emptyDir` instead | +| persistence.existingClaim | If set, uses this instead of the passed in claim name | + +## Utilities + +### `common.fullname` + +The `common.fullname` template generates a name suitable for the `name:` field +in Kubernetes metadata. It is used like this: + +```yaml +name: {{ template "common.fullname" . }} +``` + +The following different values can influence it: + +```yaml +# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +fullnameOverride: "some-name" + +# This adds a prefix +fullnamePrefix: "pre-" +# This appends a suffix +fullnameSuffix: "-suf" + +# Global versions of the above +global: + fullnamePrefix: "pp-" + fullnameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for release "happy-panda" and chart "wordpress" +name: happy-panda-wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.fullname" -}} + {{ template "common.fullname" . }}-my-stuff +{{- end -}} +``` + +### `common.fullname.unique` + +The `common.fullname.unique` variant of fullname appends a unique seven-character +sequence to the end of the common name field. + +This takes all of the same parameters as `common.fullname` + +Example template: + +```yaml +uniqueName: {{ template "common.fullname.unique" . }} +``` + +Example output: + +```yaml +uniqueName: release-name-fullname-jl0dbwx +``` + +It is also impacted by the prefix and suffix definitions, as well as by +`.Values.fullnameOverride` + +Note that the effective maximum length of this function is 63 characters, not 54. + +### `common.name` + +The `common.name` template generates a name suitable for the `app` label. It is used like this: + +```yaml +app: {{ template "common.name" . }} +``` + +The following different values can influence it: + +```yaml +# By default, name uses '{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +nameOverride: "some-name" + +# This adds a prefix +namePrefix: "pre-" +# This appends a suffix +nameSuffix: "-suf" + +# Global versions of the above +global: + namePrefix: "pp-" + nameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for chart "wordpress" +name: wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.name" -}} + {{ template "common.name" . 
}}-my-stuff +{{- end -}} +``` + +### `common.metadata` + +The `common.metadata` helper generates the `metadata:` section of a Kubernetes +resource. + +This takes three objects: + - .top: top context + - .fullnameOverride: override the fullname with this name + - .metadata + - .labels: key/value list of labels + - .annotations: key/value list of annotations + - .hook: name(s) of hook(s) + +It generates standard labels, annotations, hooks, and a name field. + +Example template: + +```yaml +{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }} +--- +{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }} +``` + +Example values: + +```yaml +bio: + name: example + labels: + first: matt + last: butcher + nick: technosophos + annotations: + format: bio + destination: archive + hook: pre-install + +pet: + fullnameOverride: Zeus + +``` + +Example output: + +```yaml +metadata: + name: release-name-metadata + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + first: "matt" + last: "butcher" + nick: "technosophos" + annotations: + "destination": "archive" + "format": "bio" + "helm.sh/hook": "pre-install" +--- +metadata: + name: Zeus + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + annotations: +``` + +Most of the common templates that define a resource type (e.g. `common.configmap` +or `common.job`) use this to generate the metadata, which means they inherit +the same `labels`, `annotations`, `nameOverride`, and `hook` fields. + +### `common.labelize` + +`common.labelize` turns a map into a set of labels. + +Example template: + +```yaml +{{- $map := dict "first" "1" "second" "2" "third" "3" -}} +{{- template "common.labelize" $map -}} +``` + +Example output: + +```yaml +first: "1" +second: "2" +third: "3" +``` + +### `common.labels.standard` + +`common.labels.standard` prints the standard set of labels. + +Example usage: + +``` +{{ template "common.labels.standard" . }} +``` + +Example output: + +```yaml +app: labelizer +heritage: "Tiller" +release: "RELEASE-NAME" +chart: labelizer-0.1.0 +``` + +### `common.hook` + +The `common.hook` template is a convenience for defining hooks. + +Example template: + +```yaml +{{ template "common.hook" "pre-install,post-install" }} +``` + +Example output: + +```yaml +"helm.sh/hook": "pre-install,post-install" +``` + +### `common.chartref` + +The `common.chartref` helper prints the chart name and version, escaped to be +legal in a Kubernetes label field. + +Example template: + +```yaml +chartref: {{ template "common.chartref" . 
}} +``` + +For the chart `foo` with version `1.2.3-beta.55+1234`, this will render: + +```yaml +chartref: foo-1.2.3-beta.55_1234 +``` + +(Note that `+` is an illegal character in label values) diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_certificates.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_certificates.tpl new file mode 100644 index 0000000..d385098 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_certificates.tpl @@ -0,0 +1,32 @@ +{{- define "common.ca-certificates.volume" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +{{- if .Values.global.certs.volume }} +- name: ca-certificates + {{- if .Values.global.certs.volume.hostPath }} + hostPath: + path: {{ .Values.global.certs.volume.hostPath }} + type: Directory + {{- end }} + {{- if .Values.global.certs.volume.existingVolumeClaim }} + persistentVolumeClaim: + claimName: {{ .Values.global.certs.volume.existingVolumeClaim }} + {{- end }} +{{- else }} +- name: ca-certificates + persistentVolumeClaim: + claimName: {{ .Release.Name }}-certs-pvc +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.ca-certificates.volumeMount" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +- name: ca-certificates + mountPath: {{ default "/etc/ssl/certs" .Values.certs.mountPath | quote }} + readOnly: true +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_chartref.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_chartref.tpl new file mode 100644 index 0000000..e6c1486 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_chartref.tpl @@ -0,0 +1,14 @@ +{{- /* +common.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. + +Example output: + + zookeeper-1.2.3 + wordpress-3.2.1_20170219 + +*/ -}} +{{- define "common.chartref" -}} + {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_configmap.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_configmap.yaml new file mode 100644 index 0000000..f04def2 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_configmap.yaml @@ -0,0 +1,32 @@ +{{- define "common.configmap.tpl" -}} +apiVersion: v1 +kind: ConfigMap +{{ template "common.metadata.configs" . }} +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.configs }} + {{- range $key, $value := $container.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.configs -}} + {{- range $key, $value := .Values.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.configmap" -}} +{{- template "common.util.merge" (append . 
"common.configmap.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_container.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_container.yaml new file mode 100644 index 0000000..6ed061e --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_container.yaml @@ -0,0 +1,93 @@ +{{- define "common.container.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ include "common.name" . }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +{{- if or .Values.configs .Values.secrets }} +env: +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +ports: +{{- $port := .Values.service.port }} +{{- if .container }}{{- if .container.port }} + {{- $port = .container.port }} +{{- end }}{{- end }} +- containerPort: {{ $port }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- $multipleMounts := false }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- $multipleMounts = true }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- if ne ($multipleMounts | default false) true }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /health + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +readinessProbe: + httpGet: + path: /ready + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.container" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . 
"common.container.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_deployment.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_deployment.yaml new file mode 100644 index 0000000..889bb7f --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_deployment.yaml @@ -0,0 +1,72 @@ +{{- define "common.deployment.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: Deployment +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.deployment.replicas}} + replicas: {{ .Values.deployment.replicas }} +{{- end}} + template: + metadata: + annotations: + checksum/configs: {{ (print (include "common.configmap.tpl" .)) | sha256sum }} + checksum/secrets: {{ (print (include "common.secret.tpl" .)) | sha256sum }} +{{- if .Values.deployment }}{{- if .Values.deployment.annotations }} +{{ include "common.annote" (dict "annotations" .Values.deployment.annotations "root" . ) | indent 8 }} +{{- end }}{{- end }} + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment }}{{- if .Values.deployment.labels }} +{{ include "common.labelize" .Values.deployment.labels | indent 8 }} +{{- end }}{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.natsUri }} + {{ tpl .Values.configs.data.natsUri . | regexFind "//.*:" | trimAll ":" | trimAll "/" }}: "true" +{{- end }}{{- end }}{{- end }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: +{{- if contains $name .Release.Name }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- else }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} + - {{ template "common.volume.secret" (list (printf "%s-secrets" (.Release.Name)) (printf "%s" (.Release.Name))) }} +{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }} +{{- $multiPVC := false }} +{{- range $name, $claim:= $root.Values.persistence }} +{{- if kindIs "map" $claim }} +{{- $multiPVC = true }} + - {{ template "common.volume.pvc" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $root.Values.persistence) }} +{{- end }} +{{- end }} +{{- if ne ($multiPVC | default false) true }} + - {{ template "common.volume.pvc" (list (include "common.fullname" $root) (include "common.fullname" $root) $root.Values.persistence) }} +{{- end }} +{{- end }}{{- end }} +{{ include "common.ca-certificates.volume" . | nindent 6 }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePerionSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.deployment.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.deployment" -}} +{{- $top := first . -}} +{{- if and $top.Values.deployment }} +{{- template "common.util.merge" (append . 
"common.deployment.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_envvar.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_envvar.tpl new file mode 100644 index 0000000..39a997a --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_envvar.tpl @@ -0,0 +1,32 @@ +{{- define "common.envvar.value" -}} + {{- $name := index . 0 -}} + {{- $value := index . 1 -}} + + name: {{ $name }} + value: {{ default "" $value | quote }} +{{- end -}} + +{{- define "common.envvar.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + {{- $configMapKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + configMapKeyRef: + name: {{ $configMapName }}-configs + key: {{ $configMapKey }} +{{- end -}} + +{{- define "common.envvar.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + {{- $secretKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + secretKeyRef: + name: {{ $secretName }}-secrets + key: {{ $secretKey }} +{{- end -}} + diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_fullname.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_fullname.tpl new file mode 100644 index 0000000..0f6bc77 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_fullname.tpl @@ -0,0 +1,42 @@ +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.fullname" . -}}"' +*/ -}} +{{- define "common.fullname" -}} + {{- $global := default (dict) .Values.global -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +common.fullname.unique adds a random suffix to the unique name. + +This takes the same parameters as common.fullname + +*/ -}} +{{- define "common.fullname.unique" -}} + {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }} +{{- end }} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_hpa.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_hpa.yaml new file mode 100644 index 0000000..be4215d --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_hpa.yaml @@ -0,0 +1,31 @@ +{{- define "common.hpa.tpl" -}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +{{ template "common.metadata" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "common.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ if .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpa.targetAverageUtilizationMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end -}} +{{- define "common.hpa" -}} +{{- $top := first . -}} +{{- if and $top.Values.hpa }} +{{- template "common.util.merge" (append . "common.hpa.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_image.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_image.tpl new file mode 100644 index 0000000..d60569d --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_image.tpl @@ -0,0 +1,21 @@ +{{/* Return the proper collections image name */}} +{{- define "common.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{ $image := .Values.image }} + {{- if .container }}{{- if .container.image }} + {{ $image = .container.image }} + {{- end -}}{{- end -}} + {{- $registry := default "docker.io" (default .Values.image.registry $image.registry) -}} + {{- $repository := $image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" $image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_ingress.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_ingress.yaml new file mode 100644 index 0000000..ab9a75d --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_ingress.yaml @@ -0,0 +1,49 @@ +{{- define "common.ingress.tpl" -}} +apiVersion: extensions/v1beta1 +kind: Ingress +{{ template "common.metadata" . }} + annotations: + kubernetes.io/ingress.class: {{ template "common.ingress.class" . }} + {{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.ingressAuthUrl }} + nginx.ingress.kubernetes.io/auth-url: {{ tpl .Values.configs.data.ingressAuthUrl . | quote }} + {{- end }}{{- end }}{{- end }} + {{- if .Values.ingress}}{{- if .Values.ingress.annotations }} + {{ include "common.annote" (dict "annotations" .Values.ingress.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + {{- if .Values.ingress }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "common.fullname" $ }} + servicePort: 80 + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} + {{- end }} +{{- define "common.ingress" -}} +{{- $top := first . -}} +{{- if and $top.Values.ingress }} +{{- template "common.util.merge" (append . 
"common.ingress.tpl") -}} +{{- end -}} +{{- end -}} + +{{- define "common.ingress.class" -}} + {{- $ingressClass := "nginx" }} + {{- if .Values.ingress }}{{- if .Values.ingress.class }} + {{- $ingressClass = .Values.ingress.class -}} + {{- end -}}{{- end -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_metadata.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata.yaml new file mode 100644 index 0000000..83c42d5 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata.yaml @@ -0,0 +1,35 @@ +{{- /* +common.metadata creates a standard metadata header. +It creates a 'metadata:' section with name and labels. +*/ -}} +{{ define "common.metadata" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.configs" -}} +metadata: + name: {{ template "common.fullname" . }}-configs + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.secrets" -}} +metadata: + name: {{ template "common.fullname" . }}-secrets + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.workload" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_annotations.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_annotations.tpl new file mode 100644 index 0000000..ed28474 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_annotations.tpl @@ -0,0 +1,23 @@ +{{- /* +common.hook defines a hook. + +This is to be used in a 'metadata.annotations' section. + +This should be called as 'template "common.metadata.hook" "post-install"' + +Any valid hook may be passed in. Separate multiple hooks with a ",". +*/ -}} +{{- define "common.hook" -}} +"helm.sh/hook": {{printf "%s" . | quote}} +{{- end -}} + +{{- define "common.annote" -}} +{{ $root := .root}} +{{- range $k, $v := .annotations }} +{{- if kindIs "string" $v }} +{{ $k | quote }}: {{ tpl $v $root | quote }} +{{- else -}} +{{ $k | quote }}: {{ $v }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_labels.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_labels.tpl new file mode 100644 index 0000000..15fe00c --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_metadata_labels.tpl @@ -0,0 +1,28 @@ +{{- /* +common.labelize takes a dict or map and generates labels. + +Values will be quoted. Keys will not. + +Example output: + + first: "Matt" + last: "Butcher" + +*/ -}} +{{- define "common.labelize" -}} +{{- range $k, $v := . }} +{{ $k }}: {{ $v | quote }} +{{- end -}} +{{- end -}} + +{{- /* +common.labels.standard prints the standard Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "common.labels.standard" -}} +app: {{ template "common.name" . }} +chart: {{ template "common.chartref" . 
}} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_name.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_name.tpl new file mode 100644 index 0000000..1d42fb0 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_name.tpl @@ -0,0 +1,29 @@ +{{- /* +name defines a template for the name of the chart. It should be used for the `app` label. +This is common practice in many Kubernetes manifests, and is not Helm-specific. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.name" . -}}"' +*/ -}} +{{- define "common.name"}} + {{- $global := default (dict) .Values.global -}} + {{- $base := default .Chart.Name .Values.nameOverride -}} + {{- $gpre := default "" $global.namePrefix -}} + {{- $pre := default "" .Values.namePrefix -}} + {{- $suf := default "" .Values.nameSuffix -}} + {{- $gsuf := default "" $global.nameSuffix -}} + {{- $name := print $gpre $pre $base $suf $gsuf -}} + {{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_networkpolicy.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_networkpolicy.yaml new file mode 100644 index 0000000..e0c4922 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- define "common.networkpolicy.tpl" -}} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +{{ template "common.metadata" . }} +spec: + podSelector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} + policyTypes: + - Egress + egress: + - to: +{{- if .Values.configs }}{{- if .Values.configs.data }} +{{- if .Values.configs.data.natsUri }} + - podSelector: + matchLabels: + app: "nats" + release: {{ .Values.natsRelease | default .Release.Name | quote }} + - podSelector: + matchLabels: + app: "nats-streaming" + release: {{ .Values.natsRelease | default .Release.Name | quote }} +{{- end }} +{{- if or .Values.configs.data.tokenAuthUri .Values.configs.data.ingressAuthUrl }} + - podSelector: + matchLabels: + app: "edge-auth" + release: {{ .Values.edgeAuthRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.keysUri }} + - podSelector: + matchLabels: + app: "keys" + release: {{ .Values.keysRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.pdsUri }} + - podSelector: + matchLabels: + app: "policy-decisions" + release: {{ .Values.pdsRelease | default .Release.Name | quote }} +{{- end }} +{{- end }}{{- end }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP +{{- end }} +{{- define "common.networkpolicy" -}} +{{- template "common.util.merge" (append . 
"common.networkpolicy.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaim.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaim.yaml new file mode 100644 index 0000000..e4715b8 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaim.yaml @@ -0,0 +1,42 @@ +{{- define "common.persistentvolumeclaim.tpl" -}} +{{- $persistence := default .Values.persistence .claim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +{{ template "common.metadata" . }} +spec: + accessModes: + - {{ $persistence.accessMode | quote }} + resources: + requests: + storage: {{ $persistence.size | quote }} +{{- if $persistence.matchLabels }} + selector: + matchLabels: +{{- include "common.labelize" $persistence.matchLabels | indent 6 -}} +{{- end -}} +{{- if $persistence.storageClass }} +{{- if (eq "-" $persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ $persistence.storageClass }}" +{{- end }} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} + storageClassName: "" + {{- else -}} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- define "common.persistentvolumeclaim" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if and $top.Values.persistence.enabled (not $top.Values.persistence.existingClaim) -}} +{{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaims.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaims.yaml new file mode 100644 index 0000000..ebd2136 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_persistentvolumeclaims.yaml @@ -0,0 +1,19 @@ +{{- define "common.persistentvolumeclaims" -}} +{{- $root := . -}} +{{- if .Values.persistence -}}{{- if .Values.persistence.enabled -}} +{{- range $name, $claim:= .Values.persistence }} +{{- if kindIs "map" $claim }} +{{- $values := set $root.Values "fullnameOverride" (printf "%s-%s" (include "common.fullname" $root) $name) -}} +{{- $root = set (set $root "claim" $claim) "Values" $values -}} +{{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} +{{- printf "---" -}} +{{- printf "\n" -}} +{{- end -}} +{{- $_:= unset $root.Values "fullnameOverride" -}} +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} + +## No override templates are needed for the case of defining multiple PVCs +{{- define "mychart.persistentvolumeclaim" -}} +{{- end }} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_podSecurityPolicy.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_podSecurityPolicy.yaml new file mode 100644 index 0000000..c06f607 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_podSecurityPolicy.yaml @@ -0,0 +1,55 @@ +{{- define "common.podsecuritypolicy.tpl" -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +{{ template "common.metadata" . 
}} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end -}} +{{- define "common.podsecuritypolicy" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}}{{- if ne ($top.Values.podSecurityPolicy | default false) false -}} +{{- template "common.util.merge" (append . "common.podsecuritypolicy.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_role.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_role.yaml new file mode 100644 index 0000000..cf1d6f6 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_role.yaml @@ -0,0 +1,23 @@ +{{- define "common.role.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +{{ template "common.metadata" . }} +rules: +{{- if .Values.podSecurityPolicy | default false }} +- apiGroups: + - policy + resourceNames: + - {{ template "common.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} +{{- end -}} +{{- define "common.role" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.role.tpl") -}} +{{- end -}} +{{- end -}} + diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_rolebinding.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_rolebinding.yaml new file mode 100644 index 0000000..021e896 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_rolebinding.yaml @@ -0,0 +1,19 @@ +{{- define "common.rolebinding.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +{{ template "common.metadata" . }} +roleRef: + kind: Role + name: {{ template "common.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} +{{- define "common.rolebinding" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . 
"common.rolebinding.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_secret.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_secret.yaml new file mode 100644 index 0000000..45ec55f --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_secret.yaml @@ -0,0 +1,45 @@ +{{- define "common.secret.tpl" -}} +apiVersion: v1 +kind: Secret +{{ template "common.metadata.secrets" . }} +type: Opaque +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.secrets }} + {{- range $key, $value := $container.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- range $key, $value := $container.secrets.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets -}} + {{- if .Values.secrets.stringData -}} + {{- range $key, $value := .Values.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets.data -}} + {{- range $key, $value := .Values.secrets.data }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.secret" -}} +{{- template "common.util.merge" (append . "common.secret.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_service.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_service.yaml new file mode 100644 index 0000000..fb4a9e8 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_service.yaml @@ -0,0 +1,25 @@ +{{- define "common.service.tpl" -}} +apiVersion: v1 +kind: Service +{{ template "common.metadata" . }} + annotations: + {{- if .Values.service }}{{- if .Values.service.annotations }} + {{ include "common.annote" (dict "annotations" .Values.service.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ template "common.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- end -}} +{{- define "common.service" -}} +{{- $top := first . -}} +{{- if and $top.Values.service}} +{{- template "common.util.merge" (append . "common.service.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_serviceaccount.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_serviceaccount.yaml new file mode 100644 index 0000000..534a4bf --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- define "common.serviceaccount.tpl" -}} +apiVersion: v1 +kind: ServiceAccount +{{ template "common.metadata" . }} +{{- end -}} +{{- define "common.serviceaccount" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . 
"common.serviceaccount.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_statefulset.yaml b/qliksense/charts/audit/charts/qlikcommon/templates/_statefulset.yaml new file mode 100644 index 0000000..99086c6 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_statefulset.yaml @@ -0,0 +1,44 @@ +{{- define "common.statefulset.tpl" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: statefulset +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.statefulset.replicas}} + replicas: {{ .Values.statefulset.replicas }} +{{- end}} + template: + metadata: + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- if .Values.persistence }} + - {{ template "common.volume.pvc" (list (include "common.fullname" .) (include "common.fullname" .) .Values.persistence) }} +{{- end }} +{{- if .Values.configs.terminationGracePerionSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePerionSeconds }} +{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.statefulset.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.statefulset" -}} +{{- $top := first . -}} +{{- if and $top.Values.statefulset }} +{{- template "common.util.merge" (append . "common.statefulset.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_transformers.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_transformers.tpl new file mode 100644 index 0000000..49e8429 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_transformers.tpl @@ -0,0 +1,41 @@ +{{- define "common.transformers" -}} +{{- $fullname := include "common.fullname" . -}} +{{- $root := . 
-}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $release := .Release.Name -}} +{{- $commonSecretList := list "mongodbUri" -}} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end -}}{{- end -}} +{{- if $secrets -}} +{{- range $key, $value := $secrets.stringData }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- range $key, $value := $secrets.data }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- end }} +{{ $configs := .Values.configs}} +{{- if .container }}{{- if .container.configs }} +{{ $configs = .container.configs}} +{{- end -}}{{- end -}} +{{- if $configs -}} +{{- range $key, $value := $configs.data }} +- {{ template "common.envvar.configmap" (list (print $key | snakecase | upper) $fullname $key ) }} +{{- end }} +{{- range $key, $value := $configs }} +{{- if ne $key "data" }} +- {{ template "common.envvar.value" (list (print $key | snakecase | upper) $value ) }} +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_util.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_util.tpl new file mode 100644 index 0000000..6abeec0 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_util.tpl @@ -0,0 +1,15 @@ +{{- /* +common.util.merge will merge two YAML templates and output the result. + +This takes an array of three values: +- the top context +- the template name of the overrides (destination) +- the template name of the base (source) + +*/ -}} +{{- define "common.util.merge" -}} +{{- $top := first . -}} +{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} +{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}} +{{- regexReplaceAll ".*: null|.*: nil" (toYaml (merge $overrides $tpl)) "${1}" -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/audit/charts/qlikcommon/templates/_volume.tpl b/qliksense/charts/audit/charts/qlikcommon/templates/_volume.tpl new file mode 100644 index 0000000..ccd6110 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/templates/_volume.tpl @@ -0,0 +1,39 @@ +{{- define "common.volume.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + + name: {{ $name }} + configMap: + name: {{ $configMapName }}-configs +{{- end -}} + +{{- define "common.volume.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + + name: {{ $name }} + secret: + secretName: {{ $secretName }}-secrets +{{- end -}} + +{{- define "common.volume.pvc" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 
2 -}} + + name: {{ $name }} + {{- if $persistence.enabled }} + persistentVolumeClaim: + claimName: {{ $persistence.existingClaim | default $claimName }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.mount" -}} +{{- $volume := index . 0 -}} +{{- $mountPath := index . 1 -}} +- name: {{ $volume }} + mountPath: {{ default "/tmp" $mountPath.mountPath | quote }} + readOnly: {{ default false $mountPath.readOnly }} +{{- end -}} diff --git a/qliksense/charts/audit/charts/qlikcommon/values.yaml b/qliksense/charts/audit/charts/qlikcommon/values.yaml new file mode 100644 index 0000000..b7cf514 --- /dev/null +++ b/qliksense/charts/audit/charts/qlikcommon/values.yaml @@ -0,0 +1,4 @@ +# Default values for commons. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value diff --git a/qliksense/charts/audit/requirements.yaml b/qliksense/charts/audit/requirements.yaml new file mode 100644 index 0000000..86a6d92 --- /dev/null +++ b/qliksense/charts/audit/requirements.yaml @@ -0,0 +1,18 @@ +dependencies: + - name: qlikcommon + version: 1.0.13 + repository: "@qlik" + condition: global.component-common-imports + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: global.component-common-imports,mongodb.enabled + - name: messaging + version: 1.3.0 + repository: "@qlik" + condition: global.component-common-imports,messaging.enabled + - name: minio + version: ~1.6.4 + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: global.component-common-imports,minio.enabled + diff --git a/qliksense/charts/audit/templates/manifest.yaml b/qliksense/charts/audit/templates/manifest.yaml new file mode 100644 index 0000000..7d1b068 --- /dev/null +++ b/qliksense/charts/audit/templates/manifest.yaml @@ -0,0 +1,56 @@ +{{- template "common.configmap" (list . "audit.configmap") -}} +{{- define "audit.configmap" -}} +{{- end }} + +--- +{{ template "common.secret" (list . "audit.secret") -}} +{{- define "audit.secret" -}} +{{- end }} + +--- +{{ template "common.ingress" (list . "audit.ingress") -}} +{{- define "audit.ingress" -}} +spec: + rules: + - http: + paths: + - backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} + path: /api/v1/audits +{{- end }} + +--- +{{ template "common.service" (list . "audit.service") -}} +{{- define "audit.service" -}} +{{- end }} + +--- +{{ template "common.deployment" (list . "audit.deployment") -}} +{{- define "audit.deployment" -}} +{{- end }} + +--- +{{ template "common.hpa" (list . "audit.hpa") -}} +{{- define "audit.hpa" -}} +{{- end }} + +--- +{{ template "common.serviceaccount" (list . "audit.serviceaccount" ) -}} +{{- define "audit.serviceaccount" -}} +{{- end }} + +--- +{{ template "common.role" (list . "audit.role") -}} +{{- define "audit.role" -}} +{{- end }} + +--- +{{ template "common.rolebinding" (list . "audit.rolebinding") -}} +{{- define "audit.rolebinding" -}} +{{- end }} + +--- +{{ template "common.podsecuritypolicy" (list . "audit.podsecuritypolicy") -}} +{{- define "audit.podsecuritypolicy" -}} +{{- end }} diff --git a/qliksense/charts/audit/values.yaml b/qliksense/charts/audit/values.yaml new file mode 100644 index 0000000..c758cc6 --- /dev/null +++ b/qliksense/charts/audit/values.yaml @@ -0,0 +1,188 @@ +## Default values for Audit Service Helm Chart. + +image: + ## Default registry where this repository should be pulled from. 
+ ## Will be overridden by `global.imageRegistry` if set + registry: ghcr.io + ## Audit image name. + repository: qlik-download/audit + ## Audit image version. + ## ref: https://hub.docker.com/r/qlik/audit/tags/ + tag: 1.16.2 + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', + ## else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + pullPolicy: Always + ## Secrets for pulling images from a private docker registry. + ## + pullSecrets: + - name: artifactory-docker-secret + +configs: + ## Hard-coded standard inline Defaults (only for dev and internal Qlik purposes) + # + ## Number of seconds to wait during pod termination after sending SIGTERM until SIGKILL. + # terminationGracePerionSeconds: 30 + ## True to use NATS and NATS Streaming. + # natsEnabled: true + ## Toggle JWT validation using retrieved keys from the configured JWKS endpoint + # authEnabled: true + ## Expected `audience` value within the JWT claims + # authJwdAud: "qlik.api.internal" + ## Expected `issuer` value within the JWT claims + # authJwtIss: "qlik.api.internal" + ## Service identity token authentication + # tokenAuthEnabled: true + ## Access Control / rules enforcement setup + # accessControlEnabled: true + ## Environment + # env: "localdev" + + ## Hard-coded service specific inline Defaults + # + ## Time-to-live for audits events in the database before archiving + ## Infinite if undefined, supported precisions: ms, s, m, h (90 days = 2160h) + # eventTTL: -1 + ## Toggle to enable archiving + # archiveEnabled: false + ## Repeat interval for validating and archiving the data + # archiveInterval: 3h + ## A timeout for the next attempt after the failed archive operation + # archiveRetryAfter: 30m + ## Toggle to use secured connection + # storageSsl: true + ## Bucket to be used for audits archive + # storageBucket: audit + ## Bucket region + # storageRegion: us-east-1 + ## Comma-separated list of system events channels + natsChannels: system-events.engine.app,system-events.user-session,system-events.spaces,system-events.licenses,system-events.generic-links,system-events.api-keys,system-events.web-security,system-events.user-identity,system-events.tenants + + data: + ## Log level (debug|info|warn|error) + logLevel: info + # Feature flags service URL + featureFlagsUri: "http://{{ .Release.Name }}-feature-flags:8080" + # Policy-decisions service URL + pdsUri: "http://{{ .Release.Name }}-policy-decisions:5080" + # Endpoint to retrieve the JWKS + keysUri: "http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal" + # Token auth endpoint + tokenAuthUri: "http://{{ .Release.Name }}-edge-auth:8080/v1/internal-tokens" + # Address of NATS server + natsUri: "nats://{{ .Release.Name }}-nats-client:4222" + # NATS Streaming cluster ID + natsStreamingClusterId: "{{ .Release.Name }}-nats-streaming-cluster" + # Endpoint to S3 storage provider + storageEndpoint: "{{ .Release.Name }}-minio:9000" + # Storage bucket name + storageBucket: "audit" + # Storage region + storageRegion: "us-east-1" + # Ingress auth URL + ingressAuthUrl: "http://{{ .Release.Name }}-edge-auth.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth" + +secrets: + stringData: + ## Specify a custom mongo uri. Not needed when the local mongo is enabled. 
+ ## Secret: {{ Release.Name }}-mongoconfig.mongo-uri + mongodbUri: "mongodb://{{ .Release.Name }}-mongodb:27017" + ## Service identity token authentication keys + tokenAuthPrivateKeyId: V5uEI2x2sYjIq0Ezz7NlqoExS1Y4dvwhdt3iakflxGY + tokenAuthPrivateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDAIjTfnfDoa9CeEIrlXJpZwc+Le1Hbh7LwooQh9YbpdBw5nIQORGAI0 + EsGEiUPDiMqgBwYFK4EEACKhZANiAATs3JLJEL1y7pUQ6u1eOhJ3M5BVRPZRQ4du + ouRsRM/wkAb+OlMAsyFZCuEgtDF14G/2RY4gepUkARBFCgEO0E3WarSEHQMS26pN + fkRHHEUAaOvT6vP4+BqgQmh+0MbStKI= + -----END EC PRIVATE KEY----- + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 6080 + ## Metrics configuration + ## Prometheus configuration + ## The annotations for prometheus scraping are included + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.port }}" + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + ## Annotations to be added to the ingress. + ## + annotations: + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/v1/(.*) /v1/$1 break; + +# Currently templates a "main" container +deployment: + ## Number of replicas. + ## + replicas: 1 + container: + ## deployment resources + resources: + limits: + cpu: null + memory: null + requests: + cpu: null + memory: null + +## Subcharts +## MongoDB configuration +mongodb: + ## Enables a local mongo chart + enabled: false + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 + # (tag: 4.0.3-debian-9) + tag: 3.6.12 + + ## disable password for local dev mode + usePassword: false + +## Messaging chart configuration +messaging: + ## Set messaging.enabled to true for localdev and CI builds + enabled: false + nats: + enabled: true + replicaCount: 1 + auth: + enabled: false + clusterAuth: + enabled: false + nats-streaming: + enabled: true + replicaCount: 3 + auth: + enabled: false + +## MinIO chart configuration +minio: + ## Set minio.enabled to true for localdev + enabled: false + service: + port: 9000 + defaultBucket: + enabled: true + name: audit + +# Included if certs are required to be mounted into the pod +certs: + mountPath: "/etc/ssl/certs" diff --git a/qliksense/charts/chronos-worker/.helmignore b/qliksense/charts/chronos-worker/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/chronos-worker/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/chronos-worker/Chart.yaml b/qliksense/charts/chronos-worker/Chart.yaml new file mode 100644 index 0000000..412a8cc --- /dev/null +++ b/qliksense/charts/chronos-worker/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 0.8.0 +description: | + The scheduler-worker for Qlik Elastic +home: https://www.qlik.com +name: chronos-worker +sources: +- https://github.com/qlik-trial/chronos-worker +version: 1.4.10 diff --git a/qliksense/charts/chronos-worker/README.md b/qliksense/charts/chronos-worker/README.md new file mode 100644 index 0000000..c409f60 --- /dev/null +++ b/qliksense/charts/chronos-worker/README.md @@ -0,0 +1,86 @@ +# chronos-worker + +[chronos-worker](https://github.com/qlik-trial/chronos-worker) is the service 
+responsible for processing scheduled actions. + +## Introduction + +This chart bootstraps a `chronos-worker` deployment on a [Kubernetes](http://kubernetes.io) +cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/chronos-worker +``` + +The command deploys `chronos-worker` on the Kubernetes cluster in the default +configuration. +The [configuration](#configuration) section lists the parameters that can be +configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete --purge my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the `chronos-worker` chart and their default values. + + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repository is pulled from | `qliktech-docker.jfrog.io` | +| `imagePullSecrets` | Name of the secret containing the docker registry credentials | `[{name: "artifactory-docker-secret"}]` | +| `image.repository` | Docker image for the service with no registry | `chronos-worker` | +| `image.tag` | Tag of the docker image for the service | `0.8.0` | +| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` | +| `service.port` | Port on which the service is listening on | `8080` | +| `service.headerManagerEnabled` | Enable the header manager for HTTP requests | `true` | +| `service.denyExternalEgressTraffic` | Enable the network policy that denies all external outboud traffic | `true` | +| `replicaCount` | Number of replicas for the service | `1` | +| `probes.initialDelaySeconds` | Amount of time k8s waits before performing the first probe | `30` | +| `probes.timeoutSeconds` | Number of seconds after which the probe times out | `5` | +| `probes.failureThreshold` | Number of times k8s tries a probe before giving up | `10` | +| `metrics.prometheus.enabled` | Include annotations for prometheus scraping | `true` | +| `redis.enabled` | Enable Redis as chart's dependency | `true` | +| `redis.image.pullPolicy` | Docker image pull policy | `IfNotPresent` | +| `redis.rbac.create` | Whether RBAC resources should be created for Redis | `true` | +| `redis.uri` | Redis full URL (port included) | `{{.Release.Name}}-redis-master:6379` | +| `redis.usePassword` | Disable Redis password authentication | `true` | +| `redis.password` | Custom password for authentication (needs `usePassword` set to `true`) | `""` | +| `redis.cluster.enabled` | Use master-secondary topology | `false` | +| `redis.metrics.enabled` | Expose redis metrics to prometheus | `true` | +| `nats.servers` | NATS server full URI | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats.podLabel.key` | Pod label key required to talk to messaging chart | `{{ .Release.Name }}-nats-client` | +| `nats.podLabel.value` | Pod label value required to talk to messaging chart | `true` | +| `nats.tokenAuthEnabled` | Enable token authentication to NATS via service-to-service JWT | `true` | +| `stan.clusterID` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` | +| `jwt.authUrl` | `edge-auth` full url for the internal-tokens endpoint | `http://{{ 
.Release.Name }}-edge-auth:8080/v1/internal-tokens` | +| `jwt.privateKey` | Private key used to sign JWTs | see `values.yaml` | +| `jwt.kid` | Public key id in the `keys` service | `BKIz36TOxYe2wf6sB2f2pA5sb9GIDOZZXmWHDYG` | + +*NOTES*: +* if `image.tag` is set to `latest`, it is recommended to change `image.pullPolicy` to `Always` + +### Setting Parameters + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. +For example, + +```console +helm install --name my-release -f values.yaml qlik/chronos-worker +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/chronos-worker/charts/redis/.helmignore b/qliksense/charts/chronos-worker/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/chronos-worker/charts/redis/Chart.yaml b/qliksense/charts/chronos-worker/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/chronos-worker/charts/redis/README.md b/qliksense/charts/chronos-worker/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
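+
+As a quick, illustrative variation (the parameter names are taken from the [Parameters](#parameters) table below; the values shown are examples only), defaults can also be overridden at install time:
+
+```bash
+# Example: disable password authentication and run three read replicas
+$ helm install my-release stable/redis --set usePassword=false --set cluster.slaveCount=3
+```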
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| 
`metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | 
`26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain en extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accesing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis you need to create a secret containing the password. 
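+
+As a minimal sketch (the secret name `redis-password-file` matches the deployment example below; the local file path and password are illustrative), such a secret could be created with `kubectl`:
+
+```bash
+# Write the password to a file named redis-password, then wrap it in a Secret
+$ echo -n 'secretpassword' > ./redis-password
+$ kubectl create secret generic redis-password-file --from-file=redis-password=./redis-password
+```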
+ +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge tables. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. 
For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true`, the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change that requires manual action. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration; however, it can be disabled to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1`, where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true`, the default tag for the exporter image is now `v1.x.x`. This introduces many changes, including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command `helm upgrade` would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be used to upgrade the release: + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as is done for the master. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true` flag, as sketched below.
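+ +For example (a minimal sketch; the release name `my-release` is a placeholder, and any other values you normally set should be passed again, e.g. by also adding `--reuse-values`): + +```bash +# Re-enable the init container that fixes volume permissions +helm upgrade my-release stable/redis --set volumePermissions.enabled=true +```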
+ +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma-separated values. +- `master.persistence.path` now defaults to `/data`. + +### To 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` cannot be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis master StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And patch the Redis slave (and metrics, if enabled) deployments: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means we moved from a Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside the Redis pods (this feature is disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no Redis master service is exposed), you first need to query the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/default-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/dev-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/production-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/chronos-worker/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/chronos-worker/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/NOTES.txt b/qliksense/charts/chronos-worker/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . 
}}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/_helpers.tpl b/qliksense/charts/chronos-worker/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/configmap.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/headless-svc.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/health-configmap.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p 
$REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/psp.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . 
}}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . 
}} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-role.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . 
}} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . 
}} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/chronos-worker/charts/redis/templates/secret.yaml b/qliksense/charts/chronos-worker/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/chronos-worker/charts/redis/values-production.yaml b/qliksense/charts/chronos-worker/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
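[Editor's note] The secret.yaml template above only renders when usePassword is true and no existingSecret is supplied. A minimal override that reuses a pre-created secret instead could look like the sketch below (the secret name "my-redis-secret" is an assumption, not part of this patch):

usePassword: true
usePasswordFile: false
# assumption: a Secret named "my-redis-secret" with key "redis-password" already exists in the release namespace
existingSecret: my-redis-secret
existingSecretPasswordKey: redis-password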
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. 
+## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
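[Editor's note] Building on the commented hints in the master section above, a hedged example of tightening the master configuration; only FLUSHDB/FLUSHALL are disabled by the chart defaults, the extra CONFIG entry is an assumption for illustration:

master:
  extraFlags:
    - "--maxmemory-policy volatile-ttl"   # mirrors the commented extraFlags example above
  disableCommands:
    - FLUSHDB
    - FLUSHALL
    - CONFIG                              # assumption: additionally rename CONFIG away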
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
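[Editor's note] As a sketch of the master persistence knobs documented above (the storage class name is an assumption; leaving storageClass unset keeps the cluster default provisioner):

master:
  persistence:
    enabled: true
    path: /data
    storageClass: managed-premium   # assumption: replace with a class available in the target cluster
    accessModes:
      - ReadWriteOnce
    size: 8Gi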
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
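[Editor's note] If the cluster runs prometheus-operator, the exporter side-car can be scraped through a ServiceMonitor. A minimal override, assuming a "monitoring" namespace and the prometheus: kube-prometheus selector used elsewhere in this file:

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    namespace: monitoring            # assumption: namespace where Prometheus discovers ServiceMonitors
    selector:
      prometheus: kube-prometheus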
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
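[Editor's note] The volume-permissions init container defined in the statefulset earlier chowns the data path to runAsUser:fsGroup. A hedged override that enables it for storage back-ends that mount volumes as root:

volumePermissions:
  enabled: true
securityContext:
  enabled: true
  runAsUser: 1001
  fsGroup: 1001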
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/chronos-worker/charts/redis/values.schema.json b/qliksense/charts/chronos-worker/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + 
"enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/chronos-worker/charts/redis/values.yaml b/qliksense/charts/chronos-worker/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/chronos-worker/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). 
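[Editor's note] When the NetworkPolicy above is enabled and allowExternal is false, only pods carrying the client label, or namespaces matching the selectors that follow, can reach Redis. An illustrative override, where the namespace label value is an assumption:

networkPolicy:
  enabled: true
  allowExternal: false
  ingressNSMatchLabels:
    name: qliksense        # assumption: label carried by the client namespace
  ingressNSPodMatchLabels: {}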
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
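[Editor's note] For the replica side, the replica count comes from cluster.slaveCount while volume sizing lives under slave.persistence. A sketch only; the 16Gi size is an assumption, the chart default is 8Gi:

cluster:
  enabled: true
  slaveCount: 3
slave:
  persistence:
    enabled: true
    size: 16Gi     # assumption: larger than the 8Gi default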
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/chronos-worker/requirements.yaml b/qliksense/charts/chronos-worker/requirements.yaml new file mode 100644 index 0000000..21a2d9c --- /dev/null +++ b/qliksense/charts/chronos-worker/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: redis + version: 10.5.6 + repository: "@stable" + condition: redis.enabled diff --git a/qliksense/charts/chronos-worker/templates/NOTES.txt b/qliksense/charts/chronos-worker/templates/NOTES.txt new file mode 100644 index 0000000..e69de29 diff --git a/qliksense/charts/chronos-worker/templates/_helpers.tpl b/qliksense/charts/chronos-worker/templates/_helpers.tpl new file mode 100644 index 0000000..2c808f3 --- /dev/null +++ b/qliksense/charts/chronos-worker/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "chronos-worker.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chronos-worker.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chronos-worker.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return chronos-worker image name */}} +{{- define "chronos-worker.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + diff --git a/qliksense/charts/chronos-worker/templates/deny-external-egress-traffic.yaml b/qliksense/charts/chronos-worker/templates/deny-external-egress-traffic.yaml new file mode 100644 index 0000000..4281e82 --- /dev/null +++ b/qliksense/charts/chronos-worker/templates/deny-external-egress-traffic.yaml @@ -0,0 +1,29 @@ +{{- if .Values.service.denyExternalEgressTraffic }} + +# This network policy prevents chronos-worker from establishing connections to the +# external networks (= anything outside the cluster) +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "chronos-worker.fullname" . }}-deny-external-egress + labels: + app: {{ template "chronos-worker.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "chronos-worker.name" . }} + policyTypes: + - Egress + egress: + - ports: + - port: 53 # DNS resolution + protocol: UDP + - port: 53 # DNS resolution + protocol: TCP + - to: + - namespaceSelector: {} # Allow outbount traffic to pods inside the cluster + +{{- end }} diff --git a/qliksense/charts/chronos-worker/templates/deployment.yaml b/qliksense/charts/chronos-worker/templates/deployment.yaml new file mode 100644 index 0000000..2a09b56 --- /dev/null +++ b/qliksense/charts/chronos-worker/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "chronos-worker.fullname" . }} + labels: + app: {{ template "chronos-worker.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "chronos-worker.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "chronos-worker.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{ tpl ( .Values.nats.podLabel.key ) . }}: {{ .Values.nats.podLabel.value | quote }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + containers: + - image: {{ template "chronos-worker.image" . }} + name: {{ template "chronos-worker.name" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - -natsURI={{ tpl .Values.nats.servers . }} + - -stanClusterID={{ tpl .Values.stan.clusterID . }} + - -jwtTokenAuthEnabled={{ .Values.nats.tokenAuthEnabled }} + - -jwtTokenAuthURL={{ tpl .Values.jwt.authUrl . 
}} + - -jwtKid={{ .Values.jwt.kid }} + - -headerManagerEnabled={{ .Values.service.headerManagerEnabled }} +{{- if .Values.service.resources }} + resources: +{{ toYaml .Values.service.resources | indent 10 }} +{{- end }} + ports: + - containerPort: {{ .Values.service.port }} + env: + - name: REDIS_URI + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) (tpl (default "" .Values.redis.existingSecret) .) }} + key: {{ default "redis-addr" .Values.redis.addressKey }} + - name: REDIS_PWD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) (tpl (default "" .Values.redis.existingSecret) .) }} + key: redis-password + - name: JWT_PRIVATE_KEY + value: {{ .Values.jwt.privateKey | quote }} + readinessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{ toYaml .Values.probes | indent 10 }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{ toYaml .Values.probes | indent 10 }} diff --git a/qliksense/charts/chronos-worker/templates/redis-secret.yaml b/qliksense/charts/chronos-worker/templates/redis-secret.yaml new file mode 100644 index 0000000..674b146 --- /dev/null +++ b/qliksense/charts/chronos-worker/templates/redis-secret.yaml @@ -0,0 +1,21 @@ +{{- if or .Values.redis.uri .Values.redis.enabled }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-redis-secret +type: Opaque +data: +{{- if .Values.redis.uri}} + redis-addr: {{ .Values.redis.uri | b64enc }} +{{- else if .Values.redis.enabled}} + redis-addr: {{ print .Release.Name "-redis-master:6379" | b64enc }} +{{- end}} + +{{ if not .Values.redis.usePassword }} # usePassword=false + redis-password: {{ print "" | b64enc }} +{{- else if .Values.redis.password }} # usePassword=true AND password is set + redis-password: {{ print .Values.redis.password | b64enc }} +{{- end }} + +{{- end }} diff --git a/qliksense/charts/chronos-worker/templates/svc.yaml b/qliksense/charts/chronos-worker/templates/svc.yaml new file mode 100644 index 0000000..4e4c04d --- /dev/null +++ b/qliksense/charts/chronos-worker/templates/svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "chronos-worker.fullname" . }} + labels: + app: {{ template "chronos-worker.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + ports: + - name: {{ template "chronos-worker.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + selector: + app: {{ template "chronos-worker.name" . }} diff --git a/qliksense/charts/chronos-worker/values.yaml b/qliksense/charts/chronos-worker/values.yaml new file mode 100644 index 0000000..1aac69c --- /dev/null +++ b/qliksense/charts/chronos-worker/values.yaml @@ -0,0 +1,150 @@ +# Default values for chronos. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Docker image information +# +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. 
+ ## + registry: ghcr.io + # Name of the docker image + repository: qlik-download/chronos-worker + # Tag of the docker image + tag: 0.8.0 + # Pull policy for the image + pullPolicy: IfNotPresent + +## Secret for pulling images from private docker registry +# +imagePullSecrets: + - name: artifactory-docker-secret + +## Chronos-Worker configuration +# +service: + # Port the service is listening on + port: 8080 + + ## Resource management + # resources: + + # Enable the header manager/manipulator + headerManagerEnabled: true + + # Enable the network policy that denies all external outboud traffic + denyExternalEgressTraffic: true + +## Number of replicas +# +replicaCount: 1 + +## Configure k8s probes +# +probes: + # Number of seconds to wait before performing the first probe + initialDelaySeconds: 30 + # Number of seconds after which the probe times out + timeoutSeconds: 5 + # Number of times k8s tries a probe before giving up + failureThreshold: 10 + +## Metrics configuration +# +metrics: + # Prometheus configurations + prometheus: + # Include annotations for prometheus scraping + enabled: true + +## Redis configuration +## See the URL below to configure the Redis chart at will +## https://github.com/kubernetes/charts/tree/master/stable/redis#configuration +# +redis: + # Enable Redis as chart's dependency + enabled: true + # Redis docker image pull policy + image: + pullPolicy: IfNotPresent + + # Specifies whether RBAC resources should be created. + # https://github.com/kubernetes/charts/blob/060135390785ad6af618d498c982a13573d1f2d9/stable/redis/values.yaml#L74-L89 + rbac: + create: true + # role: + # rules: [] + + ## Custom URL (port included) where Redis can be found + ## Not needed when .Values.redis.enabled is true + # uri: + + # Disable password authentication + usePassword: false + # Set custom password + # password: + + # Disable master-secondary topology by default. + # Need to override in production-like environments. + cluster: + enabled: false + # Expose Redis metrics via oliver006/redis_exporter. + # See https://github.com/oliver006/redis_exporter for more information. 
+ metrics: + enabled: true + # Annotations for prometheus + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + + master: + securityContext: + enabled: false + + slave: + securityContext: + enabled: false + +## NATS configuration +# +nats: + # Full URI of the NATS server + servers: "nats://{{ .Release.Name }}-nats-client:4222" + # For localdev use this configuration instead + # servers: "nats://messaging-nats-client:4222" + + # Pod label required to allow communication with NATS + podLabel: + key: "{{ .Release.Name }}-nats-client" + value: true + # Enable token authentication (s2s JWT) + tokenAuthEnabled: true + +## NATS Streaming configuration +# +stan: + # NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + # For localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + +## JWT configuration +# +jwt: + # The edge-auth full url for the internal-tokens endpoint + authUrl: "http://{{ .Release.Name }}-edge-auth:8080/v1/internal-tokens" + # For localdev use this configuration instead + # authUrl: "http://edge-auth:8080/v1/internal-tokens" + + # The private key used to sign JWTs + privateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDC3dOGDv3m5X/6y8dp1pQQBegCavKZCwUmI1l4p/HjaUEDZunScZ1mv + VceTcH2/g4ugBwYFK4EEACKhZANiAASMo/DYuKlVdNMZT0h1YxVdoetpmRMXPD6s + PJXjqXN1KL1SFYk9b/vQEZYJWuWrArTEegyNmNOPvaPUmLj3xzt5sUneGY0LlQ8y + f2UvDiYmlGcsJ35od1gkcOQ2wk3laNw= + -----END EC PRIVATE KEY----- + # The public key id in the keys service + kid: "BKIz36TOxYe2wf6sB2f2pA5sb9GIDOZZXmWHDYG-pwQ" diff --git a/qliksense/charts/chronos/.helmignore b/qliksense/charts/chronos/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/chronos/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/chronos/Chart.yaml b/qliksense/charts/chronos/Chart.yaml new file mode 100644 index 0000000..46e7346 --- /dev/null +++ b/qliksense/charts/chronos/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 0.14.0 +description: | + The scheduler for Qlik Elastic +home: https://www.qlik.com +name: chronos +sources: +- https://github.com/qlik-trial/chronos +version: 1.6.4 diff --git a/qliksense/charts/chronos/README.md b/qliksense/charts/chronos/README.md new file mode 100644 index 0000000..21de3dc --- /dev/null +++ b/qliksense/charts/chronos/README.md @@ -0,0 +1,86 @@ +# chronos + +[chronos](https://github.com/qlik-trial/chronos) is the service +responsible for scheduling arbitrary actions. + +## Introduction + +This chart bootstraps a `chronos` deployment on a [Kubernetes](http://kubernetes.io) +cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/chronos +``` + +The command deploys `chronos` on the Kubernetes cluster in the default +configuration. +The [configuration](#configuration) section lists the parameters that can be +configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete --purge my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the `chronos` chart and their default values. 
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` |
+| `imagePullSecrets` | Name of the secret containing the docker registry credentials | `[{name: "artifactory-docker-secret"}]` |
+| `image.registry` | The default registry where the repository is pulled from | `qliktech-docker.jfrog.io` |
+| `image.repository` | Docker image for the service | `chronos` |
+| `image.tag` | Tag of the docker image for the service | `0.14.0` |
+| `image.pullPolicy` | Docker image pull policy for K8s | `IfNotPresent` |
+| `settings.port` | Port on which the service is listening | `8585` |
+| `settings.disableapi` | Disable APIs | `false` |
+| `settings.resources.requests.cpu` | CPU reservation | `250m` |
+| `settings.resources.requests.memory` | Memory reservation | `300Mi` |
+| `settings.resources.limits.cpu` | CPU limit | `400m` |
+| `settings.resources.limits.memory` | Memory limit | `600Mi` |
+| `replicaCount` | Number of replicas for the service | `1` |
+| `podAntiAffinityEnabled` | Enable hard pod anti-affinity rules when chronos has more than 1 replica | `false` |
+| `mongodb.enabled` | Enable MongoDB as a chart dependency | `true` |
+| `mongodb.uri` | If the mongodb chart dependency isn't used, specify the URI path to mongo | `""` |
+| `mongodb.uriSecretName` | Name of the secret to mount for the mongo URI. The secret must have the `mongodb-uri` key | `""` |
+| `redis.uri` | Redis full URL (port included) | `{{.Release.Name}}-redis-master:6379` |
+| `redis.usePassword` | Enable password authentication | `true` |
+| `redis.password` | Custom password for authentication (needs `usePassword` set to `true`) | `""` |
+| `redis.cluster.enabled` | Use master-secondary topology | `false` |
+| `redis.metrics.enabled` | Export redis metrics for prometheus | `true` |
+| `probes.initialDelaySeconds` | Amount of time k8s waits before performing the first probe | `30` |
+| `probes.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
+| `probes.failureThreshold` | Number of times k8s tries a probe before giving up | `10` |
+| `metrics.prometheus.enabled` | Include annotations for prometheus scraping | `true` |
+| `jwt.enabled` | Enable JWT validation | `true` |
+| `jwt.jwksURI` | URI where the JWKS to validate JWTs is located | `http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal` |
+| `jwt.jwtAud` | Expected "audience" value within the JWT claims | `qlik.api.internal` |
+| `jwt.jwtIss` | Expected "issuer" value within the JWT claims | `qlik.api.internal` |
+
+*NOTES*:
+* if `image.tag` is set to `latest`, it is recommended to change `image.pullPolicy` to `Always`
+
+### Setting Parameters
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
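+A minimal override file might look like the following sketch (illustrative only: the keys come from the table above, and the MongoDB URI shown is a placeholder for an external instance, not a value shipped with the chart):
+
+```yaml
+# values.yaml -- illustrative overrides for the chronos chart
+replicaCount: 2                            # run two chronos pods
+image:
+  pullPolicy: IfNotPresent
+mongodb:
+  enabled: false                           # skip the bundled MongoDB dependency
+  uri: "mongodb://my-mongo:27017/chronos"  # placeholder URI for an external MongoDB
+metrics:
+  prometheus:
+    enabled: true                          # keep the Prometheus scrape annotations
+```
+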
+For example, + +```console +helm install --name my-release -f values.yaml qlik/chronos +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/chronos/charts/mongodb/.helmignore b/qliksense/charts/chronos/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/chronos/charts/mongodb/Chart.yaml b/qliksense/charts/chronos/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. +home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/chronos/charts/mongodb/OWNERS b/qliksense/charts/chronos/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/chronos/charts/mongodb/README.md b/qliksense/charts/chronos/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alhpanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alhpanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `configmap` | MongoDB configuration file to be used | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+ --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \
+ stable/mongodb
+```
+
+The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mongodb
+```
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Replication
+
+You can start the MongoDB chart in replica set mode with the following command:
+
+```bash
+$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true
+```
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file provides a configuration to deploy a scalable and highly available MongoDB deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/mongodb
+```
+
+To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set.
+
+```console
+$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3
+```
+
+Some characteristics of this chart are:
+
+* Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes.
+* The number of secondary and arbiter nodes can be scaled out independently.
+* It is easy to move an application from using a standalone MongoDB server to using a replica set.
+
+## Initialize a fresh instance
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+The allowed extensions are `.sh` and `.js`.
+
+## Persistence
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location.
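+As a sketch only (the values below are placeholders, not recommendations), the persistence parameters listed in the table above can be overridden in a values file; the `accessModes` list mirrors what the chart templates iterate over:
+
+```yaml
+# Illustrative persistence overrides for this MongoDB chart
+persistence:
+  enabled: true
+  storageClass: "standard"    # hypothetical storage class; omit it to rely on the default provisioner
+  accessModes:
+    - ReadWriteOnce
+  size: 20Gi                  # placeholder volume size
+  # existingClaim: my-claim   # reuse a pre-created PVC instead of provisioning a new one
+```
+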
The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/chronos/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/chronos/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/NOTES.txt b/qliksense/charts/chronos/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/chronos/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/configmap.yaml b/qliksense/charts/chronos/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/chronos/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/chronos/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/chronos/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/chronos/charts/mongodb/templates/secrets.yaml b/qliksense/charts/chronos/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/chronos/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/chronos/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/chronos/charts/mongodb/values-production.yaml b/qliksense/charts/chronos/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/chronos/charts/mongodb/values.yaml b/qliksense/charts/chronos/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/chronos/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/chronos/charts/redis/.helmignore b/qliksense/charts/chronos/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/chronos/charts/redis/Chart.yaml b/qliksense/charts/chronos/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/chronos/charts/redis/README.md b/qliksense/charts/chronos/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
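+
+If `usePassword` is left at its default of `true`, the chart generates a random password; it can be read back from the release secret once the install completes. The snippet below is a minimal sketch: the secret name `my-release-redis` assumes the chart's default fullname convention and the `default` namespace.
+
+```bash
+$ kubectl get secret --namespace default my-release-redis \
+    -o jsonpath="{.data.redis-password}" | base64 --decode
+```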
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| 
`metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | 
`26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain en extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accesing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis you need to create a secret containing the password. 
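+
+For example, a secret with the expected layout can be created from a local file before installing (a sketch only: the path `./redis-password.txt` is illustrative, and the secret name matches the `existingSecret` value used below):
+
+```bash
+$ kubectl create secret generic redis-password-file \
+    --from-file=redis-password=./redis-password.txt
+```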
+ +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge tables. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. 
For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). 
If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/chronos/charts/redis/ci/default-values.yaml b/qliksense/charts/chronos/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
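+#
+# These ci/ fixtures can also be exercised locally before pushing (a sketch;
+# the chart path follows this patch's layout and Helm 3 syntax is assumed):
+#
+#   helm lint qliksense/charts/chronos/charts/redis -f qliksense/charts/chronos/charts/redis/ci/dev-values.yaml
+#   helm template my-release qliksense/charts/chronos/charts/redis -f qliksense/charts/chronos/charts/redis/ci/extra-flags-values.yaml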
diff --git a/qliksense/charts/chronos/charts/redis/ci/dev-values.yaml b/qliksense/charts/chronos/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/chronos/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/chronos/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/chronos/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/chronos/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
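+    ## For example (an illustrative value, not a chart default), several releases could
+    ## share one manually provisioned PV by giving each its own subdirectory:
+    ## subPath: "redis-slave"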
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
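+    ## One way to create such a secret (illustrative only; substitute your own registry
+    ## and credentials):
+    ##   kubectl create secret docker-registry myRegistryKeySecretName \
+    ##     --docker-server=<registry> --docker-username=<user> --docker-password=<password>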
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/chronos/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos/charts/redis/ci/production-values.yaml b/qliksense/charts/chronos/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/chronos/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/chronos/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/chronos/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/chronos/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/chronos/charts/redis/templates/NOTES.txt b/qliksense/charts/chronos/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. 
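+
+For example (illustrative), once connected to the Sentinel port with redis-cli, the address of the current master can be obtained with:
+
+  SENTINEL get-master-addr-by-name {{ .Values.sentinel.masterSet }}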
+ +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/chronos/charts/redis/templates/_helpers.tpl b/qliksense/charts/chronos/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/templates/configmap.yaml b/qliksense/charts/chronos/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/headless-svc.yaml b/qliksense/charts/chronos/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/chronos/charts/redis/templates/health-configmap.yaml b/qliksense/charts/chronos/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ 
"$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/chronos/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/chronos/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/chronos/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/chronos/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/chronos/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/psp.yaml b/qliksense/charts/chronos/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-role.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/chronos/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/chronos/charts/redis/templates/secret.yaml b/qliksense/charts/chronos/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/chronos/charts/redis/values-production.yaml b/qliksense/charts/chronos/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled, each sentinel will generate a random ID at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set a label on the namespace and, optionally, on the pods. + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
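+  ## The named scheduler must already be running in the cluster or the master pod
+  ## will remain Pending; leave schedulerName unset to keep the default kube-scheduler.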
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
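+    ## For example (hypothetical values), to expose the slave service on a fixed port:
+    # type: NodePort
+    # nodePort: 30679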
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/chronos/charts/redis/values.schema.json b/qliksense/charts/chronos/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + 
"type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/chronos/charts/redis/values.yaml b/qliksense/charts/chronos/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/chronos/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). 
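+  ## For example (hypothetical labels), to admit traffic from a namespace labelled
+  ## "redis-access: enabled" and from pods labelled "app: chronos":
+  # ingressNSMatchLabels:
+  #   redis-access: enabled
+  # ingressNSPodMatchLabels:
+  #   app: chronos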
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/chronos/requirements.yaml b/qliksense/charts/chronos/requirements.yaml new file mode 100644 index 0000000..f910f89 --- /dev/null +++ b/qliksense/charts/chronos/requirements.yaml @@ -0,0 +1,9 @@ +dependencies: + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: mongodb.enabled + - name: redis + version: 10.5.6 + repository: "@stable" + condition: redis.enabled diff --git a/qliksense/charts/chronos/templates/NOTES.txt b/qliksense/charts/chronos/templates/NOTES.txt new file mode 100644 index 0000000..e69de29 diff --git a/qliksense/charts/chronos/templates/_helpers.tpl b/qliksense/charts/chronos/templates/_helpers.tpl new file mode 100644 index 0000000..05f08b8 --- /dev/null +++ b/qliksense/charts/chronos/templates/_helpers.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "chronos.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chronos.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chronos.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return chronos image name */}} +{{- define "chronos.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{/* Return leader election image name */}} +{{- define "leader-election.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.leaderelection.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.leaderelection.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.leaderelection.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/chronos/templates/deployment.yaml b/qliksense/charts/chronos/templates/deployment.yaml new file mode 100644 index 0000000..957d319 --- /dev/null +++ b/qliksense/charts/chronos/templates/deployment.yaml @@ -0,0 +1,113 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "chronos.fullname" . }} + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "chronos.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + serviceAccountName: {{ template "chronos.fullname" . }} +{{- if .Values.podAntiAffinityEnabled }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: # hard requirement + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ template "chronos.name" . }} + topologyKey: kubernetes.io/hostname +{{- end }} + containers: + - image: {{ template "leader-election.image" . }} + name: "election" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --election={{ template "chronos.fullname" . }} + - --election-namespace={{ .Release.Namespace }} + - --http=0.0.0.0:4040 + ports: + - containerPort: 4040 + livenessProbe: + httpGet: + path: /health + port: 4040 +{{ toYaml .Values.probes | indent 10 }} + - image: {{ template "chronos.image" . }} + name: {{ template "chronos.name" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - server + - --environment=k8s + - --disableapi={{ .Values.settings.disableapi }} + - --listenport={{ .Values.settings.port }} + - --svc={{ template "chronos.fullname" . 
}} + - --nojwtvalidation={{ not .Values.jwt.enabled }} + - --jwksuri={{ default (printf "http://%s-keys:8080/v1/keys/qlik.api.internal" .Release.Name ) .Values.jwt.jwksURI }} + - --jwtaud={{ .Values.jwt.jwtAud }} + - --jwtiss={{ .Values.jwt.jwtIss }} + - --loglevel={{ .Values.settings.loglevel }} +{{- if .Values.settings.resources }} + resources: +{{ toYaml .Values.settings.resources | indent 10 }} +{{- end }} + ports: + - containerPort: {{ .Values.settings.port }} + env: + - name: MONGO_URI + valueFrom: + secretKeyRef: +{{- if .Values.mongodb.uriSecretName }} + name: {{ .Values.mongodb.uriSecretName }} +{{ else }} + name: {{ .Release.Name }}-mongoconfig +{{- end }} + key: mongodb-uri + - name: REDIS_URI + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) (tpl (default "" .Values.redis.existingSecret) .) }} + key: {{ default "redis-addr" .Values.redis.addressKey }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) (tpl (default "" .Values.redis.existingSecret) .) }} + key: redis-password +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumeMounts: +{{- include "qlik.ca-certificates.volumeMount" . | nindent 10 }} +{{- end }}{{- end }}{{- end }} + readinessProbe: + httpGet: + path: /ready + port: {{ .Values.settings.port }} +{{ toYaml .Values.probes | indent 10 }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.settings.port }} +{{ toYaml .Values.probes | indent 10 }} +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumes: +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} diff --git a/qliksense/charts/chronos/templates/mongo-secret.yaml b/qliksense/charts/chronos/templates/mongo-secret.yaml new file mode 100644 index 0000000..ff3323e --- /dev/null +++ b/qliksense/charts/chronos/templates/mongo-secret.yaml @@ -0,0 +1,20 @@ +{{- if or .Values.mongodb.uri .Values.mongodb.enabled }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-mongoconfig + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: +{{- if .Values.mongodb.uri}} + mongodb-uri: {{ .Values.mongodb.uri | b64enc }} +{{- else if .Values.mongodb.enabled}} + mongodb-uri: {{ print "mongodb://" .Release.Name "-mongodb:27017/chronos" | b64enc }} +{{- end}} + +{{- end}} diff --git a/qliksense/charts/chronos/templates/rbac.yaml b/qliksense/charts/chronos/templates/rbac.yaml new file mode 100644 index 0000000..2c4dbdd --- /dev/null +++ b/qliksense/charts/chronos/templates/rbac.yaml @@ -0,0 +1,39 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "chronos.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + # Required for leader election + - apiGroups: + - "" + resources: + - endpoints + verbs: + - "get" + - "update" + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "chronos.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "chronos.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: +- kind: ServiceAccount + name: {{ template "chronos.fullname" . }} +roleRef: + kind: Role + name: {{ template "chronos.fullname" . }} + apiGroup: rbac.authorization.k8s.io diff --git a/qliksense/charts/chronos/templates/redis-secret.yaml b/qliksense/charts/chronos/templates/redis-secret.yaml new file mode 100644 index 0000000..2b9bca8 --- /dev/null +++ b/qliksense/charts/chronos/templates/redis-secret.yaml @@ -0,0 +1,20 @@ +{{- if or .Values.redis.uri .Values.redis.enabled }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-redis-secret +type: Opaque +data: +{{- if .Values.redis.uri}} + redis-addr: {{ .Values.redis.uri | b64enc }} +{{- else if .Values.redis.enabled}} + redis-addr: {{ print .Release.Name "-redis-master:6379" | b64enc }} +{{- end}} +{{ if not .Values.redis.usePassword }} # usePassword=false + redis-password: {{ print "" | b64enc }} +{{- else if .Values.redis.password }} # usePassword=true AND password is set + redis-password: {{ print .Values.redis.password | b64enc }} +{{- end }} + +{{- end }} diff --git a/qliksense/charts/chronos/templates/sa.yaml b/qliksense/charts/chronos/templates/sa.yaml new file mode 100644 index 0000000..3f3857b --- /dev/null +++ b/qliksense/charts/chronos/templates/sa.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "chronos.fullname" . }} + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} diff --git a/qliksense/charts/chronos/templates/svc.yaml b/qliksense/charts/chronos/templates/svc.yaml new file mode 100644 index 0000000..9fe6d9f --- /dev/null +++ b/qliksense/charts/chronos/templates/svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "chronos.fullname" . }} + labels: + app: {{ template "chronos.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.settings.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + ports: + - name: {{ template "chronos.name" . }} + port: {{ .Values.settings.port }} + targetPort: {{ .Values.settings.port }} + selector: + app: {{ template "chronos.name" . }} diff --git a/qliksense/charts/chronos/values.yaml b/qliksense/charts/chronos/values.yaml new file mode 100644 index 0000000..0f6ae0a --- /dev/null +++ b/qliksense/charts/chronos/values.yaml @@ -0,0 +1,161 @@ +# Default values for chronos. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Docker image information +# +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set in qsefe will override this value. 
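+  ## For example, to pull from a private mirror instead (hypothetical registry name),
+  ## pass --set image.registry=registry.example.com at install time.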
+ ## + registry: ghcr.io + # Name of the docker image + repository: qlik-download/chronos + # Tag of the docker image + tag: 0.14.0 + # Pull policy for the image + pullPolicy: IfNotPresent + +## Secret for pulling images from private docker registry +# +imagePullSecrets: + - name: artifactory-docker-secret + +## Chronos configuration +# +settings: + # Port the service is listening on + port: 8585 + # Disable API + disableapi: false + # Log Level + loglevel: "info" + + ## Resource management + # resources + +## Number of replicas +# +replicaCount: 1 + +## Enable pod antiaffinity rules when chronos has more than 1 replica +# +podAntiAffinityEnabled: false + +## Configure k8s probes +# +probes: + # Number of seconds to wait before performing the first probe + initialDelaySeconds: 30 + # Number of seconds after which the probe times out + timeoutSeconds: 5 + # Number of times k8s tries a probe before giving up + failureThreshold: 10 + +## Metrics configuration +# +metrics: + # Prometheus configurations + prometheus: + # Include annotations for prometheus scraping + enabled: true + +## MongoDB configuration +# +mongodb: + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 (tag: 4.0.3-debian-9) + tag: 3.6.6-debian-9 + + ## Enables a local mongo chart + enabled: true + usePassword: false + + ## Specify a custom mongo uri. Not needed when the local mongo is enabled. + ## Secret: {{ .Release.Name }}-mongoconfig.mongodb-uri + # uri: + + ## Name of secret to mount for mongo URI. The secret must have the `mongodb-uri` key + # uriSecretName: + +## Redis configuration +## See the URL below to configure the Redis chart at will +## https://github.com/kubernetes/charts/tree/master/stable/redis#configuration +# +redis: + # Enable Redis as chart's dependency + enabled: true + # Redis docker image pull policy + image: + pullPolicy: IfNotPresent + + # Specifies whether RBAC resources should be created. + # https://github.com/kubernetes/charts/blob/060135390785ad6af618d498c982a13573d1f2d9/stable/redis/values.yaml#L74-L89 + rbac: + create: true + # role: + # rules: [] + + serviceAccount: + # Specifies whether a ServiceAccount should be created + # If create is true, a name for the serviceAccount is generated using the fullname template + create: true + + ## Custom URL (port included) where Redis can be found + ## Not needed when .Values.redis.enabled is true + # uri: + + # Disable password authentication + usePassword: false + # Set custom password + # password: + + # Disable master-secondary topology by default. + # Need to override in production-like environments. + cluster: + enabled: false + # Expose Redis metrics via oliver006/redis_exporter. + # See https://github.com/oliver006/redis_exporter for more information. + metrics: + enabled: true + # Annotations for prometheus + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + + master: + securityContext: + enabled: false + slave: + securityContext: + enabled: false + +## Leader election sidecar container configuration. +## Please override these values only if you know what you're doing. +## ref: https://kubernetes.io/blog/2016/01/simple-leader-election-with-kubernetes/ +# +leaderelection: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set in qsefe will override this value. 
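+  ## With the defaults below, the "leader-election.image" helper in _helpers.tpl renders
+  ## "ghcr.io/qlik-download/leader-elector:1.8.0" (assuming no global.imageRegistry override).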
+ ## + registry: ghcr.io + repository: qlik-download/leader-elector + tag: "1.8.0" + +## JWT validation configurations. +## +jwt: + # Enable JWT validation using keys retrieved from the configured JWKS endpoint + enabled: true + + ## URI where the JWKS to validate JWTs is located. + ## Overrides the default of http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + # jwksURI: + + # Expected `audience` value within the JWT claims + jwtAud: qlik.api.internal + # Expected `issuer` value within the JWT claims + jwtIss: qlik.api.internal diff --git a/qliksense/charts/collections/.helmignore b/qliksense/charts/collections/.helmignore new file mode 100644 index 0000000..72f370d --- /dev/null +++ b/qliksense/charts/collections/.helmignore @@ -0,0 +1,2 @@ +tests +dependencies.yaml diff --git a/qliksense/charts/collections/Chart.yaml b/qliksense/charts/collections/Chart.yaml new file mode 100644 index 0000000..477478d --- /dev/null +++ b/qliksense/charts/collections/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +description: Service aggregating metadata for resource types such as apps, sharing + tasks, generic links etc +home: https://www.qlik.com +name: collections +sources: +- https://github.com/qlik-trial/collections +version: 2.8.55 diff --git a/qliksense/charts/collections/README.md b/qliksense/charts/collections/README.md new file mode 100644 index 0000000..bab2333 --- /dev/null +++ b/qliksense/charts/collections/README.md @@ -0,0 +1,79 @@ +# collections + +[collections](https://github.com/qlik-trial/collections) is the service responsible for the collections resource (and `/v1/collections`). + +## Introduction + +This chart bootstraps a collections deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/collections +``` + +The command deploys collections on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the collections chart and their default values. + +| Parameter | Description | Default | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------- | +| `global.imageRegistry` | Set this in a super-chart to override `image.registry` for this an all other charts | | +| `config.pdsURI` | URI to the policy-decision service | `http://{.Release.Name}-policy-decisions:5080` | +| `config.accessControl.enabled` | Toggle access control. (Rules enforcement) | `true` +| `config.accessControl.queryTimeout` | Timeout value in seconds when retrieving AST policy from PDS. | `30` +| `config.accessControl.evaluateTimeout` | Timeout value in seconds when evalute items access from PDS. | `30` | +| `config.auth.enabled` | Toggle JWT validation using retrieved keys from the configured JWKS endpoint. 
| `true` | +| `config.auth.jwksURI` | The endpoint to retrieve the JWKS | `http://{.Release.Name}-keys:8080/v1/keys/qlik.api.internal` | +| `config.auth.jwtAud` | The expected `audience` value within the JWT claims | `qlik.api.internal` | +| `config.auth.jwtIss` | The expected `issuer` value within the JWT claims | `qlik.api.internal` | +| `config.messaging.nats.addr` | NATS server address | `nats://{{ .Release.Name }}-nats-client:4222` | +| `config.messaging.nats.connectWaitSeconds` | The time to backoff after attempting to connect to a server. | `30` | +| `config.messaging.nats.tokenAuth.enabled` | Whether authenticating to NATS using a token is enabled | `true` | +| `config.messaging.nats.tokenAuth.privateKey` | The private key that corresponds to the JWKS in authentication service | See [values](./values.yaml) | +| `config.messaging.nats.tokenAuth.kid` | The key ID that corresponds to the JWKS in authentication service | `2gMxQ_Xn45K4P_UZK8QcQT72l1R9-zwQGnNTiDvx8VI` | +| `config.messaging.nats.tokenAuth.url` | URL to authentication service | `http://{{ .Release.Name }}-edge-auth:8080/v1/internal-tokens` | +| `config.messaging.stan.clusterID` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` | +| `config.messaging.stan.channel` | NATS Streaming channel to subscribe to | `system-events.engine.app` | +| `config.legacyRouter.enabled` | Enable legacy Goa router (default) or false to use Gorilla mux | `true` | +| `image.registry` | image registry | `qliktech-docker.jfrog.io` | +| `image.repository` | image repository name (i.e. just the name without the registry) | `collections` | +| `image.tag` | image version | `1.6.6` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-registry-secret"}]` | +| `replicaCount` | Number of collections replicas | `1` | +| `terminationGracePeriodSeconds` | Number of seconds to wait during pod termination after sending SIGTERM until SIGKILL | `30` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | collections listen port | `8080` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `ingress.annotations` | Ingress additional annotations | `[]]` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | `true` | +| `mongodb.enabled` | enable Mongodb as a chart dependency | `true` | +| `mongodb.uri` | If the mongodb chart dependency isn't used, specify the URI path to mongo | | +| `mongodb.uriSecretName` | name of secret to mount for mongo URI. The secret must have the `mongodb-uri` key | `{release.Name}-mongoconfig` | +| `hpa.enabled` | Toggle horizontal pod autoscaler. | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +helm install --name my-release -f values.yaml qlik/collections +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/collections/charts/messaging/Chart.yaml b/qliksense/charts/collections/charts/messaging/Chart.yaml new file mode 100644 index 0000000..115a5e4 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +description: | + Messaging system services. NATS and NATS Streaming are supported. Other services can communicate with each other and orchestrate their works using the services provided by this chart. +home: https://www.qlik.com +keywords: +- messaging +- queue +- nats +- nats-streaming +name: messaging +sources: +- https://github.com/nats-io/gnatsd +- https://github.com/nats-io/nats-streaming-server +- https://github.com/helm/charts/tree/master/stable/nats +- https://github.com/nats-io/prometheus-nats-exporter +- https://github.com/qlik-trial/nats-prom-exporter +version: 0.7.13 diff --git a/qliksense/charts/collections/charts/messaging/README.md b/qliksense/charts/collections/charts/messaging/README.md new file mode 100644 index 0000000..65a342b --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/README.md @@ -0,0 +1,235 @@ +# messaging + +This charts provides **messaging system** (a.k.a. message queue, message bus, etc.) capabilities for services. +Currently, [NATS](https://www.nats.io) and [NATS Streaming](https://nats.io/documentation/streaming/nats-streaming-intro/) +are included in this chart, but in the future, other message systems like RabbitMQ can also be added. + +## Installing the Chart + +To install the chart with the release name `messaging`: + +```console +helm install --name messaging qlik/messaging +``` + +## Uninstalling the Chart + +To uninstall/delete the `messaging` deployment: + +```console +helm delete messaging +``` + +## Configuration + +### NATS + +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------------- | ------------------------------------- | +| `nats.enabled` | enable NATS messaging system | `true` | +| `nats.image.registry` | NATS image registry | `qliktech-docker-snapshot.jfrog.io` | +| `nats.image.repository` | NATS Image name | `qnatsd` | +| `nats.image.tag` | NATS Image tag | `0.1.1` | +| `nats.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats.replicaCount` | number of nats replicas | `1` | +| `nats.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats.auth.enabled` | enable authentication for nats clients | `true` | +| `nats.auth.user` | username for nats client authentication | `nats_client` | +| `nats.auth.password` | password for nats client authentication | `T0pS3cr3t` | +| `nats.auth.jwtUsers` | array of jwt authenticated users | See [Authentication](#authentication) | +| `nats.clusterAuth.enabled` | enable authentication for nats clustering | `false` | +| `nats.clusterAuth.user` | username for nats clustering authentication | `nats_cluster` | +| `nats.clusterAuth.password` | password for nats clustering authentication | random string | +| `nats.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats.client.service.type` | nats-client service type | `ClusterIP` | +| `nats.client.service.port` | nats-client service port | `4222` | +| `nats.cluster.service.type` 
| nats-cluster service type | `ClusterIP` | +| `nats.cluster.service.port` | nats-cluster service port | `6222` | +| `nats.monitoring.service.type` | nats-monitoring service type | `ClusterIP` | +| `nats.monitoring.service.port` | nats-monitoring service port | `8222` | +| `nats.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats.resources` | CPU and memory requests and limits for nats | `{}` | +| `extraArgs` | Optional flags for NATS | See [values.yaml](./values.yaml) | + +### NATS Streaming + +| Parameter | Description | Default | +| ------------------------------------------- | ------------------------------------------- | --------------------------------------------- | +| `nats-streaming.enabled` | enable NATS messaging system | `false` | +| `nats-streaming.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats-streaming.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats-streaming.replicaCount` | number of nats replicas | `3` | +| `nats-streaming.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats-streaming.auth.enabled` | enable authentication for nats clients | `true` | +| `nats-streaming.auth.user` | username for nats client authentication | `nats_client` | +| `nats-streaming.auth.password` | password for nats client authentication | `nil` (Uses Secret below for password) | +| `nats-streaming.auth.secretName` | secretName for nats client authentication | `{{ .Release.Name }}-nats-secret` | +| `nats-streaming.auth.secretKey` | secretKey for nats client authentication | `client-password` | +| `nats-streaming.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats-streaming.monitoring.service.type` | nats-streaming-monitoring service type | `ClusterIP` | +| `nats-streaming.monitoring.service.port` | nats-streaming-monitoring service port | `8222` | +| `nats-streaming.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats-streaming.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats-streaming.resources` | CPU and memory requests and limits for nats | `{}` | +| `nats-streaming.clusterID` | nats streaming cluster name id | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats-streaming.natsSvc` | external nats server url | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats-streaming.persistence.volume.enabled` | enable persistence storage for the data | `false` | + +### Network Policy for NATS and NATS Streaming + +| Parameter | Description | Default | +| -------------------------------------- | ---------------------------------------------------------------- | --------------------- | +| `networkPolicy.nats.enabled` | enable custom network policy for NATS messaging system | `false` | +| `networkPolicy.nats-streaming.enabled` | enable custom network policy for NATS Streaming messaging system | `false` | +| `networkPolicy.keys.release` | keys service release name for egress rules | `{{ .Release.Name }}` | + +## Requirements + +### Network Plugin to enable Network Policies in Kubernetes cluster + +This chart include options to enable [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the created +`nats` and `nats-streaming` clusters. 
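+
+For reference, enabling these policies from a values override might look like the following sketch
+(parameter names are taken from the table above; the `keys` release name below is only a placeholder):
+
+```yaml
+networkPolicy:
+  nats:
+    enabled: true
+  nats-streaming:
+    enabled: true
+  keys:
+    # Release name of the keys service used in the egress rules (placeholder value).
+    release: my-release
+```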
+ +Network policies are implemented by the network plugin, so the Kubernetes cluster must be configured with a networking solution which supports NetworkPolicy - +simply creating the resource without a controller to implement it will have no effect. + +For local development, please refer to [Setting Up a Minikube Cluster - Configuring Network Plugin to support Network Policies](https://github.com/qlik-trial/elastic-charts/blob/master/docs/prerequisites/minikube-cluster.md#configuring-network-plugin-to-support-network-policies) +for detailed instructions. + +### Secrets + +For deploying this chart to **stage**/**prod**, you need the following secrets written to **vault**. + +*The passwords should not start with a number!* + +| Secret | Key | Purpose | +| -------------------------------------------------------------- | ------- | ----------------------------------- | +| `/secret/{environment}/messaging/{region}/natsClientPassword` | `value` | password for client authentication | +| `/secret/{environment}/messaging/{region}/natsClusterPassword` | `value` | password for cluster authentication | + +## Connecting to NATS / NATS Streaming + +### From the command line: +#### Port-forward NATS Client Service: +```sh + > kubectl port-forward messaging-nats-0 4222 +``` +#### Connect via `telnet`: +```sh + > telnet localhost 4222 +``` +#### Connect with no auth: +```sh + CONNECT {} +``` +#### Connect with auth: +```sh + CONNECT {"user":"my-user","pass":"T0pS3cr3t"} +``` +#### Subscribing to channel, publishing to a channel, and receiving the published message: +```sh + SUB foo 1 + +OK + PUB foo 11 + Hello World + +OK + MSG foo 1 11 + Hello World +``` + +### Using [go-nats](https://github.com/nats-io/go-nats/) and [go-nats-streaming](https://github.com/nats-io/go-nats-streaming) clients: +```golang +package main + +import ( + "log" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming" +) + +func main() { + nc, err := nats.Connect("nats://nats_client:asdf@localhost:4222") + if err != nil { + log.Fatal(err) + } + sc, err := stan.Connect("messaging-nats-streaming-cluster", "client-123", stan.NatsConn(nc)) + if err != nil { + log.Fatal(err) + } + sc.Publish("hello", []byte("msg1")) + + sc.Subscribe("hello", func(m *stan.Msg) { + log.Printf("[Received] %+v", m) + }, stan.StartWithLastReceived()) + + sc.Publish("hello", []byte("msg2")) + + select{} +} +``` + +### With Network Policies enabled + +To connect to `NATS` as a client with Network Policies enabled , the pod in which the service client is in must have the label +`{{ .Release.Name }}-nats-client=true`. + +Otherwise, if enabled, the `ingress` `Network Policy` for `NATS` will block incoming traffic from any pod without the appropriate label. + +`Network Policy` is enabled in `stage` and `production` environments. + +## Authentication + +It's important to know that when using NATS Streaming, a NATS connection is also required and that it is the NATS connection that handles authentication and authorization not the NATS Streaming connnection. + +### JWT Authentication + +NATS has been configured to allow authentication using service-to-service(S2S) JWTs, but in order to be authenticated, a service must be whitelisted. +The `nats.auth.jwtUsers` value can be used to provide a whitelist of users that should be authenticated using a S2S JWT. 
+**Note:** when using a S2S JWT, both the NATS username and the JWT `subject` must match.
+
+Adding a new service to the whitelist is as simple as updating the `nats.auth.jwtUsers` value as such:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+      - user: "my-other-service"
+      # ...etc
+```
+
+### Authorization
+
+The above method of adding a JWT authentication whitelist also allows for setting authorization rules.
+NATS [authorization rules](https://nats.io/documentation/managing_the_server/authorization/) can be configured on a per-subject basis.
+**Note:** For now, do not attempt to configure authorization rules for NATS Streaming; further documentation will be provided at a later date.
+
+The following is an example of adding publish/subscribe authorization rules:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+        permissions:
+          subscribe:
+            allow:
+              - "events.>" # service can subscribe to any subject that starts with `events.`
+            deny:
+              - "events.*.private" # service cannot subscribe to subjects such as `events.A.private` or `events.B.private` but can subscribe to `events.A.B.private`
+          publish:
+            allow:
+              - "events.mysubject.>" # service can publish to any subject that starts with `events.mysubject.`
+            deny:
+              - "bad" # service cannot publish to the `bad` subject
+```
+Wildcard support works as follows:
+
+The dot character `.` is the token separator.
+
+The asterisk character `*` is a token wildcard match.
+`e.g. foo.* matches foo.bar, foo.baz, but not foo.bar.baz.`
+
+The greater-than symbol `>` is a full wildcard match.
+`e.g. foo.> matches foo.bar, foo.baz, foo.bar.baz, foo.bar.1, etc.`
diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/Chart.yaml b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/Chart.yaml
new file mode 100644
index 0000000..903259e
--- /dev/null
+++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+appVersion: 0.6.0
+description: A NATS Streaming cluster setup
+home: https://nats.io/
+keywords:
+- NATS
+- Messaging
+- publish
+- subscribe
+- streaming
+- cluster
+- persistence
+name: nats-streaming
+version: 0.1.0
diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/README.md b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/README.md
new file mode 100644
index 0000000..3c62c91
--- /dev/null
+++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/README.md
@@ -0,0 +1,122 @@
+# NATS Streaming Clustering Helm Chart
+
+Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication.
+
+## Getting started
+
+This chart relies on an already available NATS Service to which the
+NATS Streaming nodes that will form a cluster can connect.
+You can install the NATS Operator and then use it to create a NATS cluster
+via the following:
+
+```console
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml
+```
+
+This will create a NATS cluster in the `nats-io` namespace.
+Then, to install a NATS Streaming cluster, the URL to the NATS cluster can be
+specified as follows (using `my-release` as the name label for the
+cluster):
+
+```console
+$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster
+$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222
+```
+
+This will create 3 follower nodes plus an extra Pod that is
+configured in bootstrapping mode and will start as the leader
+of the Raft group as soon as it joins.
+
+```console
+$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release"
+NAME                                          READY     STATUS    RESTARTS   AGE
+my-release-nats-streaming-cluster-0           1/1       Running   0          30s
+my-release-nats-streaming-cluster-1           1/1       Running   0          23s
+my-release-nats-streaming-cluster-2           1/1       Running   0          17s
+my-release-nats-streaming-cluster-bootstrap   1/1       Running   0          30s
+```
+
+Note that if the bootstrapping Pod fails, it will not be
+recreated; instead, one of the extra follower Pods will take over
+the leadership. The follower Pods are part of a Deployment, so
+they will be recreated in case of failure.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
+
+## Configuration
+
+| Parameter | Description | Default |
+| --------- | ----------- | ------- |
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `image.registry` | NATS Streaming image registry | `docker.io` |
+| `image.repository` | NATS Streaming Image name | `nats-streaming` |
+| `image.tag` | NATS Streaming Image tag | `{VERSION}` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` |
+| `auth.enabled` | Switch to enable/disable client authentication | `true` |
+| `auth.user` | Client authentication user | `nats_cluster` |
+| `auth.password` | Client authentication password | `random alphanumeric string (10)` |
+| `auth.token` | Client authentication token | `nil` |
+| `auth.secretName` | Client authentication secret name | `nil` |
+| `auth.secretKey` | Client authentication secret key | `nil` |
+| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` |
+| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` |
+| `maxChannels` | Max # of channels | `100` |
+| `maxSubs` | Max # of subscriptions per channel | `1000` |
+| `maxMsgs` | Max # of messages per channel | `"1000000"` |
+| `maxBytes` | Max messages total size per channel | `900mb` |
+| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` |
+| `debug` | Enable debugging | `false` |
+| `trace` | Enable detailed tracing | `false` |
+| `replicaCount` | Number of NATS Streaming nodes | `3` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` |
+| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` |
+| `podLabels` | Additional labels to be added to pods | {}
| +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/NOTES.txt b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . 
}}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/_helpers.tpl b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . 
-}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/statefulset.yaml b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..3a9f5b2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,247 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + value: {{ .Values.auth.user }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretKey }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ 
.Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats-streaming/values.yaml b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/values.yaml new file mode 100644 index 0000000..fecf26a --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats-streaming/values.yaml @@ -0,0 +1,287 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. 
+# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. 
+ # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/Chart.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/Chart.yaml new file mode 100644 index 0000000..04326ef --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +home: https://nats.io/ +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png +keywords: +- nats +- messaging +- addressing +- discovery +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: nats +sources: +- https://github.com/bitnami/bitnami-docker-nats +version: 2.0.1 diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/README.md b/qliksense/charts/collections/charts/messaging/charts/nats/README.md new file mode 100644 index 0000000..c05b4c2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/README.md @@ -0,0 +1,190 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod | `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. 
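+
+These flags map onto the chart's `auth` values; if you prefer to keep them in a values file (as shown below), a minimal sketch of the equivalent overrides is:
+
+```yaml
+auth:
+  enabled: true
+  user: my-user
+  password: T0pS3cr3t
+```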
+ +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/NOTES.txt b/qliksense/charts/collections/charts/messaging/charts/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . 
}} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/_helpers.tpl b/qliksense/charts/collections/charts/messaging/charts/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/client-svc.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/cluster-svc.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/configmap.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/configmap.yaml new file mode 100644 index 0000000..cde9fb2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/configmap.yaml @@ -0,0 +1,85 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . 
}} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/headless-svc.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/ingress.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/monitoring-svc.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/networkpolicy.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/statefulset.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/statefulset.yaml new file mode 100644 index 0000000..a4a8283 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/statefulset.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/templates/tls-secret.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/charts/nats/values.yaml b/qliksense/charts/collections/charts/messaging/charts/nats/values.yaml new file mode 100644 index 0000000..266ab69 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/charts/nats/values.yaml @@ -0,0 +1,297 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistrKeySecretName
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+  ## Metrics exporter port
+  port: 7777
+  ## Metrics exporter annotations
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "7777"
+  ## Metrics exporter flags
+  args:
+  - -connz
+  - -routez
+  - -subz
+  - -varz
+
+sidecars:
+## Add sidecars to the pod.
+## e.g.
+# - name: your-image-name
+  # image: your-image
+  # imagePullPolicy: Always
+  # ports:
+  # - name: portname
+  #   containerPort: 1234
diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/Chart.yaml b/qliksense/charts/collections/charts/messaging/nats-streaming/Chart.yaml
new file mode 100644
index 0000000..4eb2bd7
--- /dev/null
+++ b/qliksense/charts/collections/charts/messaging/nats-streaming/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+description: A NATS Streaming cluster setup
+name: nats-streaming
+version: 0.1.0
+appVersion: 0.6.0
+keywords:
+- NATS
+- Messaging
+- publish
+- subscribe
+- streaming
+- cluster
+- persistence
+home: https://nats.io/
diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/README.md b/qliksense/charts/collections/charts/messaging/nats-streaming/README.md
new file mode 100644
index 0000000..3c62c91
--- /dev/null
+++ b/qliksense/charts/collections/charts/messaging/nats-streaming/README.md
@@ -0,0 +1,123 @@
+# NATS Streaming Clustering Helm Chart
+
+Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication.
+
+## Getting started
+
+This chart relies on an already available NATS Service to which the
+NATS Streaming nodes that will form a cluster can connect.
+You can install the NATS Operator and then use it to create a NATS cluster
+via the following:
+
+```console
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml
+```
+
+This will create a NATS cluster in the `nats-io` namespace. Then, to
+install a NATS Streaming cluster, the URL of the NATS cluster can be
+specified as follows (using `my-release` as the name label for the
+cluster):
+
+```console
+$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster
+$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222
+```
+
+This will create 3 follower nodes plus an extra Pod that is
+configured in bootstrapping mode and will start as the leader
+of the Raft group as soon as it joins.
+
+```console
+$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release"
+NAME                                          READY     STATUS    RESTARTS   AGE
+my-release-nats-streaming-cluster-0           1/1       Running   0          30s
+my-release-nats-streaming-cluster-1           1/1       Running   0          23s
+my-release-nats-streaming-cluster-2           1/1       Running   0          17s
+my-release-nats-streaming-cluster-bootstrap   1/1       Running   0          30s
+```
+
+Note that if the bootstrapping Pod fails, it will not be recreated;
+instead, one of the extra follower Pods will take over the leadership.
+The follower Pods are part of a Deployment, so they will be recreated
+if they fail.
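+
+To observe this failover behaviour (assuming the `my-release` name and the default namespace used in the listing above), one option is to delete the bootstrapping Pod and watch one of the followers take over the Raft leadership:
+
+```console
+$ kubectl delete pod my-release-nats-streaming-cluster-bootstrap
+$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" -w
+```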
+ +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/templates/NOTES.txt b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/templates/_helpers.tpl b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . -}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/templates/statefulset.yaml b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..3a9f5b2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,247 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . 
}} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + value: {{ .Values.auth.user }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretKey }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . 
}}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats-streaming/values.yaml b/qliksense/charts/collections/charts/messaging/nats-streaming/values.yaml new file mode 100644 index 0000000..fecf26a --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats-streaming/values.yaml @@ -0,0 +1,287 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. 
+trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. 
+ # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/collections/charts/messaging/nats/Chart.yaml b/qliksense/charts/collections/charts/messaging/nats/Chart.yaml new file mode 100644 index 0000000..2310083 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 2.0.1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addressing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/qliksense/charts/collections/charts/messaging/nats/README.md b/qliksense/charts/collections/charts/messaging/nats/README.md new file mode 100644 index 0000000..c05b4c2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/README.md @@ -0,0 +1,190 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod | `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. 
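The same configuration can also be sketched as a values file. This is only an illustration: it sets just the `auth.*` parameters listed in the table above, and the credentials are placeholders.

```yaml
# Minimal override file equivalent to the --set example above.
# Only client authentication is configured; all other parameters keep the chart defaults.
auth:
  enabled: true
  user: my-user          # client authentication user
  password: T0pS3cr3t    # client authentication password (placeholder)
```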
+ +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/NOTES.txt b/qliksense/charts/collections/charts/messaging/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . 
}} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/_helpers.tpl b/qliksense/charts/collections/charts/messaging/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/client-svc.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/cluster-svc.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/configmap.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/configmap.yaml new file mode 100644 index 0000000..cde9fb2 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/configmap.yaml @@ -0,0 +1,85 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . 
}} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/headless-svc.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/ingress.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/monitoring-svc.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/networkpolicy.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/statefulset.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/statefulset.yaml new file mode 100644 index 0000000..a4a8283 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/statefulset.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/collections/charts/messaging/nats/templates/tls-secret.yaml b/qliksense/charts/collections/charts/messaging/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/nats/values.yaml b/qliksense/charts/collections/charts/messaging/nats/values.yaml new file mode 100644 index 0000000..266ab69 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/nats/values.yaml @@ -0,0 +1,297 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/collections/charts/messaging/requirements.yaml b/qliksense/charts/collections/charts/messaging/requirements.yaml new file mode 100644 index 0000000..5c1e09a --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/requirements.yaml @@ -0,0 +1,13 @@ +dependencies: + - name: nats + version: 2.0.1 + repository: "file://./nats" + # messaging.nats.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats.enabled,nats.enabled + - name: nats-streaming + version: 0.1.0 + repository: "file://./nats-streaming" + # messaging.nats-streaming.enabled is used by services that depend on the messaging chart to enable or disable nats streaming + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats-streaming.enabled,nats-streaming.enabled diff --git a/qliksense/charts/collections/charts/messaging/templates/_helper.tpl b/qliksense/charts/collections/charts/messaging/templates/_helper.tpl new file mode 100644 index 0000000..d03e4d7 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/templates/_helper.tpl @@ -0,0 +1,38 @@ +{{- define "messaging.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "messaging.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "messaging.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.name" -}} +{{- "nats" -}} +{{- end -}} + +{{- define "nats.fullname" -}} +{{- $name := "nats" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming.name" -}} +{{- "nats-streaming" -}} +{{- end -}} + +{{- define "nats-streaming.fullname" -}} +{{- $name := "nats-streaming" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/messaging/templates/nats-secret.yaml b/qliksense/charts/collections/charts/messaging/templates/nats-secret.yaml new file mode 100644 index 0000000..e58ebdf --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/templates/nats-secret.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret 
+type: Opaque +metadata: + name: {{ .Release.Name }}-nats-secret +data: + {{ if (.Values.nats.enabled) and .Values.nats.auth.password }} + client-password: {{ print .Values.nats.auth.password | b64enc }} + {{- end }} + {{ if .Values.nats.auth.token }} + client-token: {{ print .Values.nats.auth.token | b64enc }} + {{- end }} + + {{ if .Values.nats.clusterAuth.password }} + cluster-password: {{ print .Values.nats.clusterAuth.password | b64enc }} + {{- end }} + {{ if .Values.nats.clusterAuth.token }} + cluster-token: {{ print .Values.nats.clusterAuth.token | b64enc }} + {{- end }} diff --git a/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats-streaming.yaml b/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats-streaming.yaml new file mode 100644 index 0000000..cd855c0 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats-streaming.yaml @@ -0,0 +1,51 @@ +{{- if and (index .Values "nats-streaming" "enabled") (index .Values "networkPolicy" "nats-streaming" "enabled") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats-streaming.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ index .Values "nats-streaming" "monitoring" "service" "port" }} + from: + - podSelector: + matchLabels: + {{ template "nats-streaming.fullname" . }}-admin: "true" + - ports: + - port: {{ index .Values "nats-streaming" "metrics" "port" }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats.yaml b/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats.yaml new file mode 100644 index 0000000..df645c6 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/templates/networkpolicy-nats.yaml @@ -0,0 +1,51 @@ +{{- if and (.Values.nats.enabled) (.Values.networkPolicy.nats.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ .Values.nats.client.service.port }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . 
}}-client: "true" + - ports: + - port: {{ .Values.nats.metrics.port }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "keys" + release: {{ tpl ( .Values.networkPolicy.keys.release ) . | quote }} +{{- end }} diff --git a/qliksense/charts/collections/charts/messaging/values.yaml b/qliksense/charts/collections/charts/messaging/values.yaml new file mode 100644 index 0000000..fe31552 --- /dev/null +++ b/qliksense/charts/collections/charts/messaging/values.yaml @@ -0,0 +1,218 @@ +## Default values for the messaging Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## NATS configuration +## +nats: + ## Enables NATS chart by default + enabled: true + + ## Image pull policy for NATS chart + image: + registry: ghcr.io + repository: qlik-download/qnatsd + tag: 0.1.1 + pullPolicy: IfNotPresent + pullSecrets: + - name: artifactory-registry-secret + + ## Number of NATS nodes + replicaCount: 1 + + ## NATS statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS svc used for client connections + ## ref: https://github.com/nats-io/gnatsd#running + ## + client: + service: + type: ClusterIP + port: 4222 + + ## Kubernetes svc used for clustering + ## ref: https://github.com/nats-io/gnatsd#clustering + ## + cluster: + service: + type: ClusterIP + port: 6222 + + ## NATS svc used for monitoring + ## ref: https://github.com/nats-io/gnatsd#monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + ## Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## Client Authentication + auth: + enabled: true + user: "nats_client" + password: "T0pS3cr3t" + + ## Configuration of users that are authenticated used JWTs + ## Users can be configured with permissions to allow or deny publish/subscribe access to subjects + ## ref: https://nats.io/documentation/managing_the_server/authorization/ + ## + jwtUsers: + - user: "data-engineering-exporter" + - user: "qix-sessions" + - user: "audit" + - user: "users" + - user: "edge-auth" + - user: "tenants" + - user: "identity-providers" + - user: "resource-library" + - user: "engine" + - user: "collections" + + extraArgs: + - --jwt_users_file=/opt/bitnami/nats/users.json + - --jwt_auth_url=http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## for localdev use this configuration instead + # - --jwt_auth_url=http://keys:8080/v1/keys/qlik.api.internal + + ## Cluster Authentication + clusterAuth: + enabled: false + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-16 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-registry-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +## NATS Streaming configuration +## +nats-streaming: + enabled: true + + ## NATS Streaming image + image: + pullSecrets: + - name: artifactory-registry-secret + + ## NATS Streaming replicas + replicaCount: 3 + + ## NATS Streaming statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS Streaming extra options for liveness and readiness probes + readinessProbe: + enabled: true + initialDelaySeconds: 30 + + ## NATS Streaming svc used for monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + # Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## NATS Streaming cluster id + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + + ## NATS server + natsSvc: "nats://{{ .Release.Name }}-nats-client:4222" + + ## NATS server client Authentication + auth: + enabled: true + user: nats_client + secretName: "{{ .Release.Name }}-nats-secret" + secretKey: "client-password" + + ## Use for general debugging. Enabling this will negatively affect performance. + debug: true + + persistence: + volume: + ## If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + maxAge: "2h" + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-16 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-registry-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -channelz + + +## NATS and NATS Streaming Network Policy +## +networkPolicy: + ## NATS + nats: + enabled: false + ## NATS Streaminng + nats-streaming: + enabled: false + ## Keys + keys: + ## Set keys release name for egress rules + release: "{{ .Release.Name }}" diff --git a/qliksense/charts/collections/charts/mongodb/.helmignore b/qliksense/charts/collections/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/collections/charts/mongodb/Chart.yaml b/qliksense/charts/collections/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. +home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/collections/charts/mongodb/OWNERS b/qliksense/charts/collections/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/collections/charts/mongodb/README.md b/qliksense/charts/collections/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. 
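+
+> **Note**: the installation commands in this README assume the `stable` charts repository is already configured in your Helm 2 client. If it is not, it can be added first. This is a minimal sketch assuming the legacy `stable` repository URL; substitute your own mirror if the charts are hosted elsewhere:
+
+```bash
+# Assumption: legacy "stable" charts URL; point this at your own mirror if needed.
+$ helm repo add stable https://kubernetes-charts.storage.googleapis.com
+$ helm repo update
+```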
+ +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alhpanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alhpanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to 
persist data | `true` |
+| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
+| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` |
+| `persistence.size` | Size of data volume | `8Gi` |
+| `persistence.annotations` | Persistent Volume annotations | `{}` |
+| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `configmap` | MongoDB configuration file to be used | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+    --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \
+    stable/mongodb
+```
+
+The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mongodb
+```
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Replication
+
+You can start the MongoDB chart in replica set mode with the following command:
+
+```bash
+$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true
+```
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file contains a configuration to deploy a scalable, highly available MongoDB deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/mongodb
+```
+
+To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set.
+
+```console
+$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3
+```
+
+Some characteristics of this chart are:
+
+* Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes.
+* The number of secondary and arbiter nodes can be scaled out independently. +* Easy to move an application from using a standalone MongoDB server to use a replica set. + +## Initialize a fresh instance + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +The allowed extensions are `.sh`, and `.js`. + +## Persistence + +The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/collections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/collections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/NOTES.txt b/qliksense/charts/collections/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . 
}}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/collections/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . 
-}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/collections/charts/mongodb/templates/configmap.yaml b/qliksense/charts/collections/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/collections/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/collections/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/collections/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/collections/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/collections/charts/mongodb/templates/secrets.yaml b/qliksense/charts/collections/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/collections/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/collections/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/collections/charts/mongodb/values-production.yaml b/qliksense/charts/collections/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. +# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/collections/charts/mongodb/values.yaml b/qliksense/charts/collections/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/collections/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. +# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/collections/requirements.yaml b/qliksense/charts/collections/requirements.yaml new file mode 100644 index 0000000..7b4c8b3 --- /dev/null +++ b/qliksense/charts/collections/requirements.yaml @@ -0,0 +1,9 @@ +dependencies: + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: mongodb.enabled + - name: messaging + version: 0.7.13 + repository: "@qlik" + condition: messaging.enabled diff --git a/qliksense/charts/collections/templates/_helper.tpl b/qliksense/charts/collections/templates/_helper.tpl new file mode 100644 index 0000000..f5ec076 --- /dev/null +++ b/qliksense/charts/collections/templates/_helper.tpl @@ -0,0 +1,53 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "collections.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "collections.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper collections image name */}} +{{- define "collections.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/collections/templates/deployment.yaml b/qliksense/charts/collections/templates/deployment.yaml new file mode 100644 index 0000000..996bf69 --- /dev/null +++ b/qliksense/charts/collections/templates/deployment.yaml @@ -0,0 +1,131 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "collections.fullname" . }} + labels: + app: {{ template "collections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "collections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.config.messaging.enabled }} + {{ tpl ( .Values.config.messaging.podLabel.key ) . }}: {{ .Values.config.messaging.podLabel.value | quote }} + {{- end }} + spec: + containers: + - name: {{ template "collections.name" . }} + image: {{ template "collections.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + env: + - name: TERMINATION_GRACE_PERIOD_SECONDS + value: {{ .Values.terminationGracePeriodSeconds | quote }} + - name: MONGO_URI + valueFrom: + secretKeyRef: + {{ if .Values.mongodb.uriSecretName -}} + name: {{ .Values.mongodb.uriSecretName }} + {{ else -}} + name: {{ .Release.Name }}-mongoconfig + {{ end -}} + key: mongodb-uri + - name: PDS_URI + value: {{ default (printf "http://%s-policy-decisions:5080" .Release.Name ) .Values.config.pdsURI | quote }} + - name: ACCESS_CONTROL_ENABLED + value: {{ .Values.config.accessControl.enabled | quote }} + - name: AUTH_ENABLED + value: {{ .Values.config.auth.enabled | quote }} + - name: AUTH_JWKS_URI + value: {{ default (printf "http://%s-keys:8080/v1/keys/qlik.api.internal" .Release.Name ) .Values.config.auth.jwksURI | quote }} + - name: LEGACY_ROUTER_ENABLED + value: {{ .Values.config.legacyRouter.enabled | quote }} + {{- if .Values.config.auth.jwtAud }} + - name: AUTH_JWT_AUD + value: {{ .Values.config.auth.jwtAud | quote }} + {{- end -}} + {{- if .Values.config.auth.jwtIss }} + - name: AUTH_JWT_ISS + value: {{ .Values.config.auth.jwtIss | quote }} + {{- end }} + - name: ROLLBAR_ENABLED + value: {{ .Values.rollbar.enabled | quote }} + {{- if .Values.rollbar.enabled }} + - name: ROLLBAR_TOKEN + value: {{ required "A valid .Values.rollbar.token entry required!" 
(.Values.rollbar.token | quote) }} + {{- end }} + {{- if .Values.config.logLevel }} + - name: LOG_LEVEL + value: {{ .Values.config.logLevel | quote }} + {{- end }} + {{- if .Values.config.messaging.enabled }} + - name: MESSAGING_ENABLED + value: {{ .Values.config.messaging.enabled | quote }} + - name: NATS_ADDR + value: {{ tpl ( .Values.config.messaging.nats.addr ) . | quote }} + - name: NATS_CONNECT_WAIT_SECONDS + value: {{ .Values.config.messaging.nats.connectWaitSeconds | quote }} + - name: NATS_STREAMING_CLUSTER_ID + value: {{ tpl ( .Values.config.messaging.stan.clusterID ) . | quote }} + - name: NATS_STREAMING_CHANNEL + value: {{ .Values.config.messaging.stan.channel | quote }} + {{- end }} + {{- if and (.Values.config.messaging.enabled) (.Values.config.messaging.nats.tokenAuth.enabled) }} + - name: NATS_TOKEN_AUTH_ENABLED + value: {{ .Values.config.messaging.nats.tokenAuth.enabled | quote }} + - name: NATS_TOKEN_AUTH_PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ template "collections.fullname" . }}-tokenconfig + key: token-privateKey + - name: NATS_TOKEN_AUTH_KID + valueFrom: + secretKeyRef: + name: {{ template "collections.fullname" . }}-tokenconfig + key: token-kid + - name: NATS_TOKEN_AUTH_URL + value: {{ tpl ( .Values.config.messaging.nats.tokenAuth.url ) . | quote }} + - name: ACCESS_CONTROL_QUERY_TIMEOUT + value: {{ .Values.config.accessControl.queryTimeout | quote }} + - name: ACCESS_CONTROL_EVALUATE_TIMEOUT + value: {{ .Values.config.accessControl.evaluateTimeout | quote }} + {{- end }} + - name: ENV + value: {{ .Values.config.env | quote }} +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumeMounts: +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: /ready + port: {{ .Values.service.port }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumes: +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + selector: + matchLabels: + app: {{ template "collections.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/collections/templates/hpa.yml b/qliksense/charts/collections/templates/hpa.yml new file mode 100644 index 0000000..0c30a6c --- /dev/null +++ b/qliksense/charts/collections/templates/hpa.yml @@ -0,0 +1,26 @@ +{{- if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "collections.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "collections.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{- if .Values.hpa.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{- if .Values.hpa.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/collections/templates/ingress.yaml b/qliksense/charts/collections/templates/ingress.yaml new file mode 100644 index 0000000..4c46f42 --- /dev/null +++ b/qliksense/charts/collections/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "collections.fullname" . }} + labels: + app: {{ template "collections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/v1/(.*) /v1/$1 break; +{{- with .Values.ingress.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + rules: + - http: + paths: + - path: /api/v1/collections + backend: + serviceName: {{ template "collections.fullname" . }} + servicePort: {{ .Values.service.port }} + - path: /api/v1/items + backend: + serviceName: {{ template "collections.fullname" . }} + servicePort: {{ .Values.service.port }} diff --git a/qliksense/charts/collections/templates/mongo-secret.yaml b/qliksense/charts/collections/templates/mongo-secret.yaml new file mode 100644 index 0000000..09cde28 --- /dev/null +++ b/qliksense/charts/collections/templates/mongo-secret.yaml @@ -0,0 +1,13 @@ +{{- if or .Values.mongodb.uri .Values.mongodb.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-mongoconfig +type: Opaque +data: +{{- if .Values.mongodb.uri}} + mongodb-uri: {{ .Values.mongodb.uri | b64enc }} +{{- else if .Values.mongodb.enabled}} + mongodb-uri: {{ print "mongodb://" .Release.Name "-mongodb:27017" | b64enc }} +{{- end}} +{{- end}} diff --git a/qliksense/charts/collections/templates/service.yaml b/qliksense/charts/collections/templates/service.yaml new file mode 100644 index 0000000..e8328b7 --- /dev/null +++ b/qliksense/charts/collections/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "collections.fullname" . }} + labels: + app: {{ template "collections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "collections.name" . }} + selector: + app: {{ template "collections.name" . 
}} + release: {{ .Release.Name }} diff --git a/qliksense/charts/collections/templates/token-secret.yaml b/qliksense/charts/collections/templates/token-secret.yaml new file mode 100644 index 0000000..33c7e44 --- /dev/null +++ b/qliksense/charts/collections/templates/token-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "collections.fullname" . }}-tokenconfig + labels: + app: {{ template "collections.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + token-privateKey: {{ .Values.config.messaging.nats.tokenAuth.privateKey | b64enc }} + token-kid: {{ .Values.config.messaging.nats.tokenAuth.kid | b64enc }} diff --git a/qliksense/charts/collections/values.yaml b/qliksense/charts/collections/values.yaml new file mode 100644 index 0000000..e489a66 --- /dev/null +++ b/qliksense/charts/collections/values.yaml @@ -0,0 +1,204 @@ +## Default values for Collections Helm Chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +## Collections service configuration +config: + ## endpoint to reach the policy-decisions - override the default of http://{.Release.Name}-policy-decisions:5080 + # pdsURI: + ## Access Control / rules enforcement setup + accessControl: + ## when enabled, rules are enforced + enabled: true + queryTimeout: 30 + evaluateTimeout: 30 + ## Log level (debug|info|warn|error) + # logLevel: debug + + ## Authentication configurations. + ## + auth: + # toggle JWT validation using retrieved keys from the configured JWKS endpoint + enabled: true + # endpoint to retrieve the JWKS - override the default of http://{.Release.Name}-keys:8080/v1/keys/qlik.api.internal + # jwksURI: + # expected `audience` value within the JWT claims + jwtAud: qlik.api.internal + # expected `issuer` value within the JWT claims + jwtIss: qlik.api.internal + + ## Messaging + ## + messaging: + ## True to use NATS and NATS Streaming. + enabled: true + + ## Pod label required to allow communication with NATS + podLabel: + key: "{{ .Release.Name }}-nats-client" + value: true + + ## NATS configuration + ## + nats: + ## Address of NATS server + addr: "nats://{{ .Release.Name }}-nats-client:4222" + ## For localdev use this configuration instead + # addr: "nats://messaging-nats-client:4222" + + ## connectWaitSeconds sets the time to backoff after attempting to connect + ## to a server. + ## connectWaitSeconds is used when creating the initial connection to a server. + ## connectWaitSeconds is not used when reconnecting to a server after the connection was lost. 
+ # connectWaitSeconds: 30 + + ## NATS token authentication configuration + tokenAuth: + enabled: true + privateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDBKPV0yFCreja79W1krQslS0VOWUU/4aH8LneCDzF/oSypFfTtgLJ5Q + 5sYYVx7sjwKgBwYFK4EEACKhZANiAASCHasQzU1WjrXXKpJjYcmRpnNyXKtS5gWG + xFQFMJdISUqKPjwT1XoAvG7LxqkqL4j6xTlj0HGgihV0DvUsduy3TTi1D+2FgZfJ + i1iYZrFKvVd7X4mnjv84sPZp86HAgck= + -----END EC PRIVATE KEY----- + kid: 2gMxQ_Xn45K4P_UZK8QcQT72l1R9-zwQGnNTiDvx8VI + url: "http://{{ .Release.Name }}-edge-auth:8080/v1/internal-tokens" + ## For localdev use this configuration instead + # url: "http://edge-auth:8080/v1/internal-tokens" + + ## NATS Streaming configuration + ## + stan: + ## NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + ## For localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + + ## System events channel to subscribe to + channel: system-events.engine.app,system-events.generic-links + + ## Environment for Rollbar + # env: localdev + + # Directive to use the legacy router implementation i.e. Goa.mux + legacyRouter: + enabled: false + + +image: + ## Default registry where this repository should be pulled from. Will be overridden by `global.imageRegistry` if set + registry: ghcr.io + ## Collections image. + repository: qlik-download/collections + ## Collections image version. + ## ref: https://hub.docker.com/r/qlik/collections/tags/ + tag: 1.6.6 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. +## +replicaCount: 1 + +## Number of seconds to wait during pod termination after sending SIGTERM until SIGKILL. +## +terminationGracePeriodSeconds: 30 + +## deployment resources +resources: {} + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 8080 + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + + ## Annotations to be added to the ingress. + ## + annotations: [] + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## MongoDB configuration +mongodb: + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 (tag: 4.0.3-debian-9) + tag: 3.6.12 + ## Enables a local mongo chart + enabled: true + ## disable password for local dev mode + usePassword: false + + ## Specify a custom mongo uri. Not needed when the local mongo is enabled. + ## Secret: {{ Release.Name }}-mongoconfig.mongo-uri + # uri: + + ## name of secret to mount for mongo URI. 
The secret must have the `mongodb-uri` key
+  # uriSecretName:
+
+## Rollbar configuration
+rollbar:
+  ## Enables rollbar error tracking
+  enabled: false
+  # token:
+
+## Messaging chart configuration
+messaging:
+  ## Set messaging.enabled to true for localdev and CI builds
+  enabled: false
+  nats:
+    enabled: true
+    replicaCount: 1
+    auth:
+      enabled: false
+    clusterAuth:
+      enabled: false
+  nats-streaming:
+    enabled: false
+    replicaCount: 3
+    auth:
+      enabled: false
+
+## Horizontal pod autoscaler
+hpa:
+  ## Toggle horizontal pod autoscaler
+  enabled: false
+  ## Minimum number of replicas
+  minReplicas: 3
+  ## Maximum number of replicas
+  maxReplicas: 6
+  ## See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details
+  targetAverageUtilizationCpu: 80
+  targetAverageUtilizationMemory: 80
diff --git a/qliksense/charts/data-connections/.helmignore b/qliksense/charts/data-connections/.helmignore
new file mode 100644
index 0000000..dd3e638
--- /dev/null
+++ b/qliksense/charts/data-connections/.helmignore
@@ -0,0 +1 @@
+dependencies.yaml
diff --git a/qliksense/charts/data-connections/Chart.yaml b/qliksense/charts/data-connections/Chart.yaml
new file mode 100644
index 0000000..2cd5596
--- /dev/null
+++ b/qliksense/charts/data-connections/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+appVersion: 0.5.0
+description: Helm chart for data-connections, a service managing data connections
+home: https://www.qlik.com
+name: data-connections
+sources:
+- https://github.com/qlik-trial/data-connections
+version: 1.7.4
diff --git a/qliksense/charts/data-connections/README.md b/qliksense/charts/data-connections/README.md
new file mode 100644
index 0000000..1b45ca5
--- /dev/null
+++ b/qliksense/charts/data-connections/README.md
@@ -0,0 +1,85 @@
+# data-connections
+
+[data-connections](https://github.com/qlik-trial/data-connections) is responsible for managing data connections and data credentials.
+
+## Introduction
+
+This chart bootstraps a data-connections deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qliktech-docker-snapshot.jfrog.io/data-connections
+```
+
+The command deploys data-connections on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the data-connections chart and their default values.
+
+| Parameter | Description | Default |
+| --------- | ----------- | ------- |
+| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` |
+| `image.registry` | The default registry where the repository is pulled from.
| `qliktech-docker.jfrog.io` | +| `image.repository` | Docker image name with no registry | `data-connections` | +| `image.tag` | image version | `0.5.0` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | a list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `replicaCount` | number of data-connections replicas | `1` | +| `deployment.annotations` | deployment annotations | `{}` | +| `service.type` | service type | `ClusterIP` | +| `service.port` | data-connections listen port | `9011` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `metrics.prometheus.enabled` | enable annotations for prometheus scraping | `true` | +| `mongodb.enabled` | enable Mongodb as a chart dependency | `true` | +| `mongodb.uri` | If the mongodb chart dependency isn't used, specify the URI path to mongo | | +| `mongodb.uriSecretName` | name of secret to mount for mongo URI. The secret must have the `mongodb-uri` key | `{release.Name}-mongoconfig` | +| `nats.enabled` | Enable NATS streaming | `true` | +| `nats.addr` | NATS server address | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats.clusterID` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats.podLabel.key` | pod label key required to allow communication with NATS | `{{ .Release.Name }}-nats-client` | +| `nats.podLabel.value` | pod label value required to allow communication with NATS | `true` | +| `nfsconnector.uri` | NFS connector URI | `{{ .Release.Name }}-data-connector-nfs:50051` | +| `env.loglevel` | Log level | `info` | +| `env.enableJwtAuth` | Enable JWT authentication | `true` | +| `env.enableConnIndexBuild` | Enable indexes build | `false` | +| `env.enableIndexBuild` | Enable indexes build for collections | `false` for all collections | +| `env.bypassQuery` | Bypass query filtering | `` | +| `dcRedis.enabled` | Enable to use redis | `false` | +| `dcRedis.configName` | Redis secret configuration name (created by Dcaas) | | +| `dcRedis.encryptionKey` | Encryption key used to encrypt data credentials when saving to Redis | | +| `serviceJwt.privateKeyPath` | Private key mount path | `/mnt/key` | +| `serviceJwt.jwtPrivateKey` | Private key | See value file | +| `serviceJwt.keyId` | key ID | See value file | +| `serviceJwt.authUri` | URI of service used to sign internal tokens | See value file | +| `jwks.uri` | URI where the JWKS to validate JWTs is located | `http://keys:8080/v1/keys/qlik.api.internal` | +| `elasticEncryption.enabled` | Enable encryption service | `false` | +| `elasticEncryption.uri` | URI of encryption service | `http://encryption:8080` | +| `datafiles.uri` | URI of the qix-datafiles service | `http://{.Release.Name}-qix-datafiles:8080` | +| `spaces.uri` | URI of Spaces service | `http://{.Release.Name}-spaces:6080` | +| `features.uri` | URI of feature-flags service | `http://{.Release.Name}-feature-flags:8080` | +| `pds.uri` | URI of policy-decisions service | `http://{{.Release.Name}}-policy-decisions:5080` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
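+
+A minimal override file for a few commonly changed parameters might look like the following sketch (the keys come from the table above; the MongoDB URI and log level shown here are purely illustrative):
+
+```yaml
+# values.yaml -- example overrides for the data-connections chart
+mongodb:
+  # Disable the bundled MongoDB sub-chart and point at an external instance instead
+  enabled: false
+  uri: mongodb://my-external-mongo:27017/data-connections
+env:
+  # Raise log verbosity while debugging (default is info)
+  loglevel: debug
+```
+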
For example, + +```console +helm install --name my-release -f values.yaml qliktech-docker-snapshot.jfrog.io/data-connections +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connections/charts/mongodb/.helmignore b/qliksense/charts/data-connections/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/data-connections/charts/mongodb/Chart.yaml b/qliksense/charts/data-connections/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. +home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/data-connections/charts/mongodb/OWNERS b/qliksense/charts/data-connections/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/data-connections/charts/mongodb/README.md b/qliksense/charts/data-connections/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
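+
+Note that when the chart ran in replica set mode, `helm delete` leaves behind the PersistentVolumeClaims that were created from the StatefulSet volume claim templates, so the data survives the release. A hedged cleanup sketch (the claim names below are illustrative; list the claims first and delete only the ones that belong to the release):
+
+```bash
+$ kubectl get pvc
+$ kubectl delete pvc datadir-my-release-mongodb-primary-0 datadir-my-release-mongodb-secondary-0
+```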
+ +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alhpanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alhpanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive 
successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `configmap` | MongoDB configuration file to be used | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+    --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \
+    stable/mongodb
+```
+
+The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mongodb
+```
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Replication
+
+You can start the MongoDB chart in replica set mode with the following command:
+
+```bash
+$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true
+```
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file contains a configuration to deploy a scalable and highly available MongoDB deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/mongodb
+```
+
+To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set.
+
+```console
+$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3
+```
+
+Some characteristics of this chart are:
+
+* Each of the participants in the replication has a fixed stateful set so you always know where to find the primary, secondary or arbiter nodes.
+* The number of secondary and arbiter nodes can be scaled out independently.
+* It is easy to move an application from a standalone MongoDB server to a replica set.
+
+## Initialize a fresh instance
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+The allowed extensions are `.sh` and `.js`.
+
+## Persistence
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container.
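+
+A values override that tunes how this data volume is provisioned might look like the following sketch (the keys come from the configuration table above; the `gp2` storage class is only an assumption about the target cluster):
+
+```yaml
+persistence:
+  enabled: true
+  ## Storage class for the PVC backing /bitnami/mongodb; set to "-" to disable dynamic provisioning
+  storageClass: "gp2"
+  size: 20Gi
+```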
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/data-connections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/data-connections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/NOTES.txt b/qliksense/charts/data-connections/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . 
}}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/data-connections/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/configmap.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/secrets.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . 
}} + component: arbiter + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/data-connections/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/data-connections/charts/mongodb/values-production.yaml b/qliksense/charts/data-connections/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/data-connections/charts/mongodb/values.yaml b/qliksense/charts/data-connections/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/data-connections/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
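+  ## For reference, a rough sketch of what templates/svc-standalone.yaml renders
+  ## from these defaults (metadata and selector elided):
+  #   spec:
+  #     type: ClusterIP
+  #     ports:
+  #       - name: mongodb
+  #         port: 27017
+  #         targetPort: mongodb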
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/data-connections/requirements.yaml b/qliksense/charts/data-connections/requirements.yaml new file mode 100644 index 0000000..4bdb5c9 --- /dev/null +++ b/qliksense/charts/data-connections/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: mongodb.enabled diff --git a/qliksense/charts/data-connections/templates/NOTES.txt b/qliksense/charts/data-connections/templates/NOTES.txt new file mode 100644 index 0000000..cdd7cc5 --- /dev/null +++ b/qliksense/charts/data-connections/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "data-connections.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "data-connections.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "data-connections.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "data-connections.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9011 to use your application" + kubectl port-forward $POD_NAME 9011:{{ .Values.service.port }} +{{- end }} diff --git a/qliksense/charts/data-connections/templates/_helpers.tpl b/qliksense/charts/data-connections/templates/_helpers.tpl new file mode 100644 index 0000000..278127d --- /dev/null +++ b/qliksense/charts/data-connections/templates/_helpers.tpl @@ -0,0 +1,60 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-connections.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
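+For example (illustrative): a release named "qliksense" renders the full name
+"qliksense-data-connections", while a release named "data-connections-test"
+already contains the chart name and is therefore used as-is (both truncated to
+63 characters if needed).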
+*/}} +{{- define "data-connections.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-connections.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return data-connections image name */}} +{{- define "data-connections.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/data-connections/templates/deployment.yaml b/qliksense/charts/data-connections/templates/deployment.yaml new file mode 100644 index 0000000..988887e --- /dev/null +++ b/qliksense/charts/data-connections/templates/deployment.yaml @@ -0,0 +1,130 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: {{ template "data-connections.fullname" . }} + labels: + app: {{ template "data-connections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.deployment.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connections.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connections.name" . }} + release: {{ .Release.Name }} + {{- if .Values.nats.enabled }} + {{ tpl ( .Values.nats.podLabel.key ) . }}: {{ .Values.nats.podLabel.value | quote }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connections.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . 
| nindent 10 }} +{{- end }}{{- end }}{{- end }} + - name: dc-storage + mountPath: {{ .Values.serviceJwt.privateKeyPath | quote }} + readOnly: true + env: + - name: MONGO_URI + valueFrom: + secretKeyRef: + {{ if .Values.mongodb.uriSecretName -}} + name: {{ .Values.mongodb.uriSecretName }} + {{ else -}} + name: {{ .Release.Name }}-mongoconfig + {{ end -}} + key: mongodb-uri + - name: BYPASS_QUERY + value: {{ .Values.env.bypassQuery | quote }} + - name: ENABLE_CRYPTO + value: {{ .Values.elasticEncryption.enabled | quote }} + - name: ENABLE_JWT_AUTH + value: {{ .Values.env.enableJwtAuth | quote }} + - name: ENABLE_INDEX_BUILD + value: {{ .Values.env.enableIndexBuild | quote }} + - name: JWKS_URI + value: {{ default (printf "http://%s-keys:8080/v1/keys/qlik.api.internal" .Release.Name ) .Values.jwks.uri | quote }} + - name: E2S_ADDR + value: {{ default (printf "http://%s-encryption:8080" .Release.Name ) .Values.elasticEncryption.uri | quote }} + - name: DATAFILES_URI + value: {{ default (printf "http://%s-qix-datafiles:8080" .Release.Name ) .Values.datafiles.uri | quote }} + - name: NFS_ADDR + value: {{ tpl ( .Values.nfsconnector.uri ) . | quote }} + - name: SPACE_URI + value: {{ default (printf "http://%s-spaces:6080" .Release.Name ) .Values.spaces.uri | quote }} + - name: AUTH_URI + value: {{ default (printf "http://%s-edge-auth:8080" .Release.Name ) .Values.serviceJwt.authUri | quote }} + - name: FEATURE_FLAG_ADDR + value: {{ tpl ( .Values.features.uri ) . | quote }} + - name: PDS_ADDR + value: {{ tpl ( .Values.pds.uri ) . | quote }} + - name: PRIVATE_KEY + value: {{ printf "%s/%s" .Values.serviceJwt.privateKeyPath "jwtPrivateKey" }} + - name: KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-keysconfig + key: jwksKeyID + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: NATS_ENABLED + value: {{ .Values.nats.enabled | quote }} + {{- if .Values.nats.enabled }} + - name: NATS_ADDR + value: {{ tpl ( .Values.nats.addr ) . | quote }} + - name: NATS_CLUSTER_ID + value: {{ tpl ( .Values.nats.clusterID ) . | quote }} + {{- end }} + {{- if .Values.dcRedis.enabled }} + - name: REDIS_ADDR + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.dcRedis.configName }} + key: redis-addr + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.dcRedis.configName }} + key: redis-password + - name: REDIS_ENCRYPTION_KEY + valueFrom: + secretKeyRef: + name: {{ printf "%s-encryptionconfig" .Release.Name }} + key: redisEncryptionKey + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . 
| nindent 8 }} +{{- end }}{{- end }}{{- end }} + - name: dc-storage + secret: + secretName: {{ .Release.Name }}-keysconfig +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} diff --git a/qliksense/charts/data-connections/templates/encryption-secret.yaml b/qliksense/charts/data-connections/templates/encryption-secret.yaml new file mode 100644 index 0000000..09d4729 --- /dev/null +++ b/qliksense/charts/data-connections/templates/encryption-secret.yaml @@ -0,0 +1,9 @@ +{{- if or .Values.dcRedis.enabled .Values.dcRedis.encryptionKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-encryptionconfig +type: Opaque +data: + redisEncryptionKey: {{ .Values.dcRedis.encryptionKey | b64enc }} +{{- end }} diff --git a/qliksense/charts/data-connections/templates/ingress.yaml b/qliksense/charts/data-connections/templates/ingress.yaml new file mode 100644 index 0000000..80aafc8 --- /dev/null +++ b/qliksense/charts/data-connections/templates/ingress.yaml @@ -0,0 +1,30 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "data-connections.fullname" . }} + labels: + app: {{ template "data-connections.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/(v[0-9])/dc-(.*) /$1/$2 break; +{{- with .Values.ingress.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + rules: + - http: + paths: + - path: /api/v1/dc-dataconnections + backend: + serviceName: {{ template "data-connections.fullname" . }} + servicePort: {{ .Values.service.port }} + - path: /api/v1/dc-datacredentials + backend: + serviceName: {{ template "data-connections.fullname" . 
}} + servicePort: {{ .Values.service.port }} diff --git a/qliksense/charts/data-connections/templates/keys-secret.yaml b/qliksense/charts/data-connections/templates/keys-secret.yaml new file mode 100644 index 0000000..37a22df --- /dev/null +++ b/qliksense/charts/data-connections/templates/keys-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-keysconfig +type: Opaque +data: + jwtPrivateKey: {{ .Values.serviceJwt.jwtPrivateKey | b64enc }} + jwksKeyID: {{ .Values.serviceJwt.keyId | b64enc }} diff --git a/qliksense/charts/data-connections/templates/mongo-secret.yaml b/qliksense/charts/data-connections/templates/mongo-secret.yaml new file mode 100644 index 0000000..09cde28 --- /dev/null +++ b/qliksense/charts/data-connections/templates/mongo-secret.yaml @@ -0,0 +1,13 @@ +{{- if or .Values.mongodb.uri .Values.mongodb.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-mongoconfig +type: Opaque +data: +{{- if .Values.mongodb.uri}} + mongodb-uri: {{ .Values.mongodb.uri | b64enc }} +{{- else if .Values.mongodb.enabled}} + mongodb-uri: {{ print "mongodb://" .Release.Name "-mongodb:27017" | b64enc }} +{{- end}} +{{- end}} diff --git a/qliksense/charts/data-connections/templates/service.yaml b/qliksense/charts/data-connections/templates/service.yaml new file mode 100644 index 0000000..66c51af --- /dev/null +++ b/qliksense/charts/data-connections/templates/service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connections.fullname" . }} + labels: + app: {{ template "data-connections.name" . }} + chart: {{ template "data-connections.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- with .Values.service.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port}} + protocol: TCP + name: {{ template "data-connections.name" . }} + selector: + app: {{ template "data-connections.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/data-connections/values.yaml b/qliksense/charts/data-connections/values.yaml new file mode 100644 index 0000000..c542550 --- /dev/null +++ b/qliksense/charts/data-connections/values.yaml @@ -0,0 +1,182 @@ +# Default values for data-connections. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## data-connections image + ## + repository: qlik-download/data-connections + + ## data-connections image version. + ## ref: https://hub.docker.com/r/qlik/data-connections/tags/ + ## + tag: 0.5.0 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. +## +replicaCount: 1 + +## Deployment configuration. 
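+## A minimal sketch of overriding this block; the annotation key and value below
+## are placeholders, not chart defaults:
+# deployment:
+#   annotations:
+#     example.com/owner: data-integration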
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ +deployment: {} + ## Annotations to be added to the deployment. + ## + # annotations: + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 9011 + name: data-connections + +## Redis configuration +dcRedis: + ## If enabled, data-connections will be using dcaas-redis + enabled: false + ## Redis configuration name + # configName: + ## Set encryption key below + encryptionKey: "RaWGq*X37mEqNdj2" + +## RSA or EC Private signing key used for requesting service JWT +## +serviceJwt: + privateKeyPath: /mnt/key + jwtPrivateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDDjakSJ4RhuvJQbAjFDgN8AVXVjd1EdwKj1cznzAsTKM+ZIg5aPS2rV + eWsDrnin85igBwYFK4EEACKhZANiAAR8IKlP/KtNf+urXvkkm9GV/EZ3FKLOCD1Q + rsJZyZE7mMGe60s58xWbrzKNtakynmzUUrhgDJWHmY4Wn4WDewoKG8CiDfcU20jQ + BpUttahJkZF370SLnTMhNaefNM4S/4A= + -----END EC PRIVATE KEY----- + keyId: Imjw67kdScLNfPPragGwlTSZf4E_XvoNCR3IK6BETGk + ## authUri override of default http://{.Release.Name}-edge-auth:8080" + # authUri: http://edge-auth:8080 + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Annotations to be added to the ingress. + ## + annotations: [] + + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## MongoDB configuration +mongodb: + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 (tag: 4.0.3-debian-9) + tag: 3.6.6-debian-9 + ## Enables a local mongo chart + enabled: true + usePassword: false + + ## Specify a custom mongo uri. Not needed when the local mongo is enabled. + ## Secret: {{ .Release.Name }}-mongoconfig.mongodb-uri + # uri: + + ## name of secret to mount for mongo URI. The secret must have the `mongodb-uri` key + # uriSecretName: + +## JWKS configuration +jwks: {} + ## URI where the JWKS to validate JWTs is located. 
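+  ## e.g. uri: http://my-release-keys:8080/v1/keys/qlik.api.internal
+  ## (a sketch mirroring the fallback that deployment.yaml builds from the
+  ## release name; "my-release" stands in for the actual release name)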
+ ## If left blank the service will return 401 on all authenticated endpoints + # uri: + +## E2S configuration +elasticEncryption: + ## encryption is enabled by default + ## uri: http://encryption:8080 + enabled: false + +## Other environment variables +env: + ## log level + loglevel: "info" + enableJwtAuth: "true" + enableIndexBuild: "dataconnections:true,datacredentials:true,credmapping:true" + bypassQuery: "Qlik123456" + +## Configuration for Spaces support +## +spaces: {} + ## spaces is enabled by default + ## uri: http://spaces:6080 + +## Datafiles connection configuration +## +datafiles: {} + ## qix-datafiles is enabled by default + ## uri: http://qix-datafiles:8080 + +## NFS connection configuration +## +nfsconnector: + ## NFS connection is disenabled by default + ## Under feature flag + uri: "{{ .Release.Name }}-data-connector-nfs:50051" + +## Configuration for feature-flags service +## +features: + ## uri: http://feature-flags:8080 + uri: "http://{{ .Release.Name }}-feature-flags:8080" + +## Configuration for policy-decisions service +## +pds: + ## uri: http://policy-decisions:5080 + uri: "http://{{ .Release.Name }}-policy-decisions:5080" + +## NATS configuration +## +nats: + ## toggle for enabling messaging functionalities + enabled: true + ## pod label required to allow communication with NATS + podLabel: + key: "{{ .Release.Name }}-nats-client" + value: true + ## NATS server address + addr: "nats://{{ .Release.Name }}-nats-client:4222" + ## for localdev use this configuration instead + # addr: "nats://messaging-nats-client:4222" + ## NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + ## for localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + + ## deployment resources +resources: {} diff --git a/qliksense/charts/data-connector-common/Chart.yaml b/qliksense/charts/data-connector-common/Chart.yaml new file mode 100644 index 0000000..0160030 --- /dev/null +++ b/qliksense/charts/data-connector-common/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +appVersion: "1" +description: A Helm chart for Data Connector Common +name: data-connector-common +sources: +- https://github.com/qlik-trial/data-connector-common +version: 1.0.0 diff --git a/qliksense/charts/data-connector-common/README.md b/qliksense/charts/data-connector-common/README.md new file mode 100644 index 0000000..e84fc52 --- /dev/null +++ b/qliksense/charts/data-connector-common/README.md @@ -0,0 +1,53 @@ +# data-connector-common + +[data-connector-common](https://github.com/qlik-trial/data-connector-common) is the service that provides tool-kit based connectors. + +## Introduction + +This chart bootstraps a data-connector-common service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `data-connector-common`: + +```console +$ helm install --name data-connector-common qlik/data-connector-common +``` + +The command deploys data-connector-common on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `data-connector-common` deployment: + +```console +$ helm delete data-connector-common +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following tables lists the configurable parameters of the chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------- | ----------------------------------------------------------------------------------- | ---------------------------------------------------------- | +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `data-connector-common` | +| `image.tag` | image version | `1.5.0` | +| `image.pullPolicy` | image pull policy | `IfNotPresent` | +| `image.PullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | Service type | `ClusterIP` | +| `service.grpc` | Common Connector grpc port | `50095` | +| `service.port` | Common Connector http port | `3005` | +| `service.webport` | Common Connector web port | `6386` | +| `configs.data.ingressAuthUrl` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `deployment.container.replicas` | Number of hub replicas | `1` | +| `config.connectors` | Static list (space separated) of gRPC connector services | | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name data-connector-common -f values.yaml data-connector-common +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/.helmignore b/qliksense/charts/data-connector-common/charts/qlikcommon/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/Chart.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/Chart.yaml new file mode 100644 index 0000000..484b110 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 1.0.14 +description: Qlik resource contract chartbuilding components and helpers +home: https://github.com/qlik-trial/resource-contract +maintainers: +- email: boris.kuschel@qlik.com + name: bkuschel +name: qlikcommon +version: 1.2.4 diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/README.md b/qliksense/charts/data-connector-common/charts/qlikcommon/README.md new file mode 100644 index 0000000..664b529 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/README.md @@ -0,0 +1,837 @@ +# Qlik Common + +This chart is based off of the Common helper chart hosts in the kubernetes incubator +helm chart repo. Documentation below. + +## Common: The Helm Helper Chart + +This chart is designed to make it easier for you to build and maintain Helm +charts. + +It provides utilities that reflect best practices of Kubernetes chart development, +making it faster for you to write charts. 
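+To consume these helpers, a chart typically declares this library chart as a
+dependency in its `requirements.yaml`. A minimal sketch, with the repository
+alias standing in for wherever the qlikcommon chart is actually hosted:
+
+```yaml
+dependencies:
+  - name: qlikcommon
+    version: 1.2.4
+    repository: "@qlik"
+```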
+ +## Tips + +A few tips for working with Common: + +- Be careful when using functions that generate random data (like `common.fullname.unique`). + They may trigger unwanted upgrades or have other side effects. + +In this document, we use `RELEASE-NAME` as the name of the release. + +## Resource Kinds + +Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`. +We define some of the most common kinds in a way that lets you easily work with +them. + +The resource kind templates are designed to make it much faster for you to +define _basic_ versions of these resources. They allow you to extend and modify +just what you need, without having to copy around lots of boilerplate. + +To make use of these templates you must define a template that will extend the +base template (though it can be empty). The name of this template is then passed +to the base template, for example: + +```yaml +{{- template "common.service" (list . "mychart.service") -}} +{{- define "mychart.service" -}} +## Define overrides for your Service resource here, e.g. +# metadata: +# labels: +# custom: label +# spec: +# ports: +# - port: 8080 +{{- end -}} +``` + +Note that the `common.service` template defines two parameters: + + - The root context (usually `.`) + - A template name containing the service definition overrides + +A limitation of the Go template library is that a template can only take a +single argument. The `list` function is used to workaround this by constructing +a list or array of arguments that is passed to the template. + +The `common.service` template is responsible for rendering the templates with +the root context and merging any overrides. As you can see, this makes it very +easy to create a basic `Service` resource without having to copy around the +standard metadata and labels. + +Each implemented base resource is described in greater detail below. + +### `common.service` + +The `common.service` template creates a basic `Service` resource with the +following defaults: + +- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type` +- Named port `http` configured on port 80 +- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource + +Example template: + +```yaml +{{- template "common.service" (list . "mychart.mail.service") -}} +{{- define "mychart.mail.service" -}} +metadata: + name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: mail +spec: + ports: # composes the `ports` section of the service definition. + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: # this is appended to the default selector + protocol: mail +{{- end -}} +--- +{{ template "common.service" (list . "mychart.web.service") -}} +{{- define "mychart.web.service" -}} +metadata: + name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: www +spec: + ports: # composes the `ports` section of the service definition. + - name: www + port: 80 + targetPort: 8080 +{{- end -}} +``` + +The above template defines _two_ services: a web service and a mail service. + +The most important part of a service definition is the `ports` object, which +defines the ports that this service will listen on. Most of the time, +`selector` is computed for you. 
But you can replace it or add to it. + +The output of the example above is: + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: mail + release: release-name + name: release-name-service-mail +spec: + ports: + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: + app: service + release: release-name + protocol: mail + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: www + release: release-name + name: release-name-service-www +spec: + ports: + - name: www + port: 80 + targetPort: 8080 + type: ClusterIP +``` + +## `common.deployment` + +The `common.deployment` template defines a basic `Deployment`. Underneath the +hood, it uses `common.container` (see next section). + +By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}` +and `release: {{ .Release.Name | quote }` as this is also used as the selector. The +standard set of labels are not used as some of these can change during upgrades, +which causes the replica sets and pods to not correctly match. + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + replicas: {{ .Values.replicaCount }} +{{- end -}} +``` + +## `common.container` + +The `common.container` template creates a basic `Container` spec to be used +within a `Deployment` or `ReplicaSet`. It holds the following defaults: + +- The name is set to `main` +- Uses `.Values.image` to describe the image to run, with the following spec: + ```yaml + image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + ``` +- Exposes the named port `http` as port 80 +- Lays out the compute resources using `.Values.resources` + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "mychart.deployment.container") | indent 8}} +{{- end -}} +{{- define "mychart.deployment.container" -}} +## Define overrides for your Container here, e.g. +livenessProbe: + httpGet: + path: / + port: 80 +readinessProbe: + httpGet: + path: / + port: 80 +{{- end -}} +``` + +The above example creates a `Deployment` resource which makes use of the +`common.container` template to populate the PodSpec's container list. The usage +of this template is similar to the other resources, you must define and +reference a template that contains overrides for the container object. + +The most important part of a container definition is the image you want to run. +As mentioned above, this is derived from `.Values.image` by default. It is a +best practice to define the image, tag and pull policy in your charts' values as +this makes it easy for an operator to change the image registry, or use a +specific tag or version. Another example of configuration that should be exposed +to chart operators is the container's required compute resources, as this is +also very specific to an operators environment. 
An example `values.yaml` for +your chart could look like: + +```yaml +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +The output of running the above values through the earlier template is: + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: deployment + chart: deployment-0.1.0 + heritage: Tiller + release: release-name + name: release-name-deployment +spec: + template: + metadata: + labels: + app: deployment + spec: + containers: + - image: nginx:stable + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 80 + name: deployment + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: 80 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +## `common.configmap` + +The `common.configmap` template creates an empty `ConfigMap` resource that you +can override with your configuration. + +Example use: + +```yaml +{{- template "common.configmap" (list . "mychart.configmap") -}} +{{- define "mychart.configmap" -}} +data: + zeus: cat + athena: cat + julius: cat + one: |- + {{ .Files.Get "file1.txt" }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: cat + julius: cat + one: This is a file. + zeus: cat +kind: ConfigMap +metadata: + labels: + app: configmap + chart: configmap-0.1.0 + heritage: Tiller + release: release-name + name: release-name-configmap +``` + +## `common.secret` + +The `common.secret` template creates an empty `Secret` resource that you +can override with your secrets. + +Example use: + +```yaml +{{- template "common.secret" (list . "mychart.secret") -}} +{{- define "mychart.secret" -}} +data: + zeus: {{ print "cat" | b64enc }} + athena: {{ print "cat" | b64enc }} + julius: {{ print "cat" | b64enc }} + one: |- + {{ .Files.Get "file1.txt" | b64enc }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: Y2F0 + julius: Y2F0 + one: VGhpcyBpcyBhIGZpbGUuCg== + zeus: Y2F0 +kind: Secret +metadata: + labels: + app: secret + chart: secret-0.1.0 + heritage: Tiller + release: release-name + name: release-name-secret +type: Opaque +``` + +## `common.ingress` + +The `common.ingress` template is designed to give you a well-defined `Ingress` +resource, that can be configured using `.Values.ingress`. An example values file +that can be used to configure the `Ingress` resource is: + +```yaml +ingress: + hosts: + - chart-example.local + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + tls: + - secretName: chart-example-tls + hosts: + - chart-example.local +``` + +Example use: + +```yaml +{{- template "common.ingress" (list . 
"mychart.ingress") -}} +{{- define "mychart.ingress" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: + app: ingress + chart: ingress-0.1.0 + heritage: Tiller + release: release-name + name: release-name-ingress +spec: + rules: + - host: chart-example.local + http: + paths: + - backend: + serviceName: release-name-ingress + servicePort: 80 + path: / + tls: + - hosts: + - chart-example.local + secretName: chart-example-tls +``` + +## `common.persistentvolumeclaim` + +`common.persistentvolumeclaim` can be used to easily add a +`PersistentVolumeClaim` resource to your chart that can be configured using +`.Values.persistence`: + +| Value | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------- | +| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead | +| persistence.storageClass | `StorageClass` name | +| persistence.accessMode | Access mode for persistent volume | +| persistence.size | Size of persistent volume | +| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim | + +An example values file that can be used to configure the +`PersistentVolumeClaim` resource is: + +```yaml +persistence: + enabled: true + storageClass: fast + accessMode: ReadWriteOnce + size: 8Gi +``` + +Example use: + +```yaml +{{- template "common.persistentvolumeclaim" (list . "mychart.persistentvolumeclaim") -}} +{{- define "mychart.persistentvolumeclaim" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: persistentvolumeclaim + chart: persistentvolumeclaim-0.1.0 + heritage: Tiller + release: release-name + name: release-name-persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "fast" +``` + +## Partial API Objects + +When writing Kubernetes resources, you may find the following helpers useful to +construct parts of the spec. + +### EnvVar + +Use the EnvVar helpers within a container spec to simplify specifying key-value +environment variables or referencing secrets as values. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + containers: + - {{ template "common.container" (list . "mychart.deployment.container") }} +{{- end -}} +{{- define "mychart.deployment.container" -}} +{{- $fullname := include "common.fullname" . -}} +env: +- {{ template "common.envvar.value" (list "ZEUS" "cat") }} +- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + containers: + - env: + - name: ZEUS + value: cat + - name: ATHENA + valueFrom: + secretKeyRef: + key: athena + name: secret-name +... +``` + +### Volume + +Use the Volume helpers within a `Deployment` spec to help define ConfigMap and +PersistentVolumeClaim volumes. + +Example Use: + +```yaml +{{- template "common.deployment" (list . 
"mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + volumes: + - {{ template "common.volume.configMap" (list "config" "configmap-name") }} + - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + volumes: + - configMap: + name: configmap-name + name: config + - name: data + persistentVolumeClaim: + claimName: pvc-name +... +``` + +The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object: + +| Value | Description | +| ------------------------- | ----------------------------------------------------- | +| persistence.enabled | If false, creates an `emptyDir` instead | +| persistence.existingClaim | If set, uses this instead of the passed in claim name | + +## Utilities + +### `common.fullname` + +The `common.fullname` template generates a name suitable for the `name:` field +in Kubernetes metadata. It is used like this: + +```yaml +name: {{ template "common.fullname" . }} +``` + +The following different values can influence it: + +```yaml +# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +fullnameOverride: "some-name" + +# This adds a prefix +fullnamePrefix: "pre-" +# This appends a suffix +fullnameSuffix: "-suf" + +# Global versions of the above +global: + fullnamePrefix: "pp-" + fullnameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for release "happy-panda" and chart "wordpress" +name: happy-panda-wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.fullname" -}} + {{ template "common.fullname" . }}-my-stuff +{{- end -}} +``` + +### `common.fullname.unique` + +The `common.fullname.unique` variant of fullname appends a unique seven-character +sequence to the end of the common name field. + +This takes all of the same parameters as `common.fullname` + +Example template: + +```yaml +uniqueName: {{ template "common.fullname.unique" . }} +``` + +Example output: + +```yaml +uniqueName: release-name-fullname-jl0dbwx +``` + +It is also impacted by the prefix and suffix definitions, as well as by +`.Values.fullnameOverride` + +Note that the effective maximum length of this function is 63 characters, not 54. + +### `common.name` + +The `common.name` template generates a name suitable for the `app` label. It is used like this: + +```yaml +app: {{ template "common.name" . }} +``` + +The following different values can influence it: + +```yaml +# By default, name uses '{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +nameOverride: "some-name" + +# This adds a prefix +namePrefix: "pre-" +# This appends a suffix +nameSuffix: "-suf" + +# Global versions of the above +global: + namePrefix: "pp-" + nameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for chart "wordpress" +name: wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.name" -}} + {{ template "common.name" . 
}}-my-stuff +{{- end -}} +``` + +### `common.metadata` + +The `common.metadata` helper generates the `metadata:` section of a Kubernetes +resource. + +This takes three objects: + - .top: top context + - .fullnameOverride: override the fullname with this name + - .metadata + - .labels: key/value list of labels + - .annotations: key/value list of annotations + - .hook: name(s) of hook(s) + +It generates standard labels, annotations, hooks, and a name field. + +Example template: + +```yaml +{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }} +--- +{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }} +``` + +Example values: + +```yaml +bio: + name: example + labels: + first: matt + last: butcher + nick: technosophos + annotations: + format: bio + destination: archive + hook: pre-install + +pet: + fullnameOverride: Zeus + +``` + +Example output: + +```yaml +metadata: + name: release-name-metadata + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + first: "matt" + last: "butcher" + nick: "technosophos" + annotations: + "destination": "archive" + "format": "bio" + "helm.sh/hook": "pre-install" +--- +metadata: + name: Zeus + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + annotations: +``` + +Most of the common templates that define a resource type (e.g. `common.configmap` +or `common.job`) use this to generate the metadata, which means they inherit +the same `labels`, `annotations`, `nameOverride`, and `hook` fields. + +### `common.labelize` + +`common.labelize` turns a map into a set of labels. + +Example template: + +```yaml +{{- $map := dict "first" "1" "second" "2" "third" "3" -}} +{{- template "common.labelize" $map -}} +``` + +Example output: + +```yaml +first: "1" +second: "2" +third: "3" +``` + +### `common.labels.standard` + +`common.labels.standard` prints the standard set of labels. + +Example usage: + +``` +{{ template "common.labels.standard" . }} +``` + +Example output: + +```yaml +app: labelizer +heritage: "Tiller" +release: "RELEASE-NAME" +chart: labelizer-0.1.0 +``` + +### `common.hook` + +The `common.hook` template is a convenience for defining hooks. + +Example template: + +```yaml +{{ template "common.hook" "pre-install,post-install" }} +``` + +Example output: + +```yaml +"helm.sh/hook": "pre-install,post-install" +``` + +### `common.chartref` + +The `common.chartref` helper prints the chart name and version, escaped to be +legal in a Kubernetes label field. + +Example template: + +```yaml +chartref: {{ template "common.chartref" . 
}} +``` + +For the chart `foo` with version `1.2.3-beta.55+1234`, this will render: + +```yaml +chartref: foo-1.2.3-beta.55_1234 +``` + +(Note that `+` is an illegal character in label values) diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_certificates.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_certificates.tpl new file mode 100644 index 0000000..d385098 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_certificates.tpl @@ -0,0 +1,32 @@ +{{- define "common.ca-certificates.volume" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +{{- if .Values.global.certs.volume }} +- name: ca-certificates + {{- if .Values.global.certs.volume.hostPath }} + hostPath: + path: {{ .Values.global.certs.volume.hostPath }} + type: Directory + {{- end }} + {{- if .Values.global.certs.volume.existingVolumeClaim }} + persistentVolumeClaim: + claimName: {{ .Values.global.certs.volume.existingVolumeClaim }} + {{- end }} +{{- else }} +- name: ca-certificates + persistentVolumeClaim: + claimName: {{ .Release.Name }}-certs-pvc +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.ca-certificates.volumeMount" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +- name: ca-certificates + mountPath: {{ default "/etc/ssl/certs" .Values.certs.mountPath | quote }} + readOnly: true +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_chartref.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_chartref.tpl new file mode 100644 index 0000000..e6c1486 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_chartref.tpl @@ -0,0 +1,14 @@ +{{- /* +common.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. + +Example output: + + zookeeper-1.2.3 + wordpress-3.2.1_20170219 + +*/ -}} +{{- define "common.chartref" -}} + {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_configmap.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_configmap.yaml new file mode 100644 index 0000000..f04def2 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_configmap.yaml @@ -0,0 +1,32 @@ +{{- define "common.configmap.tpl" -}} +apiVersion: v1 +kind: ConfigMap +{{ template "common.metadata.configs" . }} +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.configs }} + {{- range $key, $value := $container.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.configs -}} + {{- range $key, $value := .Values.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.configmap" -}} +{{- template "common.util.merge" (append . 
"common.configmap.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_container.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_container.yaml new file mode 100644 index 0000000..4c51b35 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_container.yaml @@ -0,0 +1,98 @@ +{{- define "common.container.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ include "common.name" . }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +{{- if or .Values.configs .Values.secrets }} +env: +{{- if .Values.configs.data.natsUri }} + - name: NATS_CLIENT_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +{{- end }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +ports: +{{- $port := .Values.service.port }} +{{- if .container }}{{- if .container.port }} + {{- $port = .container.port }} +{{- end }}{{- end }} +- containerPort: {{ $port }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /health + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +readinessProbe: + httpGet: + path: /ready + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.container" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . 
"common.container.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_deployment.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_deployment.yaml new file mode 100644 index 0000000..0a46af0 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_deployment.yaml @@ -0,0 +1,93 @@ +{{- define "common.deployment.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: Deployment +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.deployment.replicas}} + replicas: {{ .Values.deployment.replicas }} +{{- end}} + template: + metadata: + annotations: + checksum/configs: {{ (print (include "common.configmap.tpl" .)) | sha256sum }} + checksum/secrets: {{ (print (include "common.secret.tpl" .)) | sha256sum }} +{{- if .Values.deployment }}{{- if .Values.deployment.annotations }} +{{ include "common.annote" (dict "annotations" .Values.deployment.annotations "root" . ) | indent 8 }} +{{- end }}{{- end }} + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment }}{{- if .Values.deployment.labels }} +{{ include "common.labelize" .Values.deployment.labels | indent 8 }} +{{- end }}{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.natsUri }} + {{ tpl .Values.configs.data.natsUri . | regexFind "//.*:" | trimAll ":" | trimAll "/" }}: "true" +{{- end }}{{- end }}{{- end }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: +{{- if contains $name .Release.Name }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- else }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) 
"-secrets") (include "common.fullname" .)) }} + - {{ template "common.volume.secret" (list (printf "%s-secrets" (.Release.Name)) (printf "%s" (.Release.Name))) }} +{{- end }} + +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.persistentVolumeClaim }} +{{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} +{{- if kindIs "map" $claim }} +{{- if eq $name "default" }} + - {{ template "common.volume.pvc" (list (include "common.fullname" $root) (include "common.fullname" $root) $claim) }} +{{- else }} + - {{ template "common.volume.pvc" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $claim) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.hostPath }} +{{- range $name, $hostPath:= .Values.persistence.hostPath }} +{{- if kindIs "map" $hostPath }} + - {{ template "common.volume.hostpath" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $hostPath) }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.emptyDir }} +{{- range $name, $dir:= .Values.persistence.emptyDir }} +{{- if kindIs "map" $dir }} +{{- if $dir.create }} + - {{ template "common.volume.emptydir" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $root.Values.persistence) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{ include "common.ca-certificates.volume" . | nindent 6 }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} +{{- if .Values.deployment }}{{- if .Values.deployment.initContainer }} + initContainers: + - +{{ include "common.initContainer.tpl" (set . "container" .Values.deployment.initContainer ) | indent 8 }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.deployment.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.deployment" -}} +{{- $top := first . -}} +{{- if and $top.Values.deployment }} +{{- template "common.util.merge" (append . "common.deployment.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_envvar.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_envvar.tpl new file mode 100644 index 0000000..39a997a --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_envvar.tpl @@ -0,0 +1,32 @@ +{{- define "common.envvar.value" -}} + {{- $name := index . 0 -}} + {{- $value := index . 1 -}} + + name: {{ $name }} + value: {{ default "" $value | quote }} +{{- end -}} + +{{- define "common.envvar.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + {{- $configMapKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + configMapKeyRef: + name: {{ $configMapName }}-configs + key: {{ $configMapKey }} +{{- end -}} + +{{- define "common.envvar.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + {{- $secretKey := index . 
2 -}} + + name: {{ $name }} + valueFrom: + secretKeyRef: + name: {{ $secretName }}-secrets + key: {{ $secretKey }} +{{- end -}} + diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_fullname.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_fullname.tpl new file mode 100644 index 0000000..0f6bc77 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_fullname.tpl @@ -0,0 +1,42 @@ +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.fullname" . -}}"' +*/ -}} +{{- define "common.fullname" -}} + {{- $global := default (dict) .Values.global -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +common.fullname.unique adds a random suffix to the unique name. + +This takes the same parameters as common.fullname + +*/ -}} +{{- define "common.fullname.unique" -}} + {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }} +{{- end }} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_hpa.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_hpa.yaml new file mode 100644 index 0000000..be4215d --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_hpa.yaml @@ -0,0 +1,31 @@ +{{- define "common.hpa.tpl" -}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +{{ template "common.metadata" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "common.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ if .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpa.targetAverageUtilizationMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end -}} +{{- define "common.hpa" -}} +{{- $top := first . -}} +{{- if and $top.Values.hpa }} +{{- template "common.util.merge" (append . 
"common.hpa.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_image.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_image.tpl new file mode 100644 index 0000000..6a2335a --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_image.tpl @@ -0,0 +1,21 @@ +{{/* Return the proper collections image name */}} +{{- define "common.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{ $image := .Values.image }} + {{- if .container }}{{- if .container.image }} + {{ $image = .container.image }} + {{- end -}}{{- end -}} + {{- $registry := default "docker.io" (default .Values.image.registry $image.registry) -}} + {{- $repository := $image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default .Values.image.tag $image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_ingress.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_ingress.yaml new file mode 100644 index 0000000..ab9a75d --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_ingress.yaml @@ -0,0 +1,49 @@ +{{- define "common.ingress.tpl" -}} +apiVersion: extensions/v1beta1 +kind: Ingress +{{ template "common.metadata" . }} + annotations: + kubernetes.io/ingress.class: {{ template "common.ingress.class" . }} + {{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.ingressAuthUrl }} + nginx.ingress.kubernetes.io/auth-url: {{ tpl .Values.configs.data.ingressAuthUrl . | quote }} + {{- end }}{{- end }}{{- end }} + {{- if .Values.ingress}}{{- if .Values.ingress.annotations }} + {{ include "common.annote" (dict "annotations" .Values.ingress.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + {{- if .Values.ingress }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "common.fullname" $ }} + servicePort: 80 + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} + {{- end }} +{{- define "common.ingress" -}} +{{- $top := first . -}} +{{- if and $top.Values.ingress }} +{{- template "common.util.merge" (append . 
"common.ingress.tpl") -}} +{{- end -}} +{{- end -}} + +{{- define "common.ingress.class" -}} + {{- $ingressClass := "nginx" }} + {{- if .Values.ingress }}{{- if .Values.ingress.class }} + {{- $ingressClass = .Values.ingress.class -}} + {{- end -}}{{- end -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_initContainer.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_initContainer.yaml new file mode 100644 index 0000000..3b12f55 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_initContainer.yaml @@ -0,0 +1,74 @@ +{{- define "common.initContainer.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ .container.name }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +env: + - name: SERVICE_NAME + value: {{ .Chart.Name }} +{{- if or .container.configs .container.secrets }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.initContainer" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . 
"common.initContainer.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata.yaml new file mode 100644 index 0000000..83c42d5 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata.yaml @@ -0,0 +1,35 @@ +{{- /* +common.metadata creates a standard metadata header. +It creates a 'metadata:' section with name and labels. +*/ -}} +{{ define "common.metadata" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.configs" -}} +metadata: + name: {{ template "common.fullname" . }}-configs + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.secrets" -}} +metadata: + name: {{ template "common.fullname" . }}-secrets + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.workload" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_annotations.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_annotations.tpl new file mode 100644 index 0000000..ed28474 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_annotations.tpl @@ -0,0 +1,23 @@ +{{- /* +common.hook defines a hook. + +This is to be used in a 'metadata.annotations' section. + +This should be called as 'template "common.metadata.hook" "post-install"' + +Any valid hook may be passed in. Separate multiple hooks with a ",". +*/ -}} +{{- define "common.hook" -}} +"helm.sh/hook": {{printf "%s" . | quote}} +{{- end -}} + +{{- define "common.annote" -}} +{{ $root := .root}} +{{- range $k, $v := .annotations }} +{{- if kindIs "string" $v }} +{{ $k | quote }}: {{ tpl $v $root | quote }} +{{- else -}} +{{ $k | quote }}: {{ $v }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_labels.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_labels.tpl new file mode 100644 index 0000000..15fe00c --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_metadata_labels.tpl @@ -0,0 +1,28 @@ +{{- /* +common.labelize takes a dict or map and generates labels. + +Values will be quoted. Keys will not. + +Example output: + + first: "Matt" + last: "Butcher" + +*/ -}} +{{- define "common.labelize" -}} +{{- range $k, $v := . }} +{{ $k }}: {{ $v | quote }} +{{- end -}} +{{- end -}} + +{{- /* +common.labels.standard prints the standard Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "common.labels.standard" -}} +app: {{ template "common.name" . }} +chart: {{ template "common.chartref" . 
}} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_name.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_name.tpl new file mode 100644 index 0000000..1d42fb0 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_name.tpl @@ -0,0 +1,29 @@ +{{- /* +name defines a template for the name of the chart. It should be used for the `app` label. +This is common practice in many Kubernetes manifests, and is not Helm-specific. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.name" . -}}"' +*/ -}} +{{- define "common.name"}} + {{- $global := default (dict) .Values.global -}} + {{- $base := default .Chart.Name .Values.nameOverride -}} + {{- $gpre := default "" $global.namePrefix -}} + {{- $pre := default "" .Values.namePrefix -}} + {{- $suf := default "" .Values.nameSuffix -}} + {{- $gsuf := default "" $global.nameSuffix -}} + {{- $name := print $gpre $pre $base $suf $gsuf -}} + {{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_networkpolicy.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_networkpolicy.yaml new file mode 100644 index 0000000..e0c4922 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- define "common.networkpolicy.tpl" -}} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +{{ template "common.metadata" . }} +spec: + podSelector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} + policyTypes: + - Egress + egress: + - to: +{{- if .Values.configs }}{{- if .Values.configs.data }} +{{- if .Values.configs.data.natsUri }} + - podSelector: + matchLabels: + app: "nats" + release: {{ .Values.natsRelease | default .Release.Name | quote }} + - podSelector: + matchLabels: + app: "nats-streaming" + release: {{ .Values.natsRelease | default .Release.Name | quote }} +{{- end }} +{{- if or .Values.configs.data.tokenAuthUri .Values.configs.data.ingressAuthUrl }} + - podSelector: + matchLabels: + app: "edge-auth" + release: {{ .Values.edgeAuthRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.keysUri }} + - podSelector: + matchLabels: + app: "keys" + release: {{ .Values.keysRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.pdsUri }} + - podSelector: + matchLabels: + app: "policy-decisions" + release: {{ .Values.pdsRelease | default .Release.Name | quote }} +{{- end }} +{{- end }}{{- end }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP +{{- end }} +{{- define "common.networkpolicy" -}} +{{- template "common.util.merge" (append . 
"common.networkpolicy.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaim.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaim.yaml new file mode 100644 index 0000000..4c2ed62 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaim.yaml @@ -0,0 +1,47 @@ +{{- define "common.persistentvolumeclaim.tpl" -}} +{{- $persistence := default .Values.persistence .claim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +{{ template "common.metadata" . }} +spec: + accessModes: + - {{ $persistence.accessMode | quote }} + resources: + requests: + storage: {{ $persistence.size | quote }} +{{- if $persistence.matchLabels }} + selector: + matchLabels: +{{- include "common.labelize" $persistence.matchLabels | indent 6 -}} +{{- end -}} +{{- if $persistence.storageClass }} +{{- if (eq "-" $persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ $persistence.storageClass }}" +{{- end }} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} + storageClassName: "" + {{- else -}} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- define "common.persistentvolumeclaim" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if $top.Values.persistence.enabled -}}{{- if $top.Values.persistence.persistentVolumeClaim -}} + {{- if not $top.claim -}} + {{- $top = set $top "claim" $top.Values.persistence.persistentVolumeClaim.default -}} + {{- end -}} + {{- if not $top.claim.existingClaim -}} + {{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}} + {{- end -}} +{{- end -}}{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaims.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaims.yaml new file mode 100644 index 0000000..2cb894b --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_persistentvolumeclaims.yaml @@ -0,0 +1,27 @@ +{{- define "common.persistentvolumeclaims" -}} +{{- $root := . 
-}} +{{- if .Values.persistence -}}{{- if .Values.persistence.enabled -}} + {{- if .Values.persistence.persistentVolumeClaim -}} + {{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} + {{- if kindIs "map" $claim }} + {{- if eq $name "default" }} + {{- $root = set $root "claim" $claim -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- else -}} + {{- $values := set $root.Values "fullnameOverride" (printf "%s-%s" (include "common.fullname" $root) $name) -}} + {{- $root = set (set $root "claim" $claim) "Values" $values -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- end -}} + {{- end -}} + {{- printf "\n" -}}{{- printf "\n" -}} + {{- printf "---" -}} + {{- printf "\n" -}} + {{- $_:= unset $root.Values "fullnameOverride" -}} + {{- end -}} + {{- end -}} +{{- end -}}{{- end -}} +{{- end -}} + +## No override templates are needed for the case of defining multiple PVCs +{{- define "mychart.persistentvolumeclaim" -}} +{{- end }} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_podSecurityPolicy.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_podSecurityPolicy.yaml new file mode 100644 index 0000000..c06f607 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_podSecurityPolicy.yaml @@ -0,0 +1,55 @@ +{{- define "common.podsecuritypolicy.tpl" -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +{{ template "common.metadata" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end -}} +{{- define "common.podsecuritypolicy" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}}{{- if ne ($top.Values.podSecurityPolicy | default false) false -}} +{{- template "common.util.merge" (append . 
"common.podsecuritypolicy.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_role.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_role.yaml new file mode 100644 index 0000000..cf1d6f6 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_role.yaml @@ -0,0 +1,23 @@ +{{- define "common.role.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +{{ template "common.metadata" . }} +rules: +{{- if .Values.podSecurityPolicy | default false }} +- apiGroups: + - policy + resourceNames: + - {{ template "common.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} +{{- end -}} +{{- define "common.role" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.role.tpl") -}} +{{- end -}} +{{- end -}} + diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_rolebinding.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_rolebinding.yaml new file mode 100644 index 0000000..021e896 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_rolebinding.yaml @@ -0,0 +1,19 @@ +{{- define "common.rolebinding.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +{{ template "common.metadata" . }} +roleRef: + kind: Role + name: {{ template "common.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} +{{- define "common.rolebinding" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.rolebinding.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_secret.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_secret.yaml new file mode 100644 index 0000000..45ec55f --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_secret.yaml @@ -0,0 +1,45 @@ +{{- define "common.secret.tpl" -}} +apiVersion: v1 +kind: Secret +{{ template "common.metadata.secrets" . }} +type: Opaque +data: + {{- $root := . 
-}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.secrets }} + {{- range $key, $value := $container.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- range $key, $value := $container.secrets.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets -}} + {{- if .Values.secrets.stringData -}} + {{- range $key, $value := .Values.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets.data -}} + {{- range $key, $value := .Values.secrets.data }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.secret" -}} +{{- template "common.util.merge" (append . "common.secret.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_service.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_service.yaml new file mode 100644 index 0000000..fb4a9e8 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_service.yaml @@ -0,0 +1,25 @@ +{{- define "common.service.tpl" -}} +apiVersion: v1 +kind: Service +{{ template "common.metadata" . }} + annotations: + {{- if .Values.service }}{{- if .Values.service.annotations }} + {{ include "common.annote" (dict "annotations" .Values.service.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ template "common.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- end -}} +{{- define "common.service" -}} +{{- $top := first . -}} +{{- if and $top.Values.service}} +{{- template "common.util.merge" (append . "common.service.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_serviceaccount.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_serviceaccount.yaml new file mode 100644 index 0000000..534a4bf --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- define "common.serviceaccount.tpl" -}} +apiVersion: v1 +kind: ServiceAccount +{{ template "common.metadata" . }} +{{- end -}} +{{- define "common.serviceaccount" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.serviceaccount.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_statefulset.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_statefulset.yaml new file mode 100644 index 0000000..04155e3 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_statefulset.yaml @@ -0,0 +1,44 @@ +{{- define "common.statefulset.tpl" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: statefulset +{{ template "common.metadata.workload" . 
}} +spec: +{{- if .Values.statefulset.replicas}} + replicas: {{ .Values.statefulset.replicas }} +{{- end}} + template: + metadata: + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- if .Values.persistence }} + - {{ template "common.volume.pvc" (list (include "common.fullname" .) (include "common.fullname" .) .Values.persistence) }} +{{- end }} +{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.statefulset.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.statefulset" -}} +{{- $top := first . -}} +{{- if and $top.Values.statefulset }} +{{- template "common.util.merge" (append . "common.statefulset.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_transformers.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_transformers.tpl new file mode 100644 index 0000000..f42e742 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_transformers.tpl @@ -0,0 +1,41 @@ +{{- define "common.transformers" -}} +{{- $fullname := include "common.fullname" . -}} +{{- $root := . 
-}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $release := .Release.Name -}} +{{- $commonSecretList := list "mongodbUri" "redisUri" "redisPassword" -}} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end -}}{{- end -}} +{{- if $secrets -}} +{{- range $key, $value := $secrets.stringData }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- range $key, $value := $secrets.data }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- end }} +{{ $configs := .Values.configs}} +{{- if .container }}{{- if .container.configs }} +{{ $configs = .container.configs}} +{{- end -}}{{- end -}} +{{- if $configs -}} +{{- range $key, $value := $configs.data }} +- {{ template "common.envvar.configmap" (list (print $key | snakecase | upper) $fullname $key ) }} +{{- end }} +{{- range $key, $value := $configs }} +{{- if ne $key "data" }} +- {{ template "common.envvar.value" (list (print $key | snakecase | upper) $value ) }} +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_util.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_util.tpl new file mode 100644 index 0000000..6abeec0 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_util.tpl @@ -0,0 +1,15 @@ +{{- /* +common.util.merge will merge two YAML templates and output the result. + +This takes an array of three values: +- the top context +- the template name of the overrides (destination) +- the template name of the base (source) + +*/ -}} +{{- define "common.util.merge" -}} +{{- $top := first . -}} +{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} +{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}} +{{- regexReplaceAll ".*: null|.*: nil" (toYaml (merge $overrides $tpl)) "${1}" -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_volume.tpl b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_volume.tpl new file mode 100644 index 0000000..360e239 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/templates/_volume.tpl @@ -0,0 +1,62 @@ +{{- define "common.volume.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + + name: {{ $name }} + configMap: + name: {{ $configMapName }}-configs +{{- end -}} + +{{- define "common.volume.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + + name: {{ $name }} + secret: + secretName: {{ $secretName }}-secrets +{{- end -}} + +{{- define "common.volume.pvc" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $claim := index . 
2 -}} + + name: {{ $name }} + {{- if $claim }} + persistentVolumeClaim: + claimName: {{ $claim.existingClaim | default $claimName }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.emptydir" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + {{- if $persistence.emptyDir }} + name: {{ $name }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.hostpath" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + name: {{ $name }} + hostPath: + path: {{ $persistence.path }} + type: {{ $persistence.type }} +{{- end -}} + + +{{- define "common.volume.mount" -}} +{{- $volume := index . 0 -}} +{{- $mountPath := index . 1 -}} +- name: {{ $volume }} + mountPath: {{ default "/tmp" $mountPath.mountPath | quote }} + readOnly: {{ default false $mountPath.readOnly }} +{{- end -}} diff --git a/qliksense/charts/data-connector-common/charts/qlikcommon/values.yaml b/qliksense/charts/data-connector-common/charts/qlikcommon/values.yaml new file mode 100644 index 0000000..b7cf514 --- /dev/null +++ b/qliksense/charts/data-connector-common/charts/qlikcommon/values.yaml @@ -0,0 +1,4 @@ +# Default values for commons. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value diff --git a/qliksense/charts/data-connector-common/requirements.yaml b/qliksense/charts/data-connector-common/requirements.yaml new file mode 100644 index 0000000..1fcf086 --- /dev/null +++ b/qliksense/charts/data-connector-common/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: qlikcommon + version: "1.2.4" + repository: "@qlik" + condition: global.component-common-imports diff --git a/qliksense/charts/data-connector-common/templates/manifest.yaml b/qliksense/charts/data-connector-common/templates/manifest.yaml new file mode 100644 index 0000000..23725c4 --- /dev/null +++ b/qliksense/charts/data-connector-common/templates/manifest.yaml @@ -0,0 +1,71 @@ +{{- template "common.configmap" (list . "data-connector-common.configmap") -}} +{{- define "data-connector-common.configmap" -}} +{{- end }} + +--- +{{ template "common.secret" (list . "data-connector-common.secret") -}} +{{- define "data-connector-common.secret" -}} +{{- end }} + +--- +{{ template "common.ingress" (list . "data-connector-common.ingress") -}} +{{- define "data-connector-common.ingress" -}} +spec: + rules: + - http: + paths: + - path: /customdata/64/(QlikConnectorsCommonService)/(.+) + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.webport }} +{{- end }} + +--- +{{ template "common.service" (list . "data-connector-common.service") -}} +{{- define "data-connector-common.service" -}} +spec: + ports: + - name: http + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + - name: http-web + port: {{ .Values.service.webport }} + targetPort: {{ .Values.service.webport }} + protocol: TCP + - name: grpc + port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP +{{- end }} + +--- +{{ template "common.deployment" (list . "data-connector-common.deployment") -}} +{{- define "data-connector-common.deployment" -}} +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . 
"container" .Values.deployment.container) "data-connector-common.deployment.container") | indent 8 }} +{{- end }} + +{{- define "data-connector-common.deployment.container" -}} +ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP +readinessProbe: + httpGet: + path: /health + port: http +livenessProbe: + httpGet: + path: /health + port: http +{{- end }} + +--- diff --git a/qliksense/charts/data-connector-common/values.yaml b/qliksense/charts/data-connector-common/values.yaml new file mode 100644 index 0000000..5e544fa --- /dev/null +++ b/qliksense/charts/data-connector-common/values.yaml @@ -0,0 +1,54 @@ +# Default values for data-connector-common. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + ## Default registry where the repository is pulled from. + registry: ghcr.io + ## Repository. + repository: qlik-download/data-connector-common + ## data-connector-common image version. + tag: 1.5.0 + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + pullPolicy: IfNotPresent + ## Secrets for pulling images from a private docker registry. + pullSecrets: + - name: artifactory-docker-secret + +configs: + standalone: "false" + dataHost: 0.0.0.0 + serviceMode: "true" + connectors: "" + data: + dataPortRangeStart: "{{ .Values.service.grpc }}" + dataPortRangeEnd: "{{ .Values.service.grpc }}" + ingressAuthUrl: "http://{{ .Release.Name }}-edge-auth.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth" + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 3005 + grpc: 50095 + webport: 6386 +## Metrics configuration +## Prometheus configuration +## The annotations for prometheus scraping are included + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.port }}" + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## class provides an kubernetes.io/ingress.class override of default nginx + class: nginx + annotations: + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?returnto=$escaped_request_uri + nginx.ingress.kubernetes.io/rewrite-target: "/$1/$2" + +deployment: + replicas: 1 diff --git a/qliksense/charts/data-connector-nfs/.helmignore b/qliksense/charts/data-connector-nfs/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-connector-nfs/Chart.yaml b/qliksense/charts/data-connector-nfs/Chart.yaml new file mode 100644 index 0000000..1ef2314 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +description: This service implements a new connector which will provide engine access + to the files in one or more configurable mounted disk volumes. 
+home: https://www.qlik.com +keywords: +- data-connector-nfs +name: data-connector-nfs +sources: +- https://github.com/qlik-trial/data-connector-nfs +version: 1.2.1 diff --git a/qliksense/charts/data-connector-nfs/README.md b/qliksense/charts/data-connector-nfs/README.md new file mode 100644 index 0000000..22bcd88 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/README.md @@ -0,0 +1,183 @@ + +# Data-Connector-Nfs + +[data-connector-nfs](https://github.com/qlik-trial/data-connector-nfs) provides data files upload and management api. + +## Introduction + +This chart bootstraps a data-connector-nfs service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/data-connector-nfs +``` + +The command deploys `data-connector-nfs` on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the `data-connector-nfs` chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repositories are pulled from | `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `data-connector-nfs` | +| `image.tag` | image version | `0.0.12` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | a list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `replicaCount` | number of replicas | `1` | +| `service.type` | service type | `ClusterIP` | +| `service.port` | data-connector-nfs listen port | `8080` | +| `resources.requests.cpu` | CPU reservation | `0.1` | +| `resources.requests.memory` | Memory reservation | `128Mi` | +| `resources.limits.cpu` | CPU limit | `0.5` | +| `resources.limits.memory` | Memory limit | `512Mi` | +| `metrics.prometheus.enabled` | enable annotations for prometheus scraping | `true` | +| `configs.data.keysUri` | URI where the JWKS to validate JWTs is located | `http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal` | +| `secrets.stringData.tokenAuthPrivateKey` | The private key that corresponds to the JWKS in the authentication service | See [values](./values.yaml) | +| `secrets.stringData.tokenAuthPrivateKeyId` | The key ID that corresponds to the JWKS in the authentication service | `zpiZ-klS65lfcq1K0-o29Sa0AAZYYr4ON_1VCtAbMEA` | +| `configs.data.tokenAuthUri` | The URI to the authentication service to get an internal token | `http://{{ .Release.Name }}-edge-auth:8080` | +| `persistence.enabled` | Configure persistent volume claims for NFS connections | `false` | +| `configs.data.spacesUri` | URI where the Spaces service is located | `http://{.Release.Name}-spaces:6080` | +| `configs.data.pdsUri` | URI where the Policy Decisions service is located | 
`http://{.Release.Name}-policy-decisions:5080` | +| `hpa.enabled` | Toggle horizontal pod autoscaler. | `false` | +| `hpa.minReplicas` | min replicas for pod autoscaler. | `3` | +| `hpa.maxReplicas` | max replicas for pod autoscaler. | `6` | +| `hpa.targetAverageUtilizationCpu` | Cpu utilization target for pod autoscaler. | `80` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install --name my-release -f values.yaml qlik/data-connector-nfs +``` + +## Configuring Volume Mounts and Declaring NFS Connections +### Persistent Volumes +Persistent volumes are created using standard Kubernetes manifests. The types of storage provisioners that are allowed depend on the Kubernetes environment that QSE is being deployed to, as each environment has its own set of supported provisioners. Many Kubernetes environments have NFS, or similar networked drive, provisioners. The requirement, from the NFS connector side of things, is that any persistent volume must support either the ReadWriteMany or ReadOnlyMany access modes. The following is an example of declaring a persistent volume using the docker 'hostpath' provisioner: + +``` +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: localstorage +provisioner: docker.io/hostpath +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-connector-nfs-test-pv + labels: + type: local +spec: + storageClassName: localstorage + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + # This is the path to the directory on your laptop that you want + # to give the NFS connector access to. + path: "/some/data/directory" + type: "Directory" +``` + +The manifest gets deployed using the ```kubectl apply -f manifest.yaml``` command. + +### Persistent Volume Claims +Persistent volume claims which bind to the previously declared persistent volumes are defined in the values.yaml override file used to configure the data-connector-nfs pods. The claims are defined in the ```persistence``` section of the values.yaml file. Here is an example of declaring 2 persistent volume claims: + +``` +persistence: + enabled: true + pvc-1: + storageClass: localstorage + accessMode: ReadWriteMany + size: 5Gi + pvc-2: + storageClass: localstorage + accessMode: ReadOnlyMany + size: 5Gi +``` + +The claim names (in this example, pvc-1, pvc-2) can be whatever you want them to be. What is important is that the properties declared for these PVCs (storageClass, accessMode, etc.) match the properties of the persistent volumes that were declared previously. This property matching is used by Kubernetes to bind the PVCs to the existing PVs. + +### Volume Mounts +Next, the PVCs declared in the previous section need to get mounted into the data-connector-nfs pod containers. This is also done in the values.yaml override file in the ```deployment``` section. Here is an example of declaring 2 volume mounts which mount in the above 2 PVCs: + +``` +deployment: + container: + volumeMounts: + pvc-1: + mountPath: /tmp/MyReadWriteDir/ + pvc-2: + readOnly: true + mountPath: /tmp/MyReadOnlyDir/ +``` + +Here, the volume mount names (pvc-1, pvc-2) must match the PVC names declared in the previous section. The mountPath can be any directory, but using subdirectories of /tmp/ is a reasonable convention to use. 
Declaring a volume mount as readOnly will allow the connector to enforce the read-only nature of this directory. + +### NFS Connection Declarations +The NFS connections are defined at deployment time; users do not create these connections. Each connection is declared to live in a shared space. The permissions on the shared space define the access that users have to the NFS connections within that space. NFS connections are also defined in the values.yaml override file in the ```configs``` section. Here is an example of declaring 2 NFS connections which provide access to the volume mounts defined in the previous section: + +``` +configs: + data: + nfsConnections_0_Name: "NfsConnection1" + nfsConnections_0_Path: "/tmp/MyReadWriteDir" + nfsConnections_0_SpaceId: "5e5422dcb6dfec00014ffaea" + nfsConnections_1_Name: "NfsConnection2" + nfsConnections_1_Path: "/tmp/MyReadOnlyDir/SomeSubDir" + nfsConnections_1_SpaceId: "5e5422dcb6dfec00014ffaea" + +``` + +Each connection is defined by 3 properties: Name, Path, and SpaceId. The space ID defines which shared space the NFS connection lives in. In this example, both NFS connections reside in the same space. The Path must be the root directory of one of the volume mounts declared in the previous section, or a subdirectory of one of those root directories. It is possible to define multiple NFS connections which point to multiple subdirectories within the same volume mount root directory. The 3 properties for each NFS connection declaration are grouped together using an ascending numerical index for each connection that is declared (in this example, 0 & 1). + +### Update DCaaS config to make sure it recognizes the NFS Connector +The dcaas service may not recognize the data-connector-nfs service by default. If this is the case, then its configuration needs to be updated to recognize it using a values.yaml override file in the ```env``` section. Here is an example of this that updates dcaas in a qsefe deployment: + +``` +dcaas: + env: + connector_service: "{{ .Release.Name }}-data-connector-rest-rld:{{ .Release.Name }}-data-connector-rest-cmd:50060 {{ .Release.Name }}-data-connector-qwc-rld:{{ .Release.Name }}-data-connector-qwc-cmd:50060 {{ .Release.Name }}-data-connector-odbc-rld:{{ .Release.Name }}-data-connector-odbc-cmd:50060 {{ .Release.Name }}-data-connector-sap-sql-rld:{{ .Release.Name }}-data-connector-sap-sql-cmd:50060 {{ .Release.Name }}-qix-datafiles:50051 {{ .Release.Name }}-data-connector-nfs:50051" +``` + +It can be seen that a pointer to data-connector-nfs running on port 50051 was added to the end of the `connector_service` list. + +DCaaS also requires that a data-connector-nfs feature flag is enabled in order to recognize the connector. In a qsefe deployment, this feature flag would be enabled as follows: + +``` +feature-flags: + configmaps: + create: true + featureFlagsConfig: + { + "globalFeatures": { + "data-connector-nfs": true, + ... + } + } +``` + +If you run into any issues with configuring dcaas, here is a good guide for troubleshooting: . + +In order for the changes to the values.yaml file to be applied, the data-connector-nfs and dcaas helm charts (either standalone, or as part of the qsefe chart-of-charts) need to be redeployed (using the ```helm upgrade --install``` command). The declared NFS connections will then be available in both the Data Load Editor and Data Manager in QSE. 
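+
+For reference, the persistence, volume mount, and NFS connection snippets above can be combined into a single values.yaml override. The following is only a minimal sketch, reusing the illustrative names from the earlier examples (pvc-1, pvc-2, NfsConnection1, NfsConnection2 and the sample space ID); adjust it to your own volumes and spaces:
+
+```
+persistence:
+  enabled: true
+  pvc-1:
+    storageClass: localstorage
+    accessMode: ReadWriteMany
+    size: 5Gi
+  pvc-2:
+    storageClass: localstorage
+    accessMode: ReadOnlyMany
+    size: 5Gi
+
+deployment:
+  container:
+    volumeMounts:
+      pvc-1:
+        mountPath: /tmp/MyReadWriteDir/
+      pvc-2:
+        readOnly: true
+        mountPath: /tmp/MyReadOnlyDir/
+
+configs:
+  data:
+    nfsConnections_0_Name: "NfsConnection1"
+    nfsConnections_0_Path: "/tmp/MyReadWriteDir"
+    nfsConnections_0_SpaceId: "5e5422dcb6dfec00014ffaea"
+    nfsConnections_1_Name: "NfsConnection2"
+    nfsConnections_1_Path: "/tmp/MyReadOnlyDir/SomeSubDir"
+    nfsConnections_1_SpaceId: "5e5422dcb6dfec00014ffaea"
+```
+
+Deploying this file with the same ```helm upgrade --install``` command described above applies all three pieces of configuration in one pass.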
+ +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/.helmignore b/qliksense/charts/data-connector-nfs/charts/qlikcommon/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/Chart.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/Chart.yaml new file mode 100644 index 0000000..4a92272 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 1.0.14 +description: Qlik resource contract chartbuilding components and helpers +home: https://github.com/qlik-trial/resource-contract +maintainers: +- email: boris.kuschel@qlik.com + name: bkuschel +name: qlikcommon +version: 1.2.5 diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/README.md b/qliksense/charts/data-connector-nfs/charts/qlikcommon/README.md new file mode 100644 index 0000000..664b529 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/README.md @@ -0,0 +1,837 @@ +# Qlik Common + +This chart is based on the Common helper chart hosted in the Kubernetes incubator +Helm chart repo. Documentation below. + +## Common: The Helm Helper Chart + +This chart is designed to make it easier for you to build and maintain Helm +charts. + +It provides utilities that reflect best practices of Kubernetes chart development, +making it faster for you to write charts. + +## Tips + +A few tips for working with Common: + +- Be careful when using functions that generate random data (like `common.fullname.unique`). + They may trigger unwanted upgrades or have other side effects. + +In this document, we use `RELEASE-NAME` as the name of the release. + +## Resource Kinds + +Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`. +We define some of the most common kinds in a way that lets you easily work with +them. + +The resource kind templates are designed to make it much faster for you to +define _basic_ versions of these resources. They allow you to extend and modify +just what you need, without having to copy around lots of boilerplate. + +To make use of these templates you must define a template that will extend the +base template (though it can be empty). The name of this template is then passed +to the base template, for example: + +```yaml +{{- template "common.service" (list . "mychart.service") -}} +{{- define "mychart.service" -}} +## Define overrides for your Service resource here, e.g. +# metadata: +# labels: +# custom: label +# spec: +# ports: +# - port: 8080 +{{- end -}} +``` + +Note that the `common.service` template takes two parameters: + + - The root context (usually `.`) + - A template name containing the service definition overrides + +A limitation of the Go template library is that a template can only take a +single argument. The `list` function is used to work around this by constructing +a list or array of arguments that is passed to the template. 
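+
+As a small illustration of this pattern (a hypothetical helper, not one shipped with this chart), a template that receives such a list can unpack it with `first` and `index`:
+
+```yaml
+{{- define "mychart.greeting" -}}
+{{- /* the root context is passed as the first element of the list */ -}}
+{{- $top := first . -}}
+{{- /* additional arguments follow as further list elements */ -}}
+{{- $who := index . 1 -}}
+greeting: Hello {{ $who }} from {{ $top.Release.Name }}
+{{- end -}}
+```
+
+which could then be invoked as `{{ include "mychart.greeting" (list . "world") }}`.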
+ +The `common.service` template is responsible for rendering the templates with +the root context and merging any overrides. As you can see, this makes it very +easy to create a basic `Service` resource without having to copy around the +standard metadata and labels. + +Each implemented base resource is described in greater detail below. + +### `common.service` + +The `common.service` template creates a basic `Service` resource with the +following defaults: + +- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type` +- Named port `http` configured on port 80 +- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource + +Example template: + +```yaml +{{- template "common.service" (list . "mychart.mail.service") -}} +{{- define "mychart.mail.service" -}} +metadata: + name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: mail +spec: + ports: # composes the `ports` section of the service definition. + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: # this is appended to the default selector + protocol: mail +{{- end -}} +--- +{{ template "common.service" (list . "mychart.web.service") -}} +{{- define "mychart.web.service" -}} +metadata: + name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: www +spec: + ports: # composes the `ports` section of the service definition. + - name: www + port: 80 + targetPort: 8080 +{{- end -}} +``` + +The above template defines _two_ services: a web service and a mail service. + +The most important part of a service definition is the `ports` object, which +defines the ports that this service will listen on. Most of the time, +`selector` is computed for you. But you can replace it or add to it. + +The output of the example above is: + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: mail + release: release-name + name: release-name-service-mail +spec: + ports: + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: + app: service + release: release-name + protocol: mail + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: www + release: release-name + name: release-name-service-www +spec: + ports: + - name: www + port: 80 + targetPort: 8080 + type: ClusterIP +``` + +## `common.deployment` + +The `common.deployment` template defines a basic `Deployment`. Underneath the +hood, it uses `common.container` (see next section). + +By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}` +and `release: {{ .Release.Name | quote }` as this is also used as the selector. The +standard set of labels are not used as some of these can change during upgrades, +which causes the replica sets and pods to not correctly match. + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. 
+spec: + replicas: {{ .Values.replicaCount }} +{{- end -}} +``` + +## `common.container` + +The `common.container` template creates a basic `Container` spec to be used +within a `Deployment` or `ReplicaSet`. It holds the following defaults: + +- The name is set to `main` +- Uses `.Values.image` to describe the image to run, with the following spec: + ```yaml + image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + ``` +- Exposes the named port `http` as port 80 +- Lays out the compute resources using `.Values.resources` + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "mychart.deployment.container") | indent 8}} +{{- end -}} +{{- define "mychart.deployment.container" -}} +## Define overrides for your Container here, e.g. +livenessProbe: + httpGet: + path: / + port: 80 +readinessProbe: + httpGet: + path: / + port: 80 +{{- end -}} +``` + +The above example creates a `Deployment` resource which makes use of the +`common.container` template to populate the PodSpec's container list. The usage +of this template is similar to the other resources, you must define and +reference a template that contains overrides for the container object. + +The most important part of a container definition is the image you want to run. +As mentioned above, this is derived from `.Values.image` by default. It is a +best practice to define the image, tag and pull policy in your charts' values as +this makes it easy for an operator to change the image registry, or use a +specific tag or version. Another example of configuration that should be exposed +to chart operators is the container's required compute resources, as this is +also very specific to an operators environment. An example `values.yaml` for +your chart could look like: + +```yaml +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +The output of running the above values through the earlier template is: + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: deployment + chart: deployment-0.1.0 + heritage: Tiller + release: release-name + name: release-name-deployment +spec: + template: + metadata: + labels: + app: deployment + spec: + containers: + - image: nginx:stable + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 80 + name: deployment + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: 80 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +## `common.configmap` + +The `common.configmap` template creates an empty `ConfigMap` resource that you +can override with your configuration. + +Example use: + +```yaml +{{- template "common.configmap" (list . "mychart.configmap") -}} +{{- define "mychart.configmap" -}} +data: + zeus: cat + athena: cat + julius: cat + one: |- + {{ .Files.Get "file1.txt" }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: cat + julius: cat + one: This is a file. 
+ zeus: cat +kind: ConfigMap +metadata: + labels: + app: configmap + chart: configmap-0.1.0 + heritage: Tiller + release: release-name + name: release-name-configmap +``` + +## `common.secret` + +The `common.secret` template creates an empty `Secret` resource that you +can override with your secrets. + +Example use: + +```yaml +{{- template "common.secret" (list . "mychart.secret") -}} +{{- define "mychart.secret" -}} +data: + zeus: {{ print "cat" | b64enc }} + athena: {{ print "cat" | b64enc }} + julius: {{ print "cat" | b64enc }} + one: |- + {{ .Files.Get "file1.txt" | b64enc }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: Y2F0 + julius: Y2F0 + one: VGhpcyBpcyBhIGZpbGUuCg== + zeus: Y2F0 +kind: Secret +metadata: + labels: + app: secret + chart: secret-0.1.0 + heritage: Tiller + release: release-name + name: release-name-secret +type: Opaque +``` + +## `common.ingress` + +The `common.ingress` template is designed to give you a well-defined `Ingress` +resource, that can be configured using `.Values.ingress`. An example values file +that can be used to configure the `Ingress` resource is: + +```yaml +ingress: + hosts: + - chart-example.local + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + tls: + - secretName: chart-example-tls + hosts: + - chart-example.local +``` + +Example use: + +```yaml +{{- template "common.ingress" (list . "mychart.ingress") -}} +{{- define "mychart.ingress" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: + app: ingress + chart: ingress-0.1.0 + heritage: Tiller + release: release-name + name: release-name-ingress +spec: + rules: + - host: chart-example.local + http: + paths: + - backend: + serviceName: release-name-ingress + servicePort: 80 + path: / + tls: + - hosts: + - chart-example.local + secretName: chart-example-tls +``` + +## `common.persistentvolumeclaim` + +`common.persistentvolumeclaim` can be used to easily add a +`PersistentVolumeClaim` resource to your chart that can be configured using +`.Values.persistence`: + +| Value | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------- | +| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead | +| persistence.storageClass | `StorageClass` name | +| persistence.accessMode | Access mode for persistent volume | +| persistence.size | Size of persistent volume | +| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim | + +An example values file that can be used to configure the +`PersistentVolumeClaim` resource is: + +```yaml +persistence: + enabled: true + storageClass: fast + accessMode: ReadWriteOnce + size: 8Gi +``` + +Example use: + +```yaml +{{- template "common.persistentvolumeclaim" (list . 
"mychart.persistentvolumeclaim") -}} +{{- define "mychart.persistentvolumeclaim" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: persistentvolumeclaim + chart: persistentvolumeclaim-0.1.0 + heritage: Tiller + release: release-name + name: release-name-persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "fast" +``` + +## Partial API Objects + +When writing Kubernetes resources, you may find the following helpers useful to +construct parts of the spec. + +### EnvVar + +Use the EnvVar helpers within a container spec to simplify specifying key-value +environment variables or referencing secrets as values. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + containers: + - {{ template "common.container" (list . "mychart.deployment.container") }} +{{- end -}} +{{- define "mychart.deployment.container" -}} +{{- $fullname := include "common.fullname" . -}} +env: +- {{ template "common.envvar.value" (list "ZEUS" "cat") }} +- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + containers: + - env: + - name: ZEUS + value: cat + - name: ATHENA + valueFrom: + secretKeyRef: + key: athena + name: secret-name +... +``` + +### Volume + +Use the Volume helpers within a `Deployment` spec to help define ConfigMap and +PersistentVolumeClaim volumes. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + volumes: + - {{ template "common.volume.configMap" (list "config" "configmap-name") }} + - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + volumes: + - configMap: + name: configmap-name + name: config + - name: data + persistentVolumeClaim: + claimName: pvc-name +... +``` + +The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object: + +| Value | Description | +| ------------------------- | ----------------------------------------------------- | +| persistence.enabled | If false, creates an `emptyDir` instead | +| persistence.existingClaim | If set, uses this instead of the passed in claim name | + +## Utilities + +### `common.fullname` + +The `common.fullname` template generates a name suitable for the `name:` field +in Kubernetes metadata. It is used like this: + +```yaml +name: {{ template "common.fullname" . }} +``` + +The following different values can influence it: + +```yaml +# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +fullnameOverride: "some-name" + +# This adds a prefix +fullnamePrefix: "pre-" +# This appends a suffix +fullnameSuffix: "-suf" + +# Global versions of the above +global: + fullnamePrefix: "pp-" + fullnameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for release "happy-panda" and chart "wordpress" +name: happy-panda-wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. 
Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.fullname" -}} + {{ template "common.fullname" . }}-my-stuff +{{- end -}} +``` + +### `common.fullname.unique` + +The `common.fullname.unique` variant of fullname appends a unique seven-character +sequence to the end of the common name field. + +This takes all of the same parameters as `common.fullname` + +Example template: + +```yaml +uniqueName: {{ template "common.fullname.unique" . }} +``` + +Example output: + +```yaml +uniqueName: release-name-fullname-jl0dbwx +``` + +It is also impacted by the prefix and suffix definitions, as well as by +`.Values.fullnameOverride` + +Note that the effective maximum length of this function is 63 characters, not 54. + +### `common.name` + +The `common.name` template generates a name suitable for the `app` label. It is used like this: + +```yaml +app: {{ template "common.name" . }} +``` + +The following different values can influence it: + +```yaml +# By default, name uses '{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +nameOverride: "some-name" + +# This adds a prefix +namePrefix: "pre-" +# This appends a suffix +nameSuffix: "-suf" + +# Global versions of the above +global: + namePrefix: "pp-" + nameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for chart "wordpress" +name: wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.name" -}} + {{ template "common.name" . }}-my-stuff +{{- end -}} +``` + +### `common.metadata` + +The `common.metadata` helper generates the `metadata:` section of a Kubernetes +resource. + +This takes three objects: + - .top: top context + - .fullnameOverride: override the fullname with this name + - .metadata + - .labels: key/value list of labels + - .annotations: key/value list of annotations + - .hook: name(s) of hook(s) + +It generates standard labels, annotations, hooks, and a name field. + +Example template: + +```yaml +{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }} +--- +{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }} +``` + +Example values: + +```yaml +bio: + name: example + labels: + first: matt + last: butcher + nick: technosophos + annotations: + format: bio + destination: archive + hook: pre-install + +pet: + fullnameOverride: Zeus + +``` + +Example output: + +```yaml +metadata: + name: release-name-metadata + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + first: "matt" + last: "butcher" + nick: "technosophos" + annotations: + "destination": "archive" + "format": "bio" + "helm.sh/hook": "pre-install" +--- +metadata: + name: Zeus + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + annotations: +``` + +Most of the common templates that define a resource type (e.g. `common.configmap` +or `common.job`) use this to generate the metadata, which means they inherit +the same `labels`, `annotations`, `nameOverride`, and `hook` fields. + +### `common.labelize` + +`common.labelize` turns a map into a set of labels. 
+ +Example template: + +```yaml +{{- $map := dict "first" "1" "second" "2" "third" "3" -}} +{{- template "common.labelize" $map -}} +``` + +Example output: + +```yaml +first: "1" +second: "2" +third: "3" +``` + +### `common.labels.standard` + +`common.labels.standard` prints the standard set of labels. + +Example usage: + +``` +{{ template "common.labels.standard" . }} +``` + +Example output: + +```yaml +app: labelizer +heritage: "Tiller" +release: "RELEASE-NAME" +chart: labelizer-0.1.0 +``` + +### `common.hook` + +The `common.hook` template is a convenience for defining hooks. + +Example template: + +```yaml +{{ template "common.hook" "pre-install,post-install" }} +``` + +Example output: + +```yaml +"helm.sh/hook": "pre-install,post-install" +``` + +### `common.chartref` + +The `common.chartref` helper prints the chart name and version, escaped to be +legal in a Kubernetes label field. + +Example template: + +```yaml +chartref: {{ template "common.chartref" . }} +``` + +For the chart `foo` with version `1.2.3-beta.55+1234`, this will render: + +```yaml +chartref: foo-1.2.3-beta.55_1234 +``` + +(Note that `+` is an illegal character in label values) diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_certificates.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_certificates.tpl new file mode 100644 index 0000000..d385098 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_certificates.tpl @@ -0,0 +1,32 @@ +{{- define "common.ca-certificates.volume" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +{{- if .Values.global.certs.volume }} +- name: ca-certificates + {{- if .Values.global.certs.volume.hostPath }} + hostPath: + path: {{ .Values.global.certs.volume.hostPath }} + type: Directory + {{- end }} + {{- if .Values.global.certs.volume.existingVolumeClaim }} + persistentVolumeClaim: + claimName: {{ .Values.global.certs.volume.existingVolumeClaim }} + {{- end }} +{{- else }} +- name: ca-certificates + persistentVolumeClaim: + claimName: {{ .Release.Name }}-certs-pvc +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.ca-certificates.volumeMount" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +- name: ca-certificates + mountPath: {{ default "/etc/ssl/certs" .Values.certs.mountPath | quote }} + readOnly: true +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_chartref.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_chartref.tpl new file mode 100644 index 0000000..e6c1486 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_chartref.tpl @@ -0,0 +1,14 @@ +{{- /* +common.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. 
+ +Example output: + + zookeeper-1.2.3 + wordpress-3.2.1_20170219 + +*/ -}} +{{- define "common.chartref" -}} + {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_configmap.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_configmap.yaml new file mode 100644 index 0000000..f04def2 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_configmap.yaml @@ -0,0 +1,32 @@ +{{- define "common.configmap.tpl" -}} +apiVersion: v1 +kind: ConfigMap +{{ template "common.metadata.configs" . }} +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.configs }} + {{- range $key, $value := $container.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.configs -}} + {{- range $key, $value := .Values.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.configmap" -}} +{{- template "common.util.merge" (append . "common.configmap.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_container.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_container.yaml new file mode 100644 index 0000000..2f69e01 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_container.yaml @@ -0,0 +1,98 @@ +{{- define "common.container.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ include "common.name" . }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +{{- if or .Values.configs .Values.secrets }} +env: +{{- if .Values.configs.data.natsUri }} + - name: NATS_CLIENT_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +{{- end }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +ports: +{{- $port := .Values.service.port }} +{{- if .container }}{{- if .container.port }} + {{- $port = .container.port }} +{{- end }}{{- end }} +- containerPort: {{ $port }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if $root.Values.persistence }}{{- if $root.Values.persistence.enabled }}{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}}{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . 
}}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /health + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +readinessProbe: + httpGet: + path: /ready + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.container" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . "common.container.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_deployment.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_deployment.yaml new file mode 100644 index 0000000..0a46af0 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_deployment.yaml @@ -0,0 +1,93 @@ +{{- define "common.deployment.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: Deployment +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.deployment.replicas}} + replicas: {{ .Values.deployment.replicas }} +{{- end}} + template: + metadata: + annotations: + checksum/configs: {{ (print (include "common.configmap.tpl" .)) | sha256sum }} + checksum/secrets: {{ (print (include "common.secret.tpl" .)) | sha256sum }} +{{- if .Values.deployment }}{{- if .Values.deployment.annotations }} +{{ include "common.annote" (dict "annotations" .Values.deployment.annotations "root" . ) | indent 8 }} +{{- end }}{{- end }} + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment }}{{- if .Values.deployment.labels }} +{{ include "common.labelize" .Values.deployment.labels | indent 8 }} +{{- end }}{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.natsUri }} + {{ tpl .Values.configs.data.natsUri . | regexFind "//.*:" | trimAll ":" | trimAll "/" }}: "true" +{{- end }}{{- end }}{{- end }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: +{{- if contains $name .Release.Name }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) 
"-secrets") (include "common.fullname" .)) }} +{{- else }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} + - {{ template "common.volume.secret" (list (printf "%s-secrets" (.Release.Name)) (printf "%s" (.Release.Name))) }} +{{- end }} + +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.persistentVolumeClaim }} +{{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} +{{- if kindIs "map" $claim }} +{{- if eq $name "default" }} + - {{ template "common.volume.pvc" (list (include "common.fullname" $root) (include "common.fullname" $root) $claim) }} +{{- else }} + - {{ template "common.volume.pvc" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $claim) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.hostPath }} +{{- range $name, $hostPath:= .Values.persistence.hostPath }} +{{- if kindIs "map" $hostPath }} + - {{ template "common.volume.hostpath" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $hostPath) }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.emptyDir }} +{{- range $name, $dir:= .Values.persistence.emptyDir }} +{{- if kindIs "map" $dir }} +{{- if $dir.create }} + - {{ template "common.volume.emptydir" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $root.Values.persistence) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{ include "common.ca-certificates.volume" . | nindent 6 }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} +{{- if .Values.deployment }}{{- if .Values.deployment.initContainer }} + initContainers: + - +{{ include "common.initContainer.tpl" (set . "container" .Values.deployment.initContainer ) | indent 8 }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.deployment.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.deployment" -}} +{{- $top := first . -}} +{{- if and $top.Values.deployment }} +{{- template "common.util.merge" (append . "common.deployment.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_envvar.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_envvar.tpl new file mode 100644 index 0000000..39a997a --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_envvar.tpl @@ -0,0 +1,32 @@ +{{- define "common.envvar.value" -}} + {{- $name := index . 0 -}} + {{- $value := index . 1 -}} + + name: {{ $name }} + value: {{ default "" $value | quote }} +{{- end -}} + +{{- define "common.envvar.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + {{- $configMapKey := index . 
2 -}} + + name: {{ $name }} + valueFrom: + configMapKeyRef: + name: {{ $configMapName }}-configs + key: {{ $configMapKey }} +{{- end -}} + +{{- define "common.envvar.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + {{- $secretKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + secretKeyRef: + name: {{ $secretName }}-secrets + key: {{ $secretKey }} +{{- end -}} + diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_fullname.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_fullname.tpl new file mode 100644 index 0000000..0f6bc77 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_fullname.tpl @@ -0,0 +1,42 @@ +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.fullname" . -}}"' +*/ -}} +{{- define "common.fullname" -}} + {{- $global := default (dict) .Values.global -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +common.fullname.unique adds a random suffix to the unique name. + +This takes the same parameters as common.fullname + +*/ -}} +{{- define "common.fullname.unique" -}} + {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }} +{{- end }} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_hpa.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_hpa.yaml new file mode 100644 index 0000000..be4215d --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_hpa.yaml @@ -0,0 +1,31 @@ +{{- define "common.hpa.tpl" -}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +{{ template "common.metadata" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "common.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ if .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpa.targetAverageUtilizationMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end -}} +{{- define "common.hpa" -}} +{{- $top := first . -}} +{{- if and $top.Values.hpa }} +{{- template "common.util.merge" (append . 
"common.hpa.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_image.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_image.tpl new file mode 100644 index 0000000..6a2335a --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_image.tpl @@ -0,0 +1,21 @@ +{{/* Return the proper collections image name */}} +{{- define "common.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{ $image := .Values.image }} + {{- if .container }}{{- if .container.image }} + {{ $image = .container.image }} + {{- end -}}{{- end -}} + {{- $registry := default "docker.io" (default .Values.image.registry $image.registry) -}} + {{- $repository := $image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default .Values.image.tag $image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_ingress.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_ingress.yaml new file mode 100644 index 0000000..ab9a75d --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_ingress.yaml @@ -0,0 +1,49 @@ +{{- define "common.ingress.tpl" -}} +apiVersion: extensions/v1beta1 +kind: Ingress +{{ template "common.metadata" . }} + annotations: + kubernetes.io/ingress.class: {{ template "common.ingress.class" . }} + {{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.ingressAuthUrl }} + nginx.ingress.kubernetes.io/auth-url: {{ tpl .Values.configs.data.ingressAuthUrl . | quote }} + {{- end }}{{- end }}{{- end }} + {{- if .Values.ingress}}{{- if .Values.ingress.annotations }} + {{ include "common.annote" (dict "annotations" .Values.ingress.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + {{- if .Values.ingress }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "common.fullname" $ }} + servicePort: 80 + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} + {{- end }} +{{- define "common.ingress" -}} +{{- $top := first . -}} +{{- if and $top.Values.ingress }} +{{- template "common.util.merge" (append . 
"common.ingress.tpl") -}} +{{- end -}} +{{- end -}} + +{{- define "common.ingress.class" -}} + {{- $ingressClass := "nginx" }} + {{- if .Values.ingress }}{{- if .Values.ingress.class }} + {{- $ingressClass = .Values.ingress.class -}} + {{- end -}}{{- end -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_initContainer.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_initContainer.yaml new file mode 100644 index 0000000..3b12f55 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_initContainer.yaml @@ -0,0 +1,74 @@ +{{- define "common.initContainer.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ .container.name }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +env: + - name: SERVICE_NAME + value: {{ .Chart.Name }} +{{- if or .container.configs .container.secrets }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.initContainer" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . 
"common.initContainer.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata.yaml new file mode 100644 index 0000000..83c42d5 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata.yaml @@ -0,0 +1,35 @@ +{{- /* +common.metadata creates a standard metadata header. +It creates a 'metadata:' section with name and labels. +*/ -}} +{{ define "common.metadata" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.configs" -}} +metadata: + name: {{ template "common.fullname" . }}-configs + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.secrets" -}} +metadata: + name: {{ template "common.fullname" . }}-secrets + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.workload" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_annotations.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_annotations.tpl new file mode 100644 index 0000000..ed28474 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_annotations.tpl @@ -0,0 +1,23 @@ +{{- /* +common.hook defines a hook. + +This is to be used in a 'metadata.annotations' section. + +This should be called as 'template "common.metadata.hook" "post-install"' + +Any valid hook may be passed in. Separate multiple hooks with a ",". +*/ -}} +{{- define "common.hook" -}} +"helm.sh/hook": {{printf "%s" . | quote}} +{{- end -}} + +{{- define "common.annote" -}} +{{ $root := .root}} +{{- range $k, $v := .annotations }} +{{- if kindIs "string" $v }} +{{ $k | quote }}: {{ tpl $v $root | quote }} +{{- else -}} +{{ $k | quote }}: {{ $v }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_labels.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_labels.tpl new file mode 100644 index 0000000..15fe00c --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_metadata_labels.tpl @@ -0,0 +1,28 @@ +{{- /* +common.labelize takes a dict or map and generates labels. + +Values will be quoted. Keys will not. + +Example output: + + first: "Matt" + last: "Butcher" + +*/ -}} +{{- define "common.labelize" -}} +{{- range $k, $v := . }} +{{ $k }}: {{ $v | quote }} +{{- end -}} +{{- end -}} + +{{- /* +common.labels.standard prints the standard Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "common.labels.standard" -}} +app: {{ template "common.name" . }} +chart: {{ template "common.chartref" . 
}} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_name.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_name.tpl new file mode 100644 index 0000000..1d42fb0 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_name.tpl @@ -0,0 +1,29 @@ +{{- /* +name defines a template for the name of the chart. It should be used for the `app` label. +This is common practice in many Kubernetes manifests, and is not Helm-specific. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.name" . -}}"' +*/ -}} +{{- define "common.name"}} + {{- $global := default (dict) .Values.global -}} + {{- $base := default .Chart.Name .Values.nameOverride -}} + {{- $gpre := default "" $global.namePrefix -}} + {{- $pre := default "" .Values.namePrefix -}} + {{- $suf := default "" .Values.nameSuffix -}} + {{- $gsuf := default "" $global.nameSuffix -}} + {{- $name := print $gpre $pre $base $suf $gsuf -}} + {{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_networkpolicy.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_networkpolicy.yaml new file mode 100644 index 0000000..e0c4922 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- define "common.networkpolicy.tpl" -}} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +{{ template "common.metadata" . }} +spec: + podSelector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} + policyTypes: + - Egress + egress: + - to: +{{- if .Values.configs }}{{- if .Values.configs.data }} +{{- if .Values.configs.data.natsUri }} + - podSelector: + matchLabels: + app: "nats" + release: {{ .Values.natsRelease | default .Release.Name | quote }} + - podSelector: + matchLabels: + app: "nats-streaming" + release: {{ .Values.natsRelease | default .Release.Name | quote }} +{{- end }} +{{- if or .Values.configs.data.tokenAuthUri .Values.configs.data.ingressAuthUrl }} + - podSelector: + matchLabels: + app: "edge-auth" + release: {{ .Values.edgeAuthRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.keysUri }} + - podSelector: + matchLabels: + app: "keys" + release: {{ .Values.keysRelease | default .Release.Name | quote }} +{{- end }} +{{- if .Values.configs.data.pdsUri }} + - podSelector: + matchLabels: + app: "policy-decisions" + release: {{ .Values.pdsRelease | default .Release.Name | quote }} +{{- end }} +{{- end }}{{- end }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP +{{- end }} +{{- define "common.networkpolicy" -}} +{{- template "common.util.merge" (append . 
"common.networkpolicy.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaim.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaim.yaml new file mode 100644 index 0000000..4c2ed62 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaim.yaml @@ -0,0 +1,47 @@ +{{- define "common.persistentvolumeclaim.tpl" -}} +{{- $persistence := default .Values.persistence .claim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +{{ template "common.metadata" . }} +spec: + accessModes: + - {{ $persistence.accessMode | quote }} + resources: + requests: + storage: {{ $persistence.size | quote }} +{{- if $persistence.matchLabels }} + selector: + matchLabels: +{{- include "common.labelize" $persistence.matchLabels | indent 6 -}} +{{- end -}} +{{- if $persistence.storageClass }} +{{- if (eq "-" $persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ $persistence.storageClass }}" +{{- end }} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} + storageClassName: "" + {{- else -}} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- define "common.persistentvolumeclaim" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if $top.Values.persistence.enabled -}}{{- if $top.Values.persistence.persistentVolumeClaim -}} + {{- if not $top.claim -}} + {{- $top = set $top "claim" $top.Values.persistence.persistentVolumeClaim.default -}} + {{- end -}} + {{- if not $top.claim.existingClaim -}} + {{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}} + {{- end -}} +{{- end -}}{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaims.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaims.yaml new file mode 100644 index 0000000..2cb894b --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_persistentvolumeclaims.yaml @@ -0,0 +1,27 @@ +{{- define "common.persistentvolumeclaims" -}} +{{- $root := . 
-}} +{{- if .Values.persistence -}}{{- if .Values.persistence.enabled -}} + {{- if .Values.persistence.persistentVolumeClaim -}} + {{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} + {{- if kindIs "map" $claim }} + {{- if eq $name "default" }} + {{- $root = set $root "claim" $claim -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- else -}} + {{- $values := set $root.Values "fullnameOverride" (printf "%s-%s" (include "common.fullname" $root) $name) -}} + {{- $root = set (set $root "claim" $claim) "Values" $values -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- end -}} + {{- end -}} + {{- printf "\n" -}}{{- printf "\n" -}} + {{- printf "---" -}} + {{- printf "\n" -}} + {{- $_:= unset $root.Values "fullnameOverride" -}} + {{- end -}} + {{- end -}} +{{- end -}}{{- end -}} +{{- end -}} + +## No override templates are needed for the case of defining multiple PVCs +{{- define "mychart.persistentvolumeclaim" -}} +{{- end }} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_podSecurityPolicy.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_podSecurityPolicy.yaml new file mode 100644 index 0000000..c06f607 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_podSecurityPolicy.yaml @@ -0,0 +1,55 @@ +{{- define "common.podsecuritypolicy.tpl" -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +{{ template "common.metadata" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end -}} +{{- define "common.podsecuritypolicy" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}}{{- if ne ($top.Values.podSecurityPolicy | default false) false -}} +{{- template "common.util.merge" (append . 
"common.podsecuritypolicy.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_role.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_role.yaml new file mode 100644 index 0000000..cf1d6f6 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_role.yaml @@ -0,0 +1,23 @@ +{{- define "common.role.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +{{ template "common.metadata" . }} +rules: +{{- if .Values.podSecurityPolicy | default false }} +- apiGroups: + - policy + resourceNames: + - {{ template "common.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} +{{- end -}} +{{- define "common.role" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.role.tpl") -}} +{{- end -}} +{{- end -}} + diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_rolebinding.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_rolebinding.yaml new file mode 100644 index 0000000..021e896 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_rolebinding.yaml @@ -0,0 +1,19 @@ +{{- define "common.rolebinding.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +{{ template "common.metadata" . }} +roleRef: + kind: Role + name: {{ template "common.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} +{{- define "common.rolebinding" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.rolebinding.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_secret.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_secret.yaml new file mode 100644 index 0000000..45ec55f --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_secret.yaml @@ -0,0 +1,45 @@ +{{- define "common.secret.tpl" -}} +apiVersion: v1 +kind: Secret +{{ template "common.metadata.secrets" . }} +type: Opaque +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.secrets }} + {{- range $key, $value := $container.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- range $key, $value := $container.secrets.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets -}} + {{- if .Values.secrets.stringData -}} + {{- range $key, $value := .Values.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets.data -}} + {{- range $key, $value := .Values.secrets.data }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.secret" -}} +{{- template "common.util.merge" (append . 
"common.secret.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_service.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_service.yaml new file mode 100644 index 0000000..fb4a9e8 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_service.yaml @@ -0,0 +1,25 @@ +{{- define "common.service.tpl" -}} +apiVersion: v1 +kind: Service +{{ template "common.metadata" . }} + annotations: + {{- if .Values.service }}{{- if .Values.service.annotations }} + {{ include "common.annote" (dict "annotations" .Values.service.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ template "common.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- end -}} +{{- define "common.service" -}} +{{- $top := first . -}} +{{- if and $top.Values.service}} +{{- template "common.util.merge" (append . "common.service.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_serviceaccount.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_serviceaccount.yaml new file mode 100644 index 0000000..534a4bf --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- define "common.serviceaccount.tpl" -}} +apiVersion: v1 +kind: ServiceAccount +{{ template "common.metadata" . }} +{{- end -}} +{{- define "common.serviceaccount" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.serviceaccount.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_statefulset.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_statefulset.yaml new file mode 100644 index 0000000..04155e3 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_statefulset.yaml @@ -0,0 +1,44 @@ +{{- define "common.statefulset.tpl" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: statefulset +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.statefulset.replicas}} + replicas: {{ .Values.statefulset.replicas }} +{{- end}} + template: + metadata: + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- if .Values.persistence }} + - {{ template "common.volume.pvc" (list (include "common.fullname" .) (include "common.fullname" .) .Values.persistence) }} +{{- end }} +{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }} + containers: + - +{{ include "common.container.tpl" (set . 
"container" .Values.statefulset.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.statefulset" -}} +{{- $top := first . -}} +{{- if and $top.Values.statefulset }} +{{- template "common.util.merge" (append . "common.statefulset.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_transformers.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_transformers.tpl new file mode 100644 index 0000000..f42e742 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_transformers.tpl @@ -0,0 +1,41 @@ +{{- define "common.transformers" -}} +{{- $fullname := include "common.fullname" . -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $release := .Release.Name -}} +{{- $commonSecretList := list "mongodbUri" "redisUri" "redisPassword" -}} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end -}}{{- end -}} +{{- if $secrets -}} +{{- range $key, $value := $secrets.stringData }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- range $key, $value := $secrets.data }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- end }} +{{ $configs := .Values.configs}} +{{- if .container }}{{- if .container.configs }} +{{ $configs = .container.configs}} +{{- end -}}{{- end -}} +{{- if $configs -}} +{{- range $key, $value := $configs.data }} +- {{ template "common.envvar.configmap" (list (print $key | snakecase | upper) $fullname $key ) }} +{{- end }} +{{- range $key, $value := $configs }} +{{- if ne $key "data" }} +- {{ template "common.envvar.value" (list (print $key | snakecase | upper) $value ) }} +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_util.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_util.tpl new file mode 100644 index 0000000..6abeec0 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_util.tpl @@ -0,0 +1,15 @@ +{{- /* +common.util.merge will merge two YAML templates and output the result. + +This takes an array of three values: +- the top context +- the template name of the overrides (destination) +- the template name of the base (source) + +*/ -}} +{{- define "common.util.merge" -}} +{{- $top := first . -}} +{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} +{{- $tpl := fromYaml (include (index . 
2) $top) | default (dict ) -}} +{{- regexReplaceAll ".*: null|.*: nil" (toYaml (merge $overrides $tpl)) "${1}" -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_volume.tpl b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_volume.tpl new file mode 100644 index 0000000..360e239 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/templates/_volume.tpl @@ -0,0 +1,62 @@ +{{- define "common.volume.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + + name: {{ $name }} + configMap: + name: {{ $configMapName }}-configs +{{- end -}} + +{{- define "common.volume.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + + name: {{ $name }} + secret: + secretName: {{ $secretName }}-secrets +{{- end -}} + +{{- define "common.volume.pvc" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $claim := index . 2 -}} + + name: {{ $name }} + {{- if $claim }} + persistentVolumeClaim: + claimName: {{ $claim.existingClaim | default $claimName }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.emptydir" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + {{- if $persistence.emptyDir }} + name: {{ $name }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.hostpath" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + name: {{ $name }} + hostPath: + path: {{ $persistence.path }} + type: {{ $persistence.type }} +{{- end -}} + + +{{- define "common.volume.mount" -}} +{{- $volume := index . 0 -}} +{{- $mountPath := index . 1 -}} +- name: {{ $volume }} + mountPath: {{ default "/tmp" $mountPath.mountPath | quote }} + readOnly: {{ default false $mountPath.readOnly }} +{{- end -}} diff --git a/qliksense/charts/data-connector-nfs/charts/qlikcommon/values.yaml b/qliksense/charts/data-connector-nfs/charts/qlikcommon/values.yaml new file mode 100644 index 0000000..b7cf514 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/charts/qlikcommon/values.yaml @@ -0,0 +1,4 @@ +# Default values for commons. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value diff --git a/qliksense/charts/data-connector-nfs/requirements.yaml b/qliksense/charts/data-connector-nfs/requirements.yaml new file mode 100644 index 0000000..94311d9 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: qlikcommon + version: "1.2.5" + repository: "@qlik" + condition: global.component-common-imports diff --git a/qliksense/charts/data-connector-nfs/templates/manifest.yaml b/qliksense/charts/data-connector-nfs/templates/manifest.yaml new file mode 100644 index 0000000..7e5cc22 --- /dev/null +++ b/qliksense/charts/data-connector-nfs/templates/manifest.yaml @@ -0,0 +1,65 @@ +{{- template "common.configmap" (list . "data-connector-nfs.configmap") -}} +{{- define "data-connector-nfs.configmap" -}} +{{- end }} + +--- +{{ template "common.secret" (list . "data-connector-nfs.secret") -}} +{{- define "data-connector-nfs.secret" -}} +{{- end }} + +--- +{{ template "common.service" (list . "data-connector-nfs.service") -}} +{{- define "data-connector-nfs.service" -}} +spec: + ports: + - name: {{ template "common.name" . 
}} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + - name: grpc + port: {{ .Values.service.grpcPort }} + targetPort: {{ .Values.service.grpcPort }} + protocol: TCP +{{- end }} + +--- +{{ template "common.deployment" (list . "data-connector-nfs.deployment") -}} +{{- define "data-connector-nfs.deployment" -}} +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "data-connector-nfs.deployment.container") | indent 8}} +{{- end }} + +{{- define "data-connector-nfs.deployment.container" -}} +ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.service.grpcPort }} + protocol: TCP +{{- end }} + +--- +{{ template "common.persistentvolumeclaims". -}} + +--- +{{ template "common.networkpolicy" (list . "data-connector-nfs.networkpolicy") -}} +{{- define "data-connector-nfs.networkpolicy" -}} +{{- end }} + +--- +{{ template "common.ingress" (list . "data-connector-nfs.ingress") -}} +{{- define "data-connector-nfs.ingress" -}} +spec: + rules: + - http: + paths: + - backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} + path: /customdata/64/data-connector-nfs +{{- end }} diff --git a/qliksense/charts/data-connector-nfs/values.yaml b/qliksense/charts/data-connector-nfs/values.yaml new file mode 100644 index 0000000..8bcd8cb --- /dev/null +++ b/qliksense/charts/data-connector-nfs/values.yaml @@ -0,0 +1,104 @@ +# Default values for data-connector-nfs. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + ## data-connector-nfs image + ## + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + repository: qlik-download/data-connector-nfs + + ## data-connector-nfs image version. + ## + ## + tag: 1.0.1 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + + ## Secrets for pulling images from a private docker registry. + pullSecrets: + - name: artifactory-docker-secret + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 8080 + grpcPort: 50051 + ## Metrics configuration + ## Prometheus configuration + ## The annotations for prometheus scraping are included + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.port }}" + +# Currently templates a "main" container +deployment: + ## Number of replicas. + ## + replicas: 1 + container: + ## deployment resources + resources: + limits: + cpu: null + memory: null + requests: + cpu: null + memory: null + +configs: + ## Hard-coded Defaults (only for dev) + # + ## Number of seconds to wait during pod termination after sending SIGTERM + ## until SIGKILL. 
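+  ## The commented keys below are illustrative dev defaults only; to apply one, uncomment it
+  ## here or supply it at install time (hypothetical release name shown), e.g.
+  ##   helm upgrade --install my-release . --set configs.terminationGracePeriodSeconds=45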
+ # terminationGracePeriodSeconds: "30" + # + ## toggle JWT validation using retrieved keys from the configured + ## JWKS endpoint + # authEnabled: "false" + ## expected `audience` value within the JWT claims + # authJwtAud: "qlik.api.internal" + ## expected `issuer` value within the JWT claims + # authJwtIss: "qlik.api.internal" + + ## Access Control / rules enforcement setup + # accessControlEnabled: "true" + ## when enabled, rules are enforced + # accessControlQueryTimeout: "30" + # accessControlEvaluateTimeout: "30" + # + ### Rollbar configuration + ## Enables rollbar error tracking + # rollbarToken: "" + # + ## Environment + # env: "localdev" + authEnabled: "true" + data: + ## Log level (debug|info|warn|error) + logLevel: "debug" + keysUri: "http://{{ .Release.Name }}-keys:8080" + pdsUri: "http://{{ .Release.Name }}-policy-decisions:5080" + spacesUri: "http://{{ .Release.Name }}-spaces:6080" + +persistence: + enabled: false + +ingress: + ## class provides a kubernetes.io/ingress.class override of the default nginx + class: "nginx" + ## Annotations to be added to the ingress. + ## + annotations: + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/customdata/64/data-connector-nfs/(.*) /$1 break; diff --git a/qliksense/charts/data-connector-odbc/.helmignore b/qliksense/charts/data-connector-odbc/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-connector-odbc/Chart.yaml b/qliksense/charts/data-connector-odbc/Chart.yaml new file mode 100644 index 0000000..e020bc1 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Custom connector to access odbc data sources through gRPC +home: https://www.qlik.com +name: data-connector-odbc +sources: +- https://github.com/qlik-trial/data-connector-odbc +version: 1.4.10 diff --git a/qliksense/charts/data-connector-odbc/README.md b/qliksense/charts/data-connector-odbc/README.md new file mode 100644 index 0000000..3490d6f --- /dev/null +++ b/qliksense/charts/data-connector-odbc/README.md @@ -0,0 +1,55 @@ +# data-connector-odbc + +[data-connector-odbc](https://github.com/qlik-trial/qvodbcconnector) is the service that provides Qlik ODBC resources for custom connectors. + +## Introduction + +This chart bootstraps a data-connector-odbc service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/data-connector-odbc +``` + +The command deploys data-connector-odbc on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `env.loglevel` | Log level (ERROR, INFO, DEBUG) | `INFO` | +| `env.qsEnvironment` | Environment configuration (QSEOK, QCS). 
Based on this value, the appropriate config and metadata files are applied. | `QCS` | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil`| +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io`| +| `image.repository` | image name with no registry | `data-connector-odbc`| +| `image.tag` | image version | `6.54.0` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Data Connector ODBC external port (rest) | `3005` | +| `service.grpc` | Data Connector ODBC external port (grpc) | `50060` | +| `shutdownDrain.enabled` | Toggle graceful termination logic allowing active connections to finish | `false` | +| `terminationGracePeriodSeconds` | maximum time k8s will wait before sending SIGKILL | `30` | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml data-connector-odbc +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-odbc/templates/_helpers.tpl b/qliksense/charts/data-connector-odbc/templates/_helpers.tpl new file mode 100644 index 0000000..cd69f2c --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/*
Expand the name of the chart.
*/}} +{{- define "data-connector-odbc.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}} +{{- define "data-connector-odbc.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/*
Create chart name and version as used by the chart label.
*/}} +{{- define "data-connector-odbc.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/*
Return the appropriate apiVersion for networkpolicy.
*/}} +{{- define "data-connector-odbc-networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* Return data-connector-odbc image name */}} +{{- define "data-connector-odbc.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-odbc/templates/deployment-cmd.yaml b/qliksense/charts/data-connector-odbc/templates/deployment-cmd.yaml new file mode 100644 index 0000000..459175e --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/deployment-cmd.yaml @@ -0,0 +1,79 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-odbc.fullname" . }}-cmd + labels: + app: {{ template "data-connector-odbc.name" . }} + chart: {{ template "data-connector-odbc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.command.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "command" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-odbc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Odbc_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13000 + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-odbc/templates/deployment-rld.yaml b/qliksense/charts/data-connector-odbc/templates/deployment-rld.yaml new file mode 100644 index 0000000..5b4b3f2 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/deployment-rld.yaml @@ -0,0 +1,85 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-odbc.fullname" . }}-rld + labels: + app: {{ template "data-connector-odbc.name" . 
}} + chart: {{ template "data-connector-odbc.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.reload.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "reload" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-odbc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Odbc_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + - name: ENABLE_SHUTDOWN_DRAIN + value: "{{ .Values.shutdownDrain.enabled }}" + - name: SHUTDOWN_TIMEOUT + value: "{{ .Values.terminationGracePeriodSeconds }}" + volumeMounts: + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13000 + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-odbc/templates/hpa-rld.yaml b/qliksense/charts/data-connector-odbc/templates/hpa-rld.yaml new file mode 100644 index 0000000..b568399 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/hpa-rld.yaml @@ -0,0 +1,32 @@ +{{ if .Values.reload.useHpaReload }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-connector-odbc.fullname" . }}{{ "-rld" }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta2 + kind: Deployment + name: {{ template "data-connector-odbc.fullname" . 
}}{{ "-rld" }} + minReplicas: {{ .Values.hpaReload.minReplicas }} + maxReplicas: {{ .Values.hpaReload.maxReplicas }} + metrics: +{{ if .Values.hpaReload.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpaReload.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationMemory }} +{{- end }} +{{ if .Values.hpaReload.targetAverageValueEnabled }} + - type: Pods + pods: + metricName: {{ .Values.hpaReload.metricName }} + targetAverageValue: {{ .Values.hpaReload.targetAverageValue }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-odbc/templates/network.yaml b/qliksense/charts/data-connector-odbc/templates/network.yaml new file mode 100644 index 0000000..60a465a --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/network.yaml @@ -0,0 +1,35 @@ + +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "data-connector-odbc-networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "data-connector-odbc.name" . }}-network-policy + namespace: {{ .Values.networkPolicy.namespace }} +spec: + podSelector: + matchLabels: + app: {{ template "data-connector-odbc.name" . }} + policyTypes: + - Egress + - Ingress + ingress: + - {} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.externalcidr }} + except: + {{- if .Values.networkPolicy.blockedcidrs }} + {{- range .Values.networkPolicy.blockedcidrs }} + - {{ . }} + {{- end }} + {{- else }} + - {{ .Values.networkPolicy.clustercidr }} + - {{ .Values.networkPolicy.vpccidr }} + {{- end }} +{{- end -}} diff --git a/qliksense/charts/data-connector-odbc/templates/service-cmd.yaml b/qliksense/charts/data-connector-odbc/templates/service-cmd.yaml new file mode 100644 index 0000000..148561e --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/service-cmd.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-odbc.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-odbc.name" . }} + chart: {{ template "data-connector-odbc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-odbc/templates/service-rld.yaml b/qliksense/charts/data-connector-odbc/templates/service-rld.yaml new file mode 100644 index 0000000..a047609 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/templates/service-rld.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-odbc.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-odbc.name" . }} + chart: {{ template "data-connector-odbc.chart" . 
}} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-odbc.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-odbc/values.yaml b/qliksense/charts/data-connector-odbc/values.yaml new file mode 100644 index 0000000..8864407 --- /dev/null +++ b/qliksense/charts/data-connector-odbc/values.yaml @@ -0,0 +1,94 @@ +# Default values for Data Connector ODBC Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## Data Connector ODBC image. + ## + repository: qlik-download/data-connector-odbc + + ## Data Connector ODBC image version. + ## ref: https://hub.docker.com/r/qlik/data-connector-odbc/tags/ + ## + tag: 6.54.0 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + ## pullPolicy: Always + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Other environment variables +env: + ## log level + loglevel: "INFO" + ## defines the environment (QSEOK / QCS) + qsEnvironment: "QSEOK" + +## The network policy associated with this service. +networkPolicy: + enabled: false + +## Settings for pods serving command requests +command: + replicaCount: 1 + +## Settings for pods serving GetData requests +reload: + ## scale reload connectors elastically or not + useHpaReload: false + ## replicaCount to be used if useHpaReload is false + replicaCount: 1 + +## Graceful termination logic allowing active connections to finish +shutdownDrain: + enabled: false + +## Number of seconds to wait during pod termination until SIGKILL. +## +terminationGracePeriodSeconds: 30 +## Settings for elastically scaling pods serving GetData requests +## Requires reload.useHpa true +hpaReload: + minReplicas: 2 + maxReplicas: 5 + targetAverageUtilizationCpuEnabled: false + # targetAverageUtilizationCpu: 80 + targetAverageUtilizationMemoryEnabled: false + # targetAverageUtilizationMemory: 80 + targetAverageValueEnabled: false + # Name of the custom metric to use for HPA + # metricName: "connector_active_getdata_requests" + # Average target value of the metric to trigger auto scaling + # targetAverageValue: 3 + +## Service configuration. 
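+## The grpc and http ports declared below are exposed through the -cmd and -rld Services
+## rendered by this chart's service templates; assuming the default naming and a release
+## named my-release, an in-cluster client would reach them at, e.g.,
+##   my-release-data-connector-odbc-rld:50060          (gRPC data port)
+##   http://my-release-data-connector-odbc-rld:3005/health   (HTTP health endpoint)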
+## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + grpc: 50060 + port: 3005 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## deployment resources +resources: {} diff --git a/qliksense/charts/data-connector-qwc/.helmignore b/qliksense/charts/data-connector-qwc/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-connector-qwc/Chart.yaml b/qliksense/charts/data-connector-qwc/Chart.yaml new file mode 100644 index 0000000..e12ae1a --- /dev/null +++ b/qliksense/charts/data-connector-qwc/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Custom connector to access web data sources through gRPC +home: https://www.qlik.com +name: data-connector-qwc +sources: +- https://github.com/qlik-trial/data-connector-qwc +version: 3.0.3 diff --git a/qliksense/charts/data-connector-qwc/README.md b/qliksense/charts/data-connector-qwc/README.md new file mode 100644 index 0000000..d61b475 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/README.md @@ -0,0 +1,66 @@ +# data-connector-qwc + +[data-connector-qwc](https://github.com/qlik-trial/qwc-integrated) is the service that provides connectors to multiple online data sources (Facebook, Twitter, etc.). + +## Introduction + +This chart bootstraps a data-connector-qwc service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/data-connector-qwc +``` + +The command deploys data-connector-qwc on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil`| +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io`| +| `image.repository` | image name with no registry | `data-connector-qwc`| +| `image.webrepository` | web server image name with no registry | `data-connector-qwc-web`| +| `image.tag` | image version | `0.80.0` | +| `log_level` | QWC logging level. 
This can be TRACE, INFO, WARN, ERROR or OFF | `INFO` | +| `allow_reference_connector` | Whether to allow the Reference Connector for debugging and testing | false | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Data Connector QWC external port (qwc) | `3005` | +| `service.grpc` | Data Connector QWC external port (grpc) | `50060` | +| `redis.enabled` | Whether this chart should install its own 'local' redis. *Note* if this is set to false the redis details are expected to be found in a value named {{ redis_secret_name or, if this is not present, {{ .Release.Name }}-dcaas-redis-secret, with keys redis-addr and redis-password. If set to true, a default redis instance is installed without password authentication configured (not recommended). | true | +| `secrets.enc_key_for_temp_state` | The key used by QWC to encrypt temporary state. | null - NOT RECOMMENDED for production use (falls back on internal key). | +| `secrets.enc_key_for_params` | The key used by QWC to encrypt params in the connection. | null - NOT RECOMMENDED for production use (falls back on internal key). | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `ingress.class` | Ingress `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.annotations` | Ingress additional annotations | `[]` +| `ingress.tls` | Ingress TLS configuration | `nil` | +| `ingress.host` | host for ingress | `nil` | +| `web.replicaCount` | Number of pods serving static web content | 2 | +| `shutdownDrain.enabled` | Toggle graceful termination logic allowing active connections to finish | `false` | +| `terminationGracePeriodSeconds` | maximum time Kubernetes will wait before sending SIGKILL | `30` | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml data-connector-qwc +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + diff --git a/qliksense/charts/data-connector-qwc/charts/redis/.helmignore b/qliksense/charts/data-connector-qwc/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/data-connector-qwc/charts/redis/Chart.yaml b/qliksense/charts/data-connector-qwc/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/data-connector-qwc/charts/redis/README.md b/qliksense/charts/data-connector-qwc/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. 
+ +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | 
Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. 
| `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis 
sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
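+
+As an illustrative sketch (not an exhaustive configuration), a minimal override file could look like the following. Every key shown is taken from the parameters table above; the file name `my-values.yaml` and the secret name `my-redis-secret` are placeholders:
+
+```yaml
+# my-values.yaml -- example overrides; all keys are documented in the parameters table above
+cluster:
+  enabled: true
+  slaveCount: 2                     # two read replicas instead of the default one
+usePassword: true
+existingSecret: my-redis-secret     # placeholder: a pre-created secret holding the Redis password
+metrics:
+  enabled: true                     # start the side-car prometheus exporter
+master:
+  persistence:
+    size: 16Gi                      # grow the master data volume from the default 8Gi
+```
+
+```bash
+$ helm install my-release -f my-values.yaml stable/redis
+```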
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the current one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using `redis-cli` or similar): + +``` +SENTINEL get-master-addr-by-name <name of your master set> +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis, you need to create a secret containing the password. + +> *NOTE*: It is important that the file containing the password is called `redis-password` + +And then deploy the Helm Chart using the secret name as a parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinel.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using a configuration similar to the one described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels`, pods from other namespaces can connect to Redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in the matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true`, the default tag for the exporter image is now `v1.x.x`.
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. 
+ +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/default-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
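+
+As a hedged usage note, the `ci/*-values.yaml` files above and below are ordinary Helm values files used by the chart's CI; any of them can also be exercised manually with the same `-f` flag shown in the README, for example (the release name `ci-check` is just an example):
+
+```bash
+$ helm install ci-check -f ci/dev-values.yaml stable/redis
+```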
diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/dev-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/production-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
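To make the global Docker image parameters described at the top of this production values file concrete, a minimal override could look like the sketch below; the registry hostname and secret name are placeholders, and the behaviour follows the image helpers added later in this patch, which prefer global.imageRegistry over the per-image registry when it is set:

global:
  imageRegistry: my.private.registry.example   # placeholder registry
  imagePullSecrets:
    - myRegistryKeySecretName                  # placeholder, must already exist in the namespace

With this override, the default bitnami/redis:5.0.5-debian-9-r36 image is pulled as my.private.registry.example/bitnami/redis:5.0.5-debian-9-r36, and the same registry applies to the sentinel, metrics exporter and init-container images.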
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/data-connector-qwc/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/NOTES.txt b/qliksense/charts/data-connector-qwc/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . 
}}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/_helpers.tpl b/qliksense/charts/data-connector-qwc/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
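As a worked illustration of this helper (the global registry value is assumed; the repository and tag match the chart defaults above): given
  global.imageRegistry = my.registry.example
  image.repository     = bitnami/redis
  image.tag            = 5.0.5-debian-9-r36
it renders "my.registry.example/bitnami/redis:5.0.5-debian-9-r36"; when no global registry is set, it falls back to image.registry and renders "docker.io/bitnami/redis:5.0.5-debian-9-r36".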
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/configmap.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/headless-svc.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . 
}}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/health-configmap.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD 
--no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/psp.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . 
}}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . 
}} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-role.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . 
}} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . 
}} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/templates/secret.yaml b/qliksense/charts/data-connector-qwc/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/values-production.yaml b/qliksense/charts/data-connector-qwc/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
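+  ## For example (hypothetical registry and secret names, matching the commented
+  ## values below), such a secret can be created ahead of time with:
+  ##   kubectl create secret docker-registry myRegistryKeySecretName \
+  ##     --docker-server=myRegistryName --docker-username=<user> --docker-password=<password>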
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. 
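+## The templates use this value when building in-cluster FQDNs; for instance, the slave
+## statefulset points REDIS_MASTER_HOST at something like (with a hypothetical release
+## named "my-release" deployed to the "default" namespace):
+##   my-release-redis-master-0.my-release-redis-headless.default.svc.cluster.local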
+## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
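+    ## A hypothetical example would be `nodePort: 30379` (the value must fall in the
+    ## cluster's NodePort range, 30000-32767 by default).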
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
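+  ## (for example, a `docker-registry` type secret created with
+  ## `kubectl create secret docker-registry`, as for the main Redis image above)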
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/data-connector-qwc/charts/redis/values.schema.json b/qliksense/charts/data-connector-qwc/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + 
"properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/data-connector-qwc/charts/redis/values.yaml b/qliksense/charts/data-connector-qwc/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and
+## create one redis service with ports to the sentinel and the redis instances
+sentinel:
+ enabled: false
+ ## Require password authentication on the sentinel itself
+ ## ref: https://redis.io/topics/sentinel
+ usePassword: true
+ ## Bitnami Redis Sentinel image version
+ ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
+ ##
+ image:
+ registry: docker.io
+ repository: bitnami/redis-sentinel
+ ## Bitnami Redis image tag
+ ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
+ ##
+ tag: 5.0.7-debian-10-r27
+ ## Specify an imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ masterSet: mymaster
+ initialCheckTimeout: 5
+ quorum: 2
+ downAfterMilliseconds: 60000
+ failoverTimeout: 18000
+ parallelSyncs: 1
+ port: 26379
+ ## Additional Redis configuration for the sentinel nodes
+ ## ref: https://redis.io/topics/config
+ ##
+ configmap:
+ ## Enable or disable static sentinel IDs for each replica
+ ## If disabled, each sentinel will generate a random ID at startup
+ ## If enabled, each replica will have a constant ID on each start-up
+ ##
+ staticID: false
+ ## Configure extra options for Redis Sentinel liveness and readiness probes
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 5
+ ## Redis Sentinel resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ # resources:
+ # requests:
+ # memory: 256Mi
+ # cpu: 100m
+ ## Redis Sentinel Service properties
+ service:
+ ## Redis Sentinel Service type
+ type: ClusterIP
+ sentinelPort: 26379
+ redisPort: 6379
+
+ ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ # sentinelNodePort:
+ # redisNodePort:
+
+ ## Provide any additional annotations which may be required. This can be used to
+ ## set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ annotations: {}
+ labels: {}
+ loadBalancerIP:
+
+## Specifies the Kubernetes Cluster's Domain Name.
+##
+clusterDomain: cluster.local
+
+networkPolicy:
+ ## Specifies whether a NetworkPolicy should be created
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port Redis is listening
+ ## on. When true, Redis will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ # allowExternal: true
+
+ ## Allow connections from other namespaces. Set a label on the namespace and, optionally, a label on the pods, as in the commented example below.
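+ ## A minimal sketch of the selectors these settings expect; the label keys and values below are hypothetical examples, not chart defaults:
+ ## ingressNSMatchLabels:
+ ##   name: frontend
+ ## ingressNSPodMatchLabels:
+ ##   app: redis-client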
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+ ## Metrics exporter resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ # resources: {}
+
+ ## Extra arguments for Metrics exporter, for example:
+ ## extraArgs:
+ ## check-keys: myKey,myOtherKey
+ # extraArgs: {}
+
+ ## Metrics exporter pod Annotation and Labels
+ podAnnotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9121"
+ # podLabels: {}
+
+ # Enable this if you're using https://github.com/coreos/prometheus-operator
+ serviceMonitor:
+ enabled: false
+ ## Specify a namespace if needed
+ # namespace: monitoring
+ # fallback to the prometheus default unless specified
+ # interval: 10s
+ ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
+ ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
+ ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
+ selector:
+ prometheus: kube-prometheus
+
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ namespace: ""
+ rules: []
+ ## These are just example rules; please adapt them to your needs.
+ ## Make sure to constrain the rules to the current redis service.
+ # - alert: RedisDown
+ # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
+ # for: 2m
+ # labels:
+ # severity: error
+ # annotations:
+ # summary: Redis instance {{ "{{ $instance }}" }} down
+ # description: Redis instance {{ "{{ $instance }}" }} is down.
+ # - alert: RedisMemoryHigh
+ # expr: >
+ # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
+ # /
+ # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
+ # > 90 <= 100
+ # for: 2m
+ # labels:
+ # severity: error
+ # annotations:
+ # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory
+ # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+ # - alert: RedisKeyEviction
+ # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
+ # for: 1s
+ # labels:
+ # severity: error
+ # annotations:
+ # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys
+ # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+
+
+ ## Metrics exporter pod priorityClassName
+ # priorityClassName: {}
+ service:
+ type: ClusterIP
+ ## Use loadBalancerIP to request a specific static IP,
+ ## otherwise leave blank
+ # loadBalancerIP:
+ annotations: {}
+ labels: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: buster
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/data-connector-qwc/requirements.yaml b/qliksense/charts/data-connector-qwc/requirements.yaml new file mode 100644 index 0000000..21a2d9c --- /dev/null +++ b/qliksense/charts/data-connector-qwc/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: redis + version: 10.5.6 + repository: "@stable" + condition: redis.enabled diff --git a/qliksense/charts/data-connector-qwc/templates/_helpers.tpl b/qliksense/charts/data-connector-qwc/templates/_helpers.tpl new file mode 100644 index 0000000..b48bb1f --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-connector-qwc.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "data-connector-qwc.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-connector-qwc.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return data-connector-qwc image name */}} +{{- define "data-connector-qwc.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{/* Return data-connector-qwc-web image name */}} +{{- define "data-connector-qwc-web.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $webrepository := required "A valid image.webrepository entry required!" .Values.image.webrepository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $webrepository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $webrepository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $webrepository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/templates/deployment-cmd.yaml b/qliksense/charts/data-connector-qwc/templates/deployment-cmd.yaml new file mode 100644 index 0000000..61d6768 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/deployment-cmd.yaml @@ -0,0 +1,118 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.command.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "command" + spec: + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.secrets.use_connector_settings }} + - name: "qwc-secrets-volume" + secret: + secretName: "qwc-secrets" +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-qwc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . 
| nindent 12 }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.secrets.use_connector_settings }} + - name: qwc-secrets-volume + mountPath: "/etc/qwc-secrets" + readOnly: true +{{- end }} + env: + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" +{{- if .Values.redis.enabled }} + - name: QWC_REDIS_CONFIG_STRING + value: "{{ .Release.Name }}-redis-master" + - name: QWC_REDIS_PASSWORD + value: "" +{{- else}} + - name: QWC_REDIS_CONFIG_STRING + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.redis_secret_name }} + key: redis-addr + - name: QWC_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.redis_secret_name }} + key: redis-password +{{- end}} +{{- if .Values.secrets.enc_key_for_connector_settings }} + - name: QWC_ENC_KEY_FOR_CONNECTOR_SETTINGS + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_connector_settings +{{- end}} +{{- if .Values.secrets.enc_key_for_temp_state }} + - name: QWC_ENC_KEY_FOR_TEMP_STATE + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_temp_state +{{- end}} +{{- if .Values.secrets.enc_key_for_params }} + - name: QWC_ENC_KEY_FOR_PARAMS + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_params +{{- end}} + - name: QWC_LOG_LEVEL + value: "{{ .Values.log_level }}" + - name: QWC_ALLOW_REFERENCE_CONNECTOR + value: "{{ .Values.allow_reference_connector }}" + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + {{- if .Values.resourcesCmdPods }} + resources: +{{ toYaml .Values.resourcesCmdPods | indent 12 }} + {{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-qwc/templates/deployment-rld.yaml b/qliksense/charts/data-connector-qwc/templates/deployment-rld.yaml new file mode 100644 index 0000000..4ec05c7 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/deployment-rld.yaml @@ -0,0 +1,133 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.reload.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "reload" + spec: + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . 
| nindent 8 }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.secrets.use_connector_settings }} + - name: "qwc-secrets-volume" + secret: + secretName: "qwc-secrets" +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-qwc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.secrets.use_connector_settings }} + - name: qwc-secrets-volume + mountPath: "/etc/qwc-secrets" + readOnly: true +{{- end }} + env: + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: MAX_ACTIVE_RELOADS + value: "{{ .Values.reload.maxActiveReloads }}" + - name: FILE_TRANSFER_ABOVE_MAX_ALLOWANCE + value: "{{ .Values.reload.fileTransferAboveMaxAllowance }}" +{{- if .Values.redis.enabled }} + - name: QWC_REDIS_CONFIG_STRING + value: "{{ .Release.Name }}-redis-master" + - name: QWC_REDIS_PASSWORD + value: "" +{{- else}} + - name: QWC_REDIS_CONFIG_STRING + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.redis_secret_name }} + key: redis-addr + - name: QWC_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-dcaas-redis-secret" .Release.Name ) .Values.redis_secret_name }} + key: redis-password +{{- end}} +{{- if .Values.secrets.enc_key_for_connector_settings }} + - name: QWC_ENC_KEY_FOR_CONNECTOR_SETTINGS + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_connector_settings +{{- end}} +{{- if .Values.secrets.enc_key_for_temp_state }} + - name: QWC_ENC_KEY_FOR_TEMP_STATE + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_temp_state +{{- end}} +{{- if .Values.secrets.enc_key_for_params }} + - name: QWC_ENC_KEY_FOR_PARAMS + valueFrom: + secretKeyRef: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} + key: enc_key_for_params +{{- end}} + - name: QWC_LOG_LEVEL + value: "{{ .Values.log_level }}" + - name: QWC_ALLOW_REFERENCE_CONNECTOR + value: "{{ .Values.allow_reference_connector }}" + - name: ENABLE_SHUTDOWN_DRAIN + value: "{{ .Values.shutdownDrain.enabled }}" + - name: SHUTDOWN_TIMEOUT + value: "{{ .Values.terminationGracePeriodSeconds }}" + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: /readiness + port: {{ .Values.service.port }} + initialDelaySeconds: 2 + periodSeconds: 10 +{{- if .Values.resourcesRldPods }} + resources: +{{ toYaml .Values.resourcesRldPods | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . 
}} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-qwc/templates/deployment-web.yaml b/qliksense/charts/data-connector-qwc/templates/deployment-web.yaml new file mode 100644 index 0000000..1c6c5aa --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/deployment-web.yaml @@ -0,0 +1,48 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-web" }} + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . }} + action: "web" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.web.replicaCount }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "web" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-qwc-web.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 6384 + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 6384 + {{- if .Values.resourcesWebPods }} + resources: +{{ toYaml .Values.resourcesWebPods | indent 12 }} + {{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "web" diff --git a/qliksense/charts/data-connector-qwc/templates/hpa-rld.yaml b/qliksense/charts/data-connector-qwc/templates/hpa-rld.yaml new file mode 100644 index 0000000..8e3d956 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/hpa-rld.yaml @@ -0,0 +1,32 @@ +{{ if .Values.reload.useHpaReload }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-rld" }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta2 + kind: Deployment + name: {{ template "data-connector-qwc.fullname" . }}{{ "-rld" }} + minReplicas: {{ .Values.hpaReload.minReplicas }} + maxReplicas: {{ .Values.hpaReload.maxReplicas }} + metrics: +{{ if .Values.hpaReload.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpaReload.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationMemory }} +{{- end }} +{{ if .Values.hpaReload.targetAverageValueEnabled }} + - type: Pods + pods: + metricName: {{ .Values.hpaReload.metricName }} + targetAverageValue: {{ .Values.hpaReload.targetAverageValue }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-qwc/templates/ingress.yaml b/qliksense/charts/data-connector-qwc/templates/ingress.yaml new file mode 100644 index 0000000..81c4cec --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/ingress.yaml @@ -0,0 +1,36 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "data-connector-qwc.fullname" . }}-web + labels: + app: {{ template "data-connector-qwc.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?returnto=$escaped_request_uri + nginx.ingress.kubernetes.io/rewrite-target: "/$1/$2" + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - http: + paths: + - path: /customdata/64/(QvWebConnectorPackage)/(.+) + backend: + serviceName: {{ template "data-connector-qwc.fullname" . }}-web + servicePort: 6384 + - path: /customdata/64/(QvWebStorageProviderConnectorPackage)/(.+) + backend: + serviceName: {{ template "data-connector-qwc.fullname" . }}-web + servicePort: 6384 + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} diff --git a/qliksense/charts/data-connector-qwc/templates/network.yaml b/qliksense/charts/data-connector-qwc/templates/network.yaml new file mode 100644 index 0000000..069605a --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/network.yaml @@ -0,0 +1,78 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "data-connector-qwc.name" . }}-network-policy + namespace: {{ .Values.networkPolicy.namespace }} +spec: + podSelector: + matchLabels: + app: {{ template "data-connector-qwc.name" . }} + policyTypes: + - Ingress + - Egress + ingress: + # QCWPI-1120 TODO limit ingress to known pods that will connect. + - {} + egress: + # Allow DNS to other pods (need to resolve hostname for Redis pod). + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + to: + - namespaceSelector: {} + # Allow access to Redis pod. + - ports: + - port: 6379 + protocol: TCP + to: + - podSelector: + matchLabels: + # These two are enough to identify Redis pod uniquely. + app: dcaas-redis + role: master + # Allow specific ports externally. + - ports: + # Allow DNS. + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Certificate authorities. + - port: 80 + protocol: TCP + # Note we deny access via port 80 (HTTP) but allow 443 (HTTPS). + - port: 443 + protocol: TCP + # IMAP encrypted + - port: 993 + protocol: TCP + # POP3 encrypted + #- port: 995 + # protocol: TCP + # SMTP common ports + - port: 465 + protocol: TCP + - port: 587 + protocol: TCP + - port: 2525 + protocol: TCP + - port: 2526 + protocol: TCP + to: + - ipBlock: + cidr: {{ .Values.networkPolicy.allnetworkscidr }} + # Deny access within cluster. + except: + {{- if .Values.networkPolicy.blockedcidrs }} + {{- range .Values.networkPolicy.blockedcidrs }} + - {{ . }} + {{- end }} + {{- else }} + - {{ .Values.networkPolicy.clustercidr }} + - {{ .Values.networkPolicy.vpccidr }} + {{- end }} + # Assumption is that we cannot deny access to localhost (and anyway QWC has code to prevent access to localhost). 
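+ # For reference, a minimal sketch of the values.yaml block this template consumes; the CIDR
+ # values below are hypothetical examples, not chart defaults:
+ # networkPolicy:
+ #   enabled: true
+ #   namespace: default
+ #   allnetworkscidr: 0.0.0.0/0
+ #   clustercidr: 10.0.0.0/14
+ #   vpccidr: 172.20.0.0/16
+ #   blockedcidrs: []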
+{{- end -}} diff --git a/qliksense/charts/data-connector-qwc/templates/secrets-connector-cfg.yaml b/qliksense/charts/data-connector-qwc/templates/secrets-connector-cfg.yaml new file mode 100644 index 0000000..de0a6d4 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/secrets-connector-cfg.yaml @@ -0,0 +1,9 @@ +{{- if .Values.secrets.connector_settings }} +apiVersion: v1 +kind: Secret +metadata: + name: qwc-secrets +type: Opaque +data: + qcs_secrets.config: {{ .Values.secrets.connector_settings | b64enc }} +{{- end}} diff --git a/qliksense/charts/data-connector-qwc/templates/secrets-keys.yaml b/qliksense/charts/data-connector-qwc/templates/secrets-keys.yaml new file mode 100644 index 0000000..ca9f184 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/secrets-keys.yaml @@ -0,0 +1,17 @@ +{{- if or .Values.secrets.enc_key_for_connector_settings .Values.secrets.enc_key_for_temp_state .Values.secrets.enc_key_for_params }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-encryption-keys" }} +type: Opaque +data: +{{- if .Values.secrets.enc_key_for_connector_settings }} + enc_key_for_connector_settings: {{ .Values.secrets.enc_key_for_connector_settings | b64enc }} +{{- end}} +{{- if .Values.secrets.enc_key_for_temp_state }} + enc_key_for_temp_state: {{ .Values.secrets.enc_key_for_temp_state | b64enc }} +{{- end}} +{{- if .Values.secrets.enc_key_for_params }} + enc_key_for_params: {{ .Values.secrets.enc_key_for_params | b64enc }} +{{- end}} +{{- end}} diff --git a/qliksense/charts/data-connector-qwc/templates/service-cmd.yaml b/qliksense/charts/data-connector-qwc/templates/service-cmd.yaml new file mode 100644 index 0000000..3683764 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/service-cmd.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-qwc/templates/service-rld.yaml b/qliksense/charts/data-connector-qwc/templates/service-rld.yaml new file mode 100644 index 0000000..a251828 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/service-rld.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-qwc.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . 
}} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-qwc/templates/service-web.yaml b/qliksense/charts/data-connector-qwc/templates/service-web.yaml new file mode 100644 index 0000000..381af13 --- /dev/null +++ b/qliksense/charts/data-connector-qwc/templates/service-web.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-qwc.fullname" . }}-web + labels: + app: {{ template "data-connector-qwc.name" . }} + chart: {{ template "data-connector-qwc.chart" . }} + action: "web" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: "9121" +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: 6384 + targetPort: 6384 + protocol: TCP + name: http + selector: + app: {{ template "data-connector-qwc.name" . }} + release: {{ .Release.Name }} + action: "web" diff --git a/qliksense/charts/data-connector-qwc/values.yaml b/qliksense/charts/data-connector-qwc/values.yaml new file mode 100644 index 0000000..a1366cf --- /dev/null +++ b/qliksense/charts/data-connector-qwc/values.yaml @@ -0,0 +1,179 @@ +# Default values for Data Connector QWC Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## Data Connector QWC images. + ## + repository: qlik-download/data-connector-qwc + webrepository: qlik-download/data-connector-qwc-web + + + ## Data Connector QWC image version. + ## ref: https://qliktech.jfrog.io/qliktech/webapp/#/artifacts/browse/tree/General/docker/data-connector-qwc + ## + tag: 0.80.0 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + ## pullPolicy: Always + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Settings for pods serving static web files. +web: + replicaCount: 1 + +## Settings for pods serving command requests +command: + replicaCount: 1 + +## Settings for pods serving GetData requests +reload: + ## scale reload connectors elastically or not + useHpaReload: false + ## replicaCount to be used if useHpaReload is false + replicaCount: 1 + ## maximum number of active reloads - this limits the number of GetData requests + ## accepted by a pod. 
It also governs the readiness endpoint (the pod reports not ready when the limit is reached).
+ ## If you change this limit, configure cpu and memory resources appropriately.
+ maxActiveReloads: 3
+ ## additional active reloads (uploads or downloads) for file hosting connectors only
+ fileTransferAboveMaxAllowance: 2
+
+## Settings for elastically scaling pods serving GetData requests
+## Requires reload.useHpaReload: true
+hpaReload:
+ minReplicas: 2
+ maxReplicas: 5
+ targetAverageUtilizationCpuEnabled: false
+ # targetAverageUtilizationCpu: 80
+ targetAverageUtilizationMemoryEnabled: false
+ # targetAverageUtilizationMemory: 80
+ targetAverageValueEnabled: false
+ # Name of the custom metric to use for HPA
+ # metricName: "connector_active_getdata_requests"
+ # Average target value of the metric to trigger auto scaling
+ # targetAverageValue: 3
+
+## Graceful termination logic allowing active connections to finish
+shutdownDrain:
+ enabled: false
+
+## Number of seconds to wait during pod termination until SIGKILL.
+terminationGracePeriodSeconds: 30
+
+## The network policy associated with this service.
+networkPolicy:
+ enabled: false
+
+## Service configuration.
+## ref: https://kubernetes.io/docs/user-guide/services/
+##
+service:
+ type: ClusterIP
+ grpc: 50060
+ port: 3005
+
+## Metrics configuration
+metrics:
+ ## Prometheus configuration
+ prometheus:
+ ## prometheus.enabled determines whether the annotations for prometheus scraping are included
+ enabled: true
+
+## deployment resources
+resourcesCmdPods: {}
+resourcesRldPods: {}
+resourcesWebPods: {}
+
+secrets:
+ ## Set this flag to use a connector settings file.
+ use_connector_settings: false
+
+ ## The key used by QWC to decrypt connector settings.
+ enc_key_for_connector_settings: null
+
+ ## The key used by QWC to encrypt temporary state.
+ ## If you leave it blank a key in the image will be used - this is
+ ## NOT RECOMMENDED for production.
+ enc_key_for_temp_state: null
+
+ ## The key used by QWC to encrypt params in the connection.
+ ## If you leave it blank a key in the image will be used - this is
+ ## NOT RECOMMENDED for production.
+ enc_key_for_params: null
+
+## Redis configuration
+##
+redis:
+ ## Enables a Redis chart by default (for local development for example)
+ enabled: true
+ ## Image pull policy for Redis chart
+ image:
+ pullPolicy: IfNotPresent
+ ## Disable password authentication by default (for local development for example)
+ usePassword: false
+ ## Disable master-secondary topology by default (for local development for example)
+ cluster:
+ enabled: false
+ ## master node configurations
+ master:
+ securityContext:
+ enabled: false
+ statefulset:
+ ## Updating all Pods in a StatefulSet, in reverse ordinal order, while respecting the StatefulSet guarantees
+ updateStrategy: RollingUpdate
+ slave:
+ securityContext:
+ enabled: false
+ ## metrics configurations
+ metrics:
+ enabled: true
+ service:
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9121"
+
+# Log level for the QWC connectors. This can be TRACE | INFO | WARN | ERROR | OFF
+log_level: INFO
+
+allow_reference_connector: "true"
+
+## Ingress configuration.
+## ref: https://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+
+ ## class provides a kubernetes.io/ingress.class override of the default (nginx)
+ class: nginx
+
+ ## authURL override of the default http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth
+ # authURL:
+
+ ## Annotations to be added to the ingress.
+ ##
+ annotations: []
+
+ ## Default host. Ingress will not work unless this host is resolved.
+ # host: elastic.example
+
+ ## TLS configuration.
+ ##
+ # tls:
+ # - secretName: elastic-infra-elastic-infra-tls-secret
+ # hosts:
+ # - elastic.example
diff --git a/qliksense/charts/data-connector-rest/.helmignore b/qliksense/charts/data-connector-rest/.helmignore
new file mode 100644
index 0000000..dd3e638
--- /dev/null
+++ b/qliksense/charts/data-connector-rest/.helmignore
@@ -0,0 +1 @@
+dependencies.yaml
diff --git a/qliksense/charts/data-connector-rest/Chart.yaml b/qliksense/charts/data-connector-rest/Chart.yaml
new file mode 100644
index 0000000..a88528c
--- /dev/null
+++ b/qliksense/charts/data-connector-rest/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Custom connector to access rest data sources through gRPC
+home: https://www.qlik.com
+name: data-connector-rest
+sources:
+- https://github.com/qlik-trial/data-connector-rest
+version: 1.2.9
diff --git a/qliksense/charts/data-connector-rest/README.md b/qliksense/charts/data-connector-rest/README.md
new file mode 100644
index 0000000..90426d2
--- /dev/null
+++ b/qliksense/charts/data-connector-rest/README.md
@@ -0,0 +1,54 @@
+# data-connector-rest
+
+[data-connector-rest](https://github.com/qlik-trial/qvrestconnector) is the service that provides access to REST data sources through gRPC.
+
+## Introduction
+
+This chart bootstraps a data-connector-rest service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release qlik/data-connector-rest
+```
+
+The command deploys data-connector-rest on the Kubernetes cluster in the default configuration.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+The following table lists the configurable parameters of the chart and their default values.
+
+| Parameter | Description | Default |
+|---------------------------------|----------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `env.loglevel` | Log level (ERROR, INFO, DEBUG) | `INFO` |
+| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` |
+| `image.registry` | The default registry where the repository is pulled from.
| `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `data-connector-rest` | +| `image.tag` | image version | `2.30.0` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Data Connector Rest external port (rest) | `3005` | +| `service.grpc` | Data Connector Rest external port (grpc) | `50060` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | +| `shutdownDrain.enabled` | Toggle Graceful termination logic allowing active connections to finish | `false` | +| `terminationGracePeriodSeconds` | maximum time k8s will wait before sending SIGKILL | `30` | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml data-connector-rest +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-rest/templates/_helpers.tpl b/qliksense/charts/data-connector-rest/templates/_helpers.tpl new file mode 100644 index 0000000..6566633 --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-connector-rest.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "data-connector-rest.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-connector-rest.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "data-connector-rest-networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* Return data-connector-rest image name */}} +{{- define "data-connector-rest.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-rest/templates/deployment-cmd.yaml b/qliksense/charts/data-connector-rest/templates/deployment-cmd.yaml new file mode 100644 index 0000000..c1c9234 --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/deployment-cmd.yaml @@ -0,0 +1,81 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-rest.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-rest.name" . }} + chart: {{ template "data-connector-rest.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.command.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "command" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-rest.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Rest_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + {{- if .Values.networkPolicy.enabled }} + - name: BLOCKED_CIDRS + value: {{ join "," .Values.networkPolicy.blockedcidrs }} + - name: ALLOW_SOURCES + value: {{ .Values.networkPolicy.allowRestSource }} + {{- end }} + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13100 + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-rest/templates/deployment-rld.yaml b/qliksense/charts/data-connector-rest/templates/deployment-rld.yaml new file mode 100644 index 0000000..ea68803 --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/deployment-rld.yaml @@ -0,0 +1,94 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-rest.fullname" . 
}}{{ "-rld" }} + labels: + app: {{ template "data-connector-rest.name" . }} + chart: {{ template "data-connector-rest.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.reload.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "reload" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-rest.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Rest_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: MAX_ACTIVE_RELOADS + value: "{{ .Values.env.maxActiveReloads }}" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + - name: ENABLE_SHUTDOWN_DRAIN + value: "{{ .Values.shutdownDrain.enabled }}" + - name: SHUTDOWN_TIMEOUT + value: "{{ .Values.terminationGracePeriodSeconds }}" + {{- if .Values.networkPolicy.enabled }} + - name: BLOCKED_CIDRS + value: {{ join "," .Values.networkPolicy.blockedcidrs }} + - name: ALLOW_SOURCES + value: {{ .Values.networkPolicy.allowRestSource }} + {{- end }} + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: /readiness + port: {{ .Values.service.port }} + initialDelaySeconds: 2 + periodSeconds: 10 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13100 + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-rest/templates/hpa-rld.yaml b/qliksense/charts/data-connector-rest/templates/hpa-rld.yaml new file mode 100644 index 0000000..26d27ea --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/hpa-rld.yaml @@ -0,0 +1,32 @@ +{{ if .Values.reload.useHpaReload }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-connector-rest.fullname" . }}{{ "-rld" }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta2 + kind: Deployment + name: {{ template "data-connector-rest.fullname" . 
}}{{ "-rld" }} + minReplicas: {{ .Values.hpaReload.minReplicas }} + maxReplicas: {{ .Values.hpaReload.maxReplicas }} + metrics: +{{ if .Values.hpaReload.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpaReload.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationMemory }} +{{- end }} +{{ if .Values.hpaReload.targetAverageValueEnabled }} + - type: Pods + pods: + metricName: {{ .Values.hpaReload.metricName }} + targetAverageValue: {{ .Values.hpaReload.targetAverageValue }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-rest/templates/network.yaml b/qliksense/charts/data-connector-rest/templates/network.yaml new file mode 100644 index 0000000..59291fe --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/network.yaml @@ -0,0 +1,38 @@ + +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "data-connector-rest-networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "data-connector-rest.name" . }}-network-policy + namespace: {{ .Values.networkPolicy.namespace }} +spec: + podSelector: + matchLabels: + app: {{ template "data-connector-rest.name" . }} + policyTypes: + - Egress + - Ingress + ingress: + - {} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.externalcidr }} + except: + {{- if .Values.networkPolicy.blockedcidrs }} + {{- range .Values.networkPolicy.blockedcidrs }} + - {{ . }} + {{- end }} + {{- else }} + - {{ .Values.networkPolicy.clustercidr }} + - {{ .Values.networkPolicy.vpccidr }} + {{- end }} + - podSelector: + matchLabels: + app: {{ .Values.networkPolicy.allowRestSource }} +{{- end -}} diff --git a/qliksense/charts/data-connector-rest/templates/service-cmd.yaml b/qliksense/charts/data-connector-rest/templates/service-cmd.yaml new file mode 100644 index 0000000..a81a8d0 --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/service-cmd.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-rest.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-rest.name" . }} + chart: {{ template "data-connector-rest.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-rest/templates/service-rld.yaml b/qliksense/charts/data-connector-rest/templates/service-rld.yaml new file mode 100644 index 0000000..f92f74d --- /dev/null +++ b/qliksense/charts/data-connector-rest/templates/service-rld.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-rest.fullname" . 
}}{{ "-rld" }} + labels: + app: {{ template "data-connector-rest.name" . }} + chart: {{ template "data-connector-rest.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-rest.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-rest/values.yaml b/qliksense/charts/data-connector-rest/values.yaml new file mode 100644 index 0000000..a2ef643 --- /dev/null +++ b/qliksense/charts/data-connector-rest/values.yaml @@ -0,0 +1,99 @@ +# Default values for Data Connector Rest Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## Data Connector Rest image. + ## + repository: qlik-download/data-connector-rest + + ## Data Connector Rest image version. + ## ref: https://hub.docker.com/r/qlik/data-connector-rest/tags/ + ## + tag: 2.39.0 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + ## pullPolicy: Always + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Other environment variables +env: + ## log level + loglevel: "INFO" + ## maximum number of active reloads - this limits the number of GetData requests + ## accepted by a pod. It also governs the readiness endpoint (becomes not ready when limit reached) + ## if you change this limit, you should configure cpu and memory resources appropriately + maxActiveReloads: 3 + ## defines the environment (QSEOK / QCS) + qsEnvironment: "QSEOK" + +## The network policy associated with this service. +networkPolicy: + enabled: false + +## Settings for pods serving command requests +command: + replicaCount: 1 + +## Settings for pods serving GetData requests +reload: + ## scale reload connectors elastically or not + useHpaReload: false + ## replicaCount to be used if useHpaReload is false + replicaCount: 1 + +## Graceful termination logic allowing active connections to finish +shutdownDrain: + enabled: false + +## Number of seconds to wait during pod termination until SIGKILL. +## +terminationGracePeriodSeconds: 30 + +## Settings for elastically scaling pods serving GetData requests +## Requires reload.useHpa true +hpaReload: + minReplicas: 2 + maxReplicas: 5 + targetAverageUtilizationCpuEnabled: false + # targetAverageUtilizationCpu: 80 + targetAverageUtilizationMemoryEnabled: false + # targetAverageUtilizationMemory: 80 + targetAverageValueEnabled: false + # Name of the custom metric to use for HPA + # metricName: "connector_active_getdata_requests" + # Average target value of the metric to trigger auto scaling + # targetAverageValue: 3 + +## Service configuration. 
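+## `port` serves the connector's REST and health endpoints while `grpc` carries the data connection; both are exposed by the -cmd and -rld Services.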
+## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + grpc: 50060 + port: 3005 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## deployment resources +resources: {} diff --git a/qliksense/charts/data-connector-sap-sql/.helmignore b/qliksense/charts/data-connector-sap-sql/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-connector-sap-sql/Chart.yaml b/qliksense/charts/data-connector-sap-sql/Chart.yaml new file mode 100644 index 0000000..374ee8d --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Custom connector to access sap-sql data sources through gRPC +home: https://www.qlik.com +name: data-connector-sap-sql +sources: +- https://github.com/qlik-trial/data-connector-sap-sql +version: 1.1.18 diff --git a/qliksense/charts/data-connector-sap-sql/README.md b/qliksense/charts/data-connector-sap-sql/README.md new file mode 100644 index 0000000..a354b49 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/README.md @@ -0,0 +1,51 @@ +# data-connector-sap-sql + +[data-connector-sap-sql](https://github.com/qlik-trial/sap-connector-sql) is the service that provides SAP SQL Connector for connecting to SAP ERP & BW Systems. + +## Introduction + +This chart bootstraps a data-connector-sap-sql service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/data-connector-sap-sql +``` + +The command deploys data-connector-sap-sql on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following tables lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `env.loglevel` | Log level (ERROR, INFO, DEBUG) | `INFO` | +| `env.licensesUrl` | licenses service URL | `http://licenses:9200` | +| `image.repository` | image name | `qliktech-docker.jfrog.io/data-connector-sap-sql`| +| `image.tag` | image version | `7.0.18` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Data Connector SAP-SQL external port (rest) | `3005` | +| `service.port` | Data Connector SAP-SQL external port (grpc) | `50060` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +$ helm install --name my-release -f values.yaml data-connector-sap-sql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-sap-sql/templates/_helpers.tpl b/qliksense/charts/data-connector-sap-sql/templates/_helpers.tpl new file mode 100644 index 0000000..0eab884 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-connector-sap-sql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "data-connector-sap-sql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-connector-sap-sql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "data-connector-sap-sql-networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* Return data-connector-sap-sql image name */}} +{{- define "data-connector-sap-sql.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-sap-sql/templates/deployment-cmd.yaml b/qliksense/charts/data-connector-sap-sql/templates/deployment-cmd.yaml new file mode 100644 index 0000000..a08e1b8 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/deployment-cmd.yaml @@ -0,0 +1,72 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-sap-sql.name" . }} + chart: {{ template "data-connector-sap-sql.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.command.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-sap-sql.name" . 
}} + release: {{ .Release.Name }} + action: "command" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-sap-sql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: LICENSES_URL + value: {{ default (printf "http://%s-licenses:9200" .Release.Name ) .Values.env.licensesUrl | quote }} + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/SAP-sql_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-sap-sql.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-sap-sql/templates/deployment-rld.yaml b/qliksense/charts/data-connector-sap-sql/templates/deployment-rld.yaml new file mode 100644 index 0000000..d1e1213 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/deployment-rld.yaml @@ -0,0 +1,80 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-sap-sql.name" . }} + chart: {{ template "data-connector-sap-sql.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.reload.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-sap-sql.name" . }} + release: {{ .Release.Name }} + action: "reload" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-sap-sql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: LICENSES_URL + value: {{ default (printf "http://%s-licenses:9200" .Release.Name ) .Values.env.licensesUrl | quote }} + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/SAP-sql_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: MAX_ACTIVE_RELOADS + value: "{{ .Values.env.maxActiveReloads }}" + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . 
| nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: /readiness + port: {{ .Values.service.port }} + initialDelaySeconds: 2 + periodSeconds: 10 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-sap-sql.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-sap-sql/templates/hpa-rld.yaml b/qliksense/charts/data-connector-sap-sql/templates/hpa-rld.yaml new file mode 100644 index 0000000..7c09068 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/hpa-rld.yaml @@ -0,0 +1,32 @@ +{{ if .Values.reload.useHpaReload }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-rld" }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta2 + kind: Deployment + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-rld" }} + minReplicas: {{ .Values.hpaReload.minReplicas }} + maxReplicas: {{ .Values.hpaReload.maxReplicas }} + metrics: +{{ if .Values.hpaReload.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpaReload.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationMemory }} +{{- end }} +{{ if .Values.hpaReload.targetAverageValueEnabled }} + - type: Pods + pods: + metricName: {{ .Values.hpaReload.metricName }} + targetAverageValue: {{ .Values.hpaReload.targetAverageValue }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-sap-sql/templates/network.yaml b/qliksense/charts/data-connector-sap-sql/templates/network.yaml new file mode 100644 index 0000000..bf0f840 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/network.yaml @@ -0,0 +1,44 @@ + +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "data-connector-sap-sql-networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "data-connector-sap-sql.name" . }}-network-policy + namespace: {{ .Values.networkPolicy.namespace }} +spec: + podSelector: + matchLabels: + app: {{ template "data-connector-sap-sql.name" . }} + policyTypes: + - Egress + - Ingress + ingress: + - {} + egress: + # Allow DNS to other pods + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow access to licenses pod. + - ports: + - port: 9200 + protocol: TCP + to: + - podSelector: + matchLabels: + app: licenses + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.externalcidr }} + except: + {{- if .Values.networkPolicy.blockedcidrs }} + {{- range .Values.networkPolicy.blockedcidrs }} + - {{ . 
}} + {{- end }} + {{- else }} + - {{ .Values.networkPolicy.clustercidr }} + - {{ .Values.networkPolicy.vpccidr }} + {{- end }} +{{- end -}} diff --git a/qliksense/charts/data-connector-sap-sql/templates/service-cmd.yaml b/qliksense/charts/data-connector-sap-sql/templates/service-cmd.yaml new file mode 100644 index 0000000..d0683f3 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/service-cmd.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-sap-sql.name" . }} + chart: {{ template "data-connector-sap-sql.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-sap-sql.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-sap-sql/templates/service-rld.yaml b/qliksense/charts/data-connector-sap-sql/templates/service-rld.yaml new file mode 100644 index 0000000..81565ae --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/templates/service-rld.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-sap-sql.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-sap-sql.name" . }} + chart: {{ template "data-connector-sap-sql.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-sap-sql.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-sap-sql/values.yaml b/qliksense/charts/data-connector-sap-sql/values.yaml new file mode 100644 index 0000000..e36c020 --- /dev/null +++ b/qliksense/charts/data-connector-sap-sql/values.yaml @@ -0,0 +1,88 @@ +# Default values for Data Connector SAP-SQL Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## Data Connector SAP-SQL image. + ## + repository: qlik-download/data-connector-sap-sql + + ## Data Connector SAP-SQL image version. + ## + tag: 7.0.18 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. 
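+  ## For example, overriding this at install time (`--set image.pullPolicy=Always`) forces the image to be re-pulled whenever a pod is recreated, which is mainly useful when tracking a mutable tag.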
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Other environment variables +env: + ## log level + loglevel: "INFO" + ## maximum number of active reloads - this limits the number of GetData requests + ## accepted by a pod. It also governs the readiness endpoint (becomes not ready when limit reached) + ## if you change this limit, you should configure cpu and memory resources appropriately + maxActiveReloads: 3 + +## The network policy associated with this service. +networkPolicy: + enabled: false + +## Settings for pods serving command requests +command: + replicaCount: 1 + +## Settings for pods serving GetData requests +reload: + ## scale reload connectors elastically or not + useHpaReload: false + ## replicaCount to be used if useHpaReload is false + replicaCount: 1 + +## Settings for elastically scaling pods serving GetData requests +## Requires reload.useHpa true +hpaReload: + minReplicas: 2 + maxReplicas: 5 + targetAverageUtilizationCpuEnabled: false + # targetAverageUtilizationCpu: 80 + targetAverageUtilizationMemoryEnabled: false + # targetAverageUtilizationMemory: 80 + targetAverageValueEnabled: false + # Name of the custom metric to use for HPA + # metricName: "connector_active_getdata_requests" + # Average target value of the metric to trigger auto scaling + # targetAverageValue: 3 + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + grpc: 50060 + port: 3005 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## deployment resources +resources: {} diff --git a/qliksense/charts/data-connector-sfdc/.helmignore b/qliksense/charts/data-connector-sfdc/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-connector-sfdc/Chart.yaml b/qliksense/charts/data-connector-sfdc/Chart.yaml new file mode 100644 index 0000000..1e8a8b9 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Custom connector to access salesforce.com data sources through gRPC +home: https://www.qlik.com +name: data-connector-sfdc +sources: +- https://github.com/qlik-trial/qvsalesforceconnector +version: 1.6.3 diff --git a/qliksense/charts/data-connector-sfdc/README.md b/qliksense/charts/data-connector-sfdc/README.md new file mode 100644 index 0000000..c203165 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/README.md @@ -0,0 +1,54 @@ +# data-connector-sfdc + +[data-connector-sfdc](https://github.com/qlik-trial/qvsalesforceconnector) is the service that provides static web resources for custom connectors. + +## Introduction + +This chart bootstraps a data-connector-sfdc service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/data-connector-sfdc +``` + +The command deploys data-connector-sfdc on the Kubernetes cluster in the default configuration. 
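+
+Individual parameters can also be overridden at install time with `--set`. For instance (the values shown here are purely illustrative; see the [configuration](#configuration) section below for the full list of parameters):
+
+```console
+$ helm install --name my-release \
+    --set image.pullPolicy=IfNotPresent \
+    --set reload.replicaCount=2 \
+    qlik/data-connector-sfdc
+```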
+ +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `env.loglevel` | Log level (ERROR, INFO, DEBUG) | `INFO` | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil`| +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io`| +| `image.repository` | image name with no registry | `data-connector-sfdc`| +| `image.tag` | image version | `15.24.0` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Data Connector Sfdc external port (rest) | `3005` | +| `service.grpc` | Data Connector Sfdc external port (grpc) | `50060` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | +| `shutdownDrain.enabled` | Toggle graceful termination logic allowing active connections to finish | `false` | +| `terminationGracePeriodSeconds` | maximum time k8s will wait before sending SIGKILL | `30` | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml data-connector-sfdc +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-connector-sfdc/templates/_helpers.tpl b/qliksense/charts/data-connector-sfdc/templates/_helpers.tpl new file mode 100644 index 0000000..f2d0d8f --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-connector-sfdc.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "data-connector-sfdc.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-connector-sfdc.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
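+Kubernetes versions 1.4 through 1.6 resolve to the legacy extensions/v1beta1 group; everything else resolves to networking.k8s.io/v1.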
+*/}} +{{- define "data-connector-sfdc-networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* Return data-connector-sfdc image name */}} +{{- define "data-connector-sfdc.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-connector-sfdc/templates/deployment-cmd.yaml b/qliksense/charts/data-connector-sfdc/templates/deployment-cmd.yaml new file mode 100644 index 0000000..1938b17 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/deployment-cmd.yaml @@ -0,0 +1,75 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-sfdc.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-sfdc.name" . }} + chart: {{ template "data-connector-sfdc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.command.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-sfdc.name" . }} + release: {{ .Release.Name }} + action: "command" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-sfdc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Sfdc_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13200 + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-sfdc.name" . 
}} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-sfdc/templates/deployment-rld.yaml b/qliksense/charts/data-connector-sfdc/templates/deployment-rld.yaml new file mode 100644 index 0000000..3f4dfc7 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/deployment-rld.yaml @@ -0,0 +1,88 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-connector-sfdc.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-sfdc.name" . }} + chart: {{ template "data-connector-sfdc.chart" . }} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.reload.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-connector-sfdc.name" . }} + release: {{ .Release.Name }} + action: "reload" + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-connector-sfdc.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: STANDALONE + value: "false" + - name: DATA_HOST + value: 0.0.0.0 + - name: DATA_PORT_RANGE_START + value: "{{ .Values.service.grpc }}" + - name: DATA_PORT_RANGE_END + value: "{{ .Values.service.grpc }}" + - name: CONFIG + value: /opt/runner/Sfdc_dotnetcore.json + - name: LOG_LEVEL + value: {{ .Values.env.loglevel | quote }} + - name: SERVICE_MODE + value: "true" + - name: MAX_ACTIVE_RELOADS + value: "{{ .Values.env.maxActiveReloads }}" + - name: QS_ENVIRONMENT + value: "{{ .Values.env.qsEnvironment }}" + - name: ENABLE_SHUTDOWN_DRAIN + value: "{{ .Values.shutdownDrain.enabled }}" + - name: SHUTDOWN_TIMEOUT + value: "{{ .Values.terminationGracePeriodSeconds }}" + volumeMounts: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volumeMount" . | nindent 12 }} +{{- end }}{{- end }}{{- end }} + ports: + - name: grpc + containerPort: {{ .Values.service.grpc }} + protocol: TCP + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: /readiness + port: {{ .Values.service.port }} + initialDelaySeconds: 2 + periodSeconds: 10 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + securityContext: + supplementalGroups: + - 13200 + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} +{{- include "qlik.ca-certificates.volume" . | nindent 8 }} +{{- end }}{{- end }}{{- end }} + selector: + matchLabels: + app: {{ template "data-connector-sfdc.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-sfdc/templates/hpa-rld.yaml b/qliksense/charts/data-connector-sfdc/templates/hpa-rld.yaml new file mode 100644 index 0000000..6978af2 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/hpa-rld.yaml @@ -0,0 +1,32 @@ +{{ if .Values.reload.useHpaReload }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-connector-sfdc.fullname" . }}{{ "-rld" }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta2 + kind: Deployment + name: {{ template "data-connector-sfdc.fullname" . 
}}{{ "-rld" }} + minReplicas: {{ .Values.hpaReload.minReplicas }} + maxReplicas: {{ .Values.hpaReload.maxReplicas }} + metrics: +{{ if .Values.hpaReload.targetAverageUtilizationCpuEnabled }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpaReload.targetAverageUtilizationMemoryEnabled }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpaReload.targetAverageUtilizationMemory }} +{{- end }} +{{ if .Values.hpaReload.targetAverageValueEnabled }} + - type: Pods + pods: + metricName: {{ .Values.hpaReload.metricName }} + targetAverageValue: {{ .Values.hpaReload.targetAverageValue }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-connector-sfdc/templates/network.yaml b/qliksense/charts/data-connector-sfdc/templates/network.yaml new file mode 100644 index 0000000..ee0a45e --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/network.yaml @@ -0,0 +1,34 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "data-connector-sfdc-networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "data-connector-sfdc.name" . }}-network-policy + namespace: {{ .Values.networkPolicy.namespace }} +spec: + podSelector: + matchLabels: + app: {{ template "data-connector-sfdc.name" . }} + policyTypes: + - Egress + - Ingress + ingress: + - {} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.externalcidr }} + except: + {{- if .Values.networkPolicy.blockedcidrs }} + {{- range .Values.networkPolicy.blockedcidrs }} + - {{ . }} + {{- end }} + {{- else }} + - {{ .Values.networkPolicy.clustercidr }} + - {{ .Values.networkPolicy.vpccidr }} + {{- end }} +{{- end -}} diff --git a/qliksense/charts/data-connector-sfdc/templates/service-cmd.yaml b/qliksense/charts/data-connector-sfdc/templates/service-cmd.yaml new file mode 100644 index 0000000..b6e657a --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/service-cmd.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-sfdc.fullname" . }}{{ "-cmd" }} + labels: + app: {{ template "data-connector-sfdc.name" . }} + chart: {{ template "data-connector-sfdc.chart" . }} + action: "command" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-sfdc.name" . }} + release: {{ .Release.Name }} + action: "command" diff --git a/qliksense/charts/data-connector-sfdc/templates/service-rld.yaml b/qliksense/charts/data-connector-sfdc/templates/service-rld.yaml new file mode 100644 index 0000000..b2f8bf9 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/templates/service-rld.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-connector-sfdc.fullname" . }}{{ "-rld" }} + labels: + app: {{ template "data-connector-sfdc.name" . }} + chart: {{ template "data-connector-sfdc.chart" . 
}} + action: "reload" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.grpc }} + targetPort: {{ .Values.service.grpc }} + protocol: TCP + name: grpc + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: {{ template "data-connector-sfdc.name" . }} + release: {{ .Release.Name }} + action: "reload" diff --git a/qliksense/charts/data-connector-sfdc/values.yaml b/qliksense/charts/data-connector-sfdc/values.yaml new file mode 100644 index 0000000..15d6365 --- /dev/null +++ b/qliksense/charts/data-connector-sfdc/values.yaml @@ -0,0 +1,98 @@ +# Default values for Data Connector Sfdc Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## Data Connector Sfdc image. + ## + repository: qlik-download/data-connector-sfdc + + ## Data Connector Sfdc image version. + ## ref: https://hub.docker.com/r/qlik/data-connector-sfdc/tags/ + ## + tag: 15.24.0 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + ## pullPolicy: Always + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Other environment variables +env: + ## log level + loglevel: "INFO" + ## maximum number of active reloads - this limits the number of GetData requests + ## accepted by a pod. It also governs the readiness endpoint (becomes not ready when limit reached) + ## if you change this limit, you should configure cpu and memory resources appropriately + maxActiveReloads: 3 + ## defines the environment (QSEOK / QCS) + qsEnvironment: "QSEOK" +## The network policy associated with this service. +networkPolicy: + enabled: false + +## Settings for pods serving command requests +command: + replicaCount: 1 + +## Settings for pods serving GetData requests +reload: + ## scale reload connectors elastically or not + useHpaReload: false + ## replicaCount to be used if useHpaReload is false + replicaCount: 1 + +## Graceful termination logic allowing active connections to finish +shutdownDrain: + enabled: false + ## Number of seconds to wait during pod termination until SIGKILL. + ## + terminationGracePeriodSeconds: 30 + + +## Settings for elastically scaling pods serving GetData requests +## Requires reload.useHpa true +hpaReload: + minReplicas: 2 + maxReplicas: 5 + targetAverageUtilizationCpuEnabled: false + # targetAverageUtilizationCpu: 80 + targetAverageUtilizationMemoryEnabled: false + # targetAverageUtilizationMemory: 80 + targetAverageValueEnabled: false + # Name of the custom metric to use for HPA + # metricName: "connector_active_getdata_requests" + # Average target value of the metric to trigger auto scaling + # targetAverageValue: 3 + +## Service configuration. 
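+## The Prometheus scrape annotation on the -cmd and -rld Services defaults to `port` below unless `metrics.prometheus.port` is set.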
+## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + grpc: 50060 + port: 3005 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## deployment resources +resources: {} diff --git a/qliksense/charts/data-prep/.helmignore b/qliksense/charts/data-prep/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-prep/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-prep/Chart.yaml b/qliksense/charts/data-prep/Chart.yaml new file mode 100644 index 0000000..462ded2 --- /dev/null +++ b/qliksense/charts/data-prep/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +description: DataPrep service enables client applications to access application load + model (metadata), make modifications to the load model (rename fields, add datasources + etc) and request/recommends better association between tables +home: www.qlik.com +name: data-prep +sources: +- https://github.com/qlik-trial/dataprep-service +version: 2.0.7 diff --git a/qliksense/charts/data-prep/README.md b/qliksense/charts/data-prep/README.md new file mode 100644 index 0000000..272dc31 --- /dev/null +++ b/qliksense/charts/data-prep/README.md @@ -0,0 +1,78 @@ +# data-prep + +[dataprepservice](https://github.com/qlik-trial/dataprep-service) is a service to make recommendations and prepare data. + +## Introduction + +This chart bootstraps a dataprep-service deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/data-prep +``` + +The command deploys dataprepservice on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| --------------------- | ------------------------------------------------------------- | -------------------------------------------------------- | +| `global.imageRegistry`| The global image registry (overrides default `image.registry`)| `nil` | +| `image.registry` | The default registry where the repository is pulled from. 
| `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `data-prep-service` | +| `image.tag` | image version | `2.194.0` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `replicaCount` | Number of dataprepservice replicas | `1` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | DataPrepService listen port | `9072` | +| `service.bindaddress` | Address to listen on, set to `0.0.0.0` for IPv4-only | `::` (for dual-stack IPv6 and IPv4) | +| `service.engineaddress` | Address for the Engine service | `{{ .Release.Name }}-qix-sessions` | +| `service.engineport` | Port for the Engine service | `8080` | +| `service.datafileshost` | Address for the Datafiles service | `{{ .Release.Name }}-datafiles` | +| `service.datafilesport` | Port for the Datafiles service | `8080` | +| `service.qixdatafileshost` | Address for the Qix Datafiles service | `{{ .Release.Name }}-qix-datafiles` | +| `service.qixdatafilesport` | Port for the Qix Datafiles service | `8080` | +| `service.precedentshost` | Address for the Precedents service | `{{ .Release.Name }}-precedents` | +| `service.precedentsport` | Port for the Precedents service | `9054` | +| `spaces.uri` | the endpoint used to communicate with the spaces service | `http://{.Release.Name}-spaces:6080` | +| `ingress.annotations` | DataPrepService default annotations | `{kubernetes.io/ingress.class: nginx}` | +| `ingress.authURL` | The URL to use for nginx's auth-url configuration to authenticate /api requests | `http://{{ .Release.Name }}-edge-auth.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth` | +| `ingress.host` | DataPrepService external URL | `elastic.example` | +| `ingress.tls` | DataPrepService ingress TLS configuration | `[]` | +| `auth.jwksURI` | the endpoint used to retrieve the JWKS | `http://{{ .Release.Name }}-edge-auth:8080/.well-known/jwks.json` | +| `featureFlags.url` | the endpoint used to retrieve the feature flags | `http://{{ .Release.Name }}-feature-flags.{{ .Release.Namespace }}:8080/v0/features` | +| `resources.limits.cpu` | CPU limit | `nil` | +| `resources.limits.memory` | Memory limit | `nil` | +| `resources.requests.cpu` | CPU reservation | `nil` | +| `resources.requests.memory` | Memory reservation | `nil` | +| `hpa.enabled` | Toggle horizontal pod autoscaler. | `false` | +| `hpa.minReplicas` | Minimum number of replicas | `3` | +| `hpa.maxReplicas` | Maximum number of replicas | `6` | +| `hpa.targetAverageUtilizationCpu` | The average CPU utilization to target before scaling (in % of requested) | `80` | +| `hpa.targetAverageUtilizationMemory` | The average memory utilization to target before scaling (in % of requested) | `80` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +$ helm install --name my-release -f values.yaml qlik/dataprepservice +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-prep/charts/redis/.helmignore b/qliksense/charts/data-prep/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/data-prep/charts/redis/Chart.yaml b/qliksense/charts/data-prep/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/data-prep/charts/redis/README.md b/qliksense/charts/data-prep/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. 
+ +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | 
Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. 
| `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis 
sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
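+ +As a concrete illustration of the YAML-file approach above (a minimal sketch only, not shipped with the chart; the file name `minikube-values.yaml` is hypothetical), the override below disables persistence for local testing, as the minikube note suggests. The keys are taken from the parameters table above: + +```yaml +# Hypothetical override file for local/minikube testing (sketch, not part of the chart) +usePassword: false +master: +  persistence: +    enabled: false +slave: +  persistence: +    enabled: false +``` + +It can then be passed to the install command shown above, e.g. `helm install my-release -f minikube-values.yaml stable/redis`.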
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed. + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the current one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis, you need to create a secret containing the password. + +> *NOTE*: The file containing the password must be called `redis-password` + +Then deploy the Helm chart using the secret name as a parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinel.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). 
If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls`, which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to Redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in the matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true`, the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: "true" +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. 
+ +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/data-prep/charts/redis/ci/default-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/qliksense/charts/data-prep/charts/redis/ci/dev-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/data-prep/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/data-prep/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-prep/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-prep/charts/redis/ci/production-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/data-prep/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/data-prep/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/data-prep/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/data-prep/charts/redis/templates/NOTES.txt b/qliksense/charts/data-prep/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. 
+ +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/_helpers.tpl b/qliksense/charts/data-prep/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/templates/configmap.yaml b/qliksense/charts/data-prep/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/headless-svc.yaml b/qliksense/charts/data-prep/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/health-configmap.yaml b/qliksense/charts/data-prep/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ 
"$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/data-prep/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/data-prep/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/data-prep/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/data-prep/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . 
}} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/data-prep/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/psp.yaml b/qliksense/charts/data-prep/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-role.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/data-prep/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/data-prep/charts/redis/templates/secret.yaml b/qliksense/charts/data-prep/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/data-prep/charts/redis/values-production.yaml b/qliksense/charts/data-prep/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis Sentinel image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just example rules; please adapt them to your needs. + ## Make sure to constrain the rules to the current redis service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 <= 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/data-prep/charts/redis/values.schema.json b/qliksense/charts/data-prep/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + 
"type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/data-prep/charts/redis/values.yaml b/qliksense/charts/data-prep/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/data-prep/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). 
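+  ## A minimal sketch (label keys and values below are illustrative, not part of
+  ## this chart): only pods labelled "redis-client: true" running in namespaces
+  ## labelled "redis-access: true" would be able to reach Redis.
+  # ingressNSMatchLabels:
+  #   redis-access: "true"
+  # ingressNSPodMatchLabels:
+  #   redis-client: "true"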
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
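+    ## One way to create such a secret (sketch only; the registry address and
+    ## credentials below are placeholders, not values used by this chart):
+    ##   kubectl create secret docker-registry myRegistryKeySecretName \
+    ##     --docker-server=registry.example.com \
+    ##     --docker-username=<user> --docker-password=<password>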
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/data-prep/requirements.yaml b/qliksense/charts/data-prep/requirements.yaml new file mode 100644 index 0000000..e56f3e1 --- /dev/null +++ b/qliksense/charts/data-prep/requirements.yaml @@ -0,0 +1,6 @@ +dependencies: + - name: redis + version: 10.5.6 + repository: "@stable" + condition: redis.enabled + \ No newline at end of file diff --git a/qliksense/charts/data-prep/templates/_helpers.tpl b/qliksense/charts/data-prep/templates/_helpers.tpl new file mode 100644 index 0000000..b058083 --- /dev/null +++ b/qliksense/charts/data-prep/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-prep.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "data-prep.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-prep.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return data-prep image name */}} +{{- define "data-prep.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} + diff --git a/qliksense/charts/data-prep/templates/deployment.yaml b/qliksense/charts/data-prep/templates/deployment.yaml new file mode 100644 index 0000000..b1aff43 --- /dev/null +++ b/qliksense/charts/data-prep/templates/deployment.yaml @@ -0,0 +1,119 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "data-prep.fullname" . }} + labels: + app: {{ template "data-prep.name" . }} + chart: {{ template "data-prep.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "data-prep.name" . }} + release: {{ .Release.Name }} + spec: +{{- if .Values.optionalPodAffinity }} +{{ toYaml .Values.optionalPodAffinity | indent 6 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-prep.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: ["{{ .Values.command }}"] + {{- end }} + env: + - name: PORT + value: "{{ .Values.service.port }}" + - name: BINDADDRESS + value: "{{ .Values.service.bindaddress }}" + - name: ENGINEPORT + value: "{{ .Values.service.engineport }}" + - name: TTL + value: "{{ .Values.service.ttl }}" + - name: KEEPALIVE + value: "{{ .Values.service.keepalive }}" + - name: DATAFILESHOST + value: {{ default (printf "%s-datafiles" .Release.Name) .Values.service.datafileshost | quote }} + - name: DATAFILESPORT + value: "{{ .Values.service.datafilesport}}" + - name: QIXDATAFILESHOST + value: {{ default (printf "%s-qix-datafiles" .Release.Name) .Values.service.qixdatafileshost | quote }} + - name: QIXDATAFILESPORT + value: "{{ .Values.service.qixdatafilesport}}" + - name: PRECEDENTSHOST + value: {{ default (printf "%s-precedents" .Release.Name) .Values.service.precedentshost | quote }} + - name: PRECEDENTSPORT + value: "{{ .Values.service.precedentsport}}" + - name: LOGICALAPPSFOLDER + value: /qlik/apps + - name: LOGICALGEOFOLDER + value: /geo + - name: ENGINEADDRESS + value: {{ default (printf "%s-qix-sessions" .Release.Name) .Values.service.engineaddress | quote }} + - name: LOGPATH + value: /logs + - name: DATAPATH + value: / + - name: LOGLEVEL + value: verbose + - name: MODE + value: elastic + - name: AUTH_JWKS_URI + value: {{ default (printf "http://%s-keys:8080/v1/keys/qlik.api.internal" .Release.Name ) .Values.auth.jwksURI | quote }} + - name: FEATURE_FLAG_URL + value: {{ default (printf "http://%s-feature-flags.%s:8080/v1/features" .Release.Name .Release.Namespace ) .Values.featureFlags.url | quote }} + - name: DPS_REDIS_URL + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) 
(tpl (default "" .Values.redis.existingSecret) .) }} + key: {{ default "redis-addr" .Values.redis.addressKey }} + - name: DPS_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (printf "%s-redis-secret" .Release.Name) (tpl (default "" .Values.redis.existingSecret) .) }} + key: redis-password + - name: SPACES_URI + value: {{ default (printf "http://%s-spaces:6080" .Release.Name) .Values.spaces.uri | quote }} + ports: + - containerPort: {{ .Values.service.port }} + volumeMounts: + - mountPath: /qlik/apps + name: apps-storage + - mountPath: /Apps + name: apps-storage + {{- if .Values.srcPath }} + - mountPath: /usr/src/app/src + name: src-dir + {{- end }} + livenessProbe: + httpGet: + path: /health + port: 9072 + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} + volumes: + - name: apps-storage + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ default (printf "%s-engine" .Release.Name) .Values.persistence.existingClaim | quote }} + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.srcPath }} + - name: src-dir + hostPath: + path: {{ .Values.srcPath }} + type: Directory + {{- end }} diff --git a/qliksense/charts/data-prep/templates/hpa.yaml b/qliksense/charts/data-prep/templates/hpa.yaml new file mode 100644 index 0000000..de998e1 --- /dev/null +++ b/qliksense/charts/data-prep/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-prep.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "data-prep.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} diff --git a/qliksense/charts/data-prep/templates/ingress.yaml b/qliksense/charts/data-prep/templates/ingress.yaml new file mode 100644 index 0000000..0de7f18 --- /dev/null +++ b/qliksense/charts/data-prep/templates/ingress.yaml @@ -0,0 +1,35 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "data-prep.fullname" . }} + labels: + app: {{ template "data-prep.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/dataprepservice/(.*) /$1 break; + rewrite (?i)/dataprepservice/(.*) /$1 break; +spec: + rules: + - http: + paths: + - path: /api/dataprepservice/v1/openapi + backend: + serviceName: {{ template "data-prep.fullname" . 
}} + servicePort: {{ .Values.service.port }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end -}} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} diff --git a/qliksense/charts/data-prep/templates/pvc.yaml b/qliksense/charts/data-prep/templates/pvc.yaml new file mode 100644 index 0000000..8bd004b --- /dev/null +++ b/qliksense/charts/data-prep/templates/pvc.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "data-prep.fullname" . }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: {{ .Values.persistence.storageClass }} +{{- end }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/data-prep/templates/redis-secret.yaml b/qliksense/charts/data-prep/templates/redis-secret.yaml new file mode 100644 index 0000000..2b9bca8 --- /dev/null +++ b/qliksense/charts/data-prep/templates/redis-secret.yaml @@ -0,0 +1,20 @@ +{{- if or .Values.redis.uri .Values.redis.enabled }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-redis-secret +type: Opaque +data: +{{- if .Values.redis.uri}} + redis-addr: {{ .Values.redis.uri | b64enc }} +{{- else if .Values.redis.enabled}} + redis-addr: {{ print .Release.Name "-redis-master:6379" | b64enc }} +{{- end}} +{{ if not .Values.redis.usePassword }} # usePassword=false + redis-password: {{ print "" | b64enc }} +{{- else if .Values.redis.password }} # usePassword=true AND password is set + redis-password: {{ print .Values.redis.password | b64enc }} +{{- end }} + +{{- end }} diff --git a/qliksense/charts/data-prep/templates/service.yaml b/qliksense/charts/data-prep/templates/service.yaml new file mode 100644 index 0000000..1ae2043 --- /dev/null +++ b/qliksense/charts/data-prep/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-prep.fullname" . }} + labels: + app: {{ template "data-prep.name" . }} + chart: {{ template "data-prep.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port}} + protocol: TCP + name: {{ template "data-prep.fullname" . }} + selector: + app: {{ template "data-prep.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/data-prep/values.yaml b/qliksense/charts/data-prep/values.yaml new file mode 100644 index 0000000..5767f19 --- /dev/null +++ b/qliksense/charts/data-prep/values.yaml @@ -0,0 +1,201 @@ +# Default values for data-prep. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Number of replicas +## +replicaCount: 1 + +## optional pod affinity +## +optionalPodAffinity: + +## DataPrepService image +## + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. 
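+  ## For example, passing a single override at install time (the registry name
+  ## below is illustrative only) redirects the pull for this image:
+  ##   --set global.imageRegistry=registry.example.com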
+ ## + registry: ghcr.io + ## data-prep image name + ## + repository: qlik-download/data-prep-service + tag: 2.194.0 + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +service: + name: data-prep + type: ClusterIP + port: 9072 + engineport: 8080 + datafilesport: 8080 + qixdatafilesport: 8080 + precedentsport: 9054 + bindaddress: '::' + ## time to live and keep alive specified in ms + ttl: 10000 + keepalive: 30000 + +## DataPrepService resource limits +## +resources: {} + +## Horizontal pod autoscaler +## +hpa: + ## Toggle horizontal pod autoscaler + enabled: false + ## Minimum number of replicas + minReplicas: 3 + ## Maximum number of replicas + maxReplicas: 6 + ## See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details + targetAverageUtilizationCpu: 75 + targetAverageUtilizationMemory: 75 + +## Secrets for pulling images from a private Docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Annotations to be added to the ingress. + ## + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + + annotations: + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + + + ## Default host. Ingress will not work unless this host is resolved. + # host: elastic.example + + ## TLS configuration. + ## + # tls: + # - secretName: elastic-infra-elastic-infra-tls-secret + # hosts: + # - elastic.example + +## Authorization configuration +auth: + jwksURI: + +## Url to retrieve feature flags +featureFlags: + url: + +## Spaces-Service uri +spaces: + uri: + +## Metrics configuration +metrics: + ## Prometheus configurations + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## Persistence configuration +## +persistence: + enabled: false + + ## engine Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Persistence access mode + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 + ## defaults to ReadWriteOnce suitable for local development + accessMode: ReadWriteOnce + + ## Persistence volume default size + size: 5Gi + + ## To enable an externally defined persistent volume claim set the name of the claim. + ## If configured this chart will not create a persistent volume claim. + # existingClaim: + + internalStorageClass: + ## Normally the storage class should be created outside this helm chart + ## If we want to deploy a storage class as part of the helm chart + ## - Provide a storageClassName above. + ## - set enabled true + ## - provide a storage class definition. + + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. 
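+    ## A minimal sketch of an in-chart definition (the provisioner and reclaim
+    ## policy below are illustrative; adjust them to the target cluster):
+    # enabled: true
+    # definition:
+    #   provisioner: kubernetes.io/no-provisioner
+    #   reclaimPolicy: Retain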
+ enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + +## Liveness probe parameters +livenessProbe: + failureThreshold: + periodSeconds: + +## Redis configuration +## +redis: + ## Enables a Redis chart by default (for local development for example) + enabled: false + ## Image pull policy for Redis chart + image: + pullPolicy: IfNotPresent + ## Disable password authentication by default (for local development for example) + usePassword: false + ## Disable master-secondary topology by default (for local development for example) + cluster: + enabled: false + ## master node configurations + master: + securityContext: + enabled: false + statefulset: + ## Updating all Pods in a StatefulSet, in reverse ordinal order, while respecting the StatefulSet guarantees + updateStrategy: RollingUpdate + slave: + securityContext: + enabled: false + + ## metrics configurations + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" diff --git a/qliksense/charts/data-rest-source/.helmignore b/qliksense/charts/data-rest-source/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/data-rest-source/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/data-rest-source/Chart.yaml b/qliksense/charts/data-rest-source/Chart.yaml new file mode 100644 index 0000000..0f5dd25 --- /dev/null +++ b/qliksense/charts/data-rest-source/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +appVersion: 1.0.6 +description: Helm chart for data-rest-source, a service provides data for DCaas testings +home: https://www.qlik.com +name: data-rest-source +sources: +- https://github.com/qlik-trial/data-rest-source +version: 1.2.1 diff --git a/qliksense/charts/data-rest-source/README.md b/qliksense/charts/data-rest-source/README.md new file mode 100644 index 0000000..3163136 --- /dev/null +++ b/qliksense/charts/data-rest-source/README.md @@ -0,0 +1,61 @@ +# data-rest-source + +[data-rest-source](https://github.com/qlik-trial/data-rest-source) is a tool used to serve testing data for DCaas tests. + +## Introduction + +This chart bootstraps a data-rest-source deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/data-rest-source +``` + +The command deploys data-rest-source on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
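+For example, individual parameters can be overridden at install time with `--set`
+(the values shown are illustrative, not recommendations):
+
+```console
+helm install --name my-release --set replicaCount=2,service.port=8080 qlik/data-rest-source
+```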
+ +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the data-rest-source chart and their default values. + +| Parameter | Description | Default | +|---------------------------------|----------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------| +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repositories are pulled from | `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `data-rest-source` | +| `image.tag` | image version | `1.0.4` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | a list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | service type | `ClusterIP` | +| `service.port` | server listen port | `8080` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `ingress.annotations` | Ingress additional annotations | `[]` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | `true` | +| `replicaCount` | number of data-rest-source replicas | `1` | +| `deployment.annotations` | deployment annotations | `{}` | +| `prometheus.port` | port used for /metrics endpoint | `8080` | +| `prestop.enabled` | toggle for prestop hook to prevent pod from exiting while there's an active connection | `false` | +| `terminationGracePeriodSeconds` | maximum time k8s will wait after sending SIGTERM before sending SIGKILL | `30` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install --name my-release -f values.yaml qlik/data-rest-source +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/data-rest-source/templates/_helpers.tpl b/qliksense/charts/data-rest-source/templates/_helpers.tpl new file mode 100644 index 0000000..a6a7df6 --- /dev/null +++ b/qliksense/charts/data-rest-source/templates/_helpers.tpl @@ -0,0 +1,50 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "data-rest-source.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "data-rest-source.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "data-rest-source.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return data-rest-source image name */}} +{{- define "data-rest-source.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/data-rest-source/templates/deployment.yaml b/qliksense/charts/data-rest-source/templates/deployment.yaml new file mode 100644 index 0000000..179b2fb --- /dev/null +++ b/qliksense/charts/data-rest-source/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: {{ template "data-rest-source.fullname" . }} + labels: + app: {{ template "data-rest-source.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.deployment.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "data-rest-source.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "data-rest-source.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "data-rest-source.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: HOST_PORT + value: {{ .Values.service.port | quote }} + - name: PROMETHEUS_PORT + value: {{ .Values.metrics.prometheus.port | quote }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.prestop.enabled }} + lifecycle: + preStop: + exec: + command: ["sh", "-c", "/data-rest-source-prestop-hook/data-rest-source-prestop-hook.sh"] +{{- end }} + volumeMounts: + {{- if .Values.prestop.enabled }} + - name: data-rest-source-prestop-hook + mountPath: /data-rest-source-prestop-hook + {{- end }} + volumes: + - name: data-rest-source-prestop-hook + configMap: + name: {{ template "data-rest-source.fullname" .}}-prestop-hook + defaultMode: 0755 + optional: true + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} diff --git a/qliksense/charts/data-rest-source/templates/hpa.yaml b/qliksense/charts/data-rest-source/templates/hpa.yaml new file mode 100644 index 0000000..3a6763a --- /dev/null +++ b/qliksense/charts/data-rest-source/templates/hpa.yaml @@ -0,0 +1,18 @@ +{{- if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "data-rest-source.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "data-rest-source.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} diff --git a/qliksense/charts/data-rest-source/templates/pre-stop-hook.yaml b/qliksense/charts/data-rest-source/templates/pre-stop-hook.yaml new file mode 100644 index 0000000..659112b --- /dev/null +++ b/qliksense/charts/data-rest-source/templates/pre-stop-hook.yaml @@ -0,0 +1,18 @@ +{{- if .Values.prestop.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "data-rest-source.fullname" . }}-prestop-hook +data: + data-rest-source-prestop-hook.sh: |- + env + while true + do + sessionCount=$(wget -qO- localhost:$PROMETHEUS_PORT/metrics | grep -oE '(data_rest_source_active_connections_total\ [0-9]+)' | grep -oE '([0-9]+)') + if [ "$sessionCount" -eq "0" ] + then + exit 0 + fi + sleep 1 + done +{{- end -}} diff --git a/qliksense/charts/data-rest-source/templates/service.yaml b/qliksense/charts/data-rest-source/templates/service.yaml new file mode 100644 index 0000000..c5c408e --- /dev/null +++ b/qliksense/charts/data-rest-source/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "data-rest-source.fullname" . }} + labels: + app: {{ template "data-rest-source.name" . }} + chart: {{ template "data-rest-source.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port}} + protocol: TCP + name: {{ template "data-rest-source.name" . }} + selector: + app: {{ template "data-rest-source.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/data-rest-source/values.yaml b/qliksense/charts/data-rest-source/values.yaml new file mode 100644 index 0000000..3d9000e --- /dev/null +++ b/qliksense/charts/data-rest-source/values.yaml @@ -0,0 +1,75 @@ +# Default values for data-rest-source. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## data-rest-source image + ## + repository: qlik-download/data-rest-source + + ## data-rest-source image version. + ## ref: https://hub.docker.com/r/qlik/data-rest-source/tags/ + ## + tag: 1.0.6 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. +## +replicaCount: 1 + +## Deployment configuration. +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ +deployment: {} + ## Annotations to be added to the deployment. + ## + # annotations: + +## Service configuration. 
+## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 8080 + +## deployment resources +resources: {} + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + port: 8080 + +## Horizontal pod autoscaler +hpa: + ## Toggle horizontal pod autoscaler + enabled: false + ## Minimum number of replicas + minReplicas: 1 + ## Maximum number of replicas + maxReplicas: 15 + ## See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details + targetAverageUtilizationCpu: 100 + +## Prestop hook enabled/disabled +## +prestop: + enabled: false + +terminationGracePeriodSeconds: 30 diff --git a/qliksense/charts/dcaas-web/.helmignore b/qliksense/charts/dcaas-web/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/dcaas-web/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/dcaas-web/Chart.yaml b/qliksense/charts/dcaas-web/Chart.yaml new file mode 100644 index 0000000..6b5d0cf --- /dev/null +++ b/qliksense/charts/dcaas-web/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Helm chart for the DCaaS Web - web resources for custom connectors +home: https://www.qlik.com +name: dcaas-web +sources: +- https://github.com/qlik-trial/dcaas-web +version: 1.1.92 diff --git a/qliksense/charts/dcaas-web/README.md b/qliksense/charts/dcaas-web/README.md new file mode 100644 index 0000000..d96dfa3 --- /dev/null +++ b/qliksense/charts/dcaas-web/README.md @@ -0,0 +1,51 @@ +# dcaas-web + +[dcaas-web](https://github.com/qlik-trial/dcaas-web) is the service that provides static web resources for custom connectors. + +## Introduction + +This chart bootstraps a dcaas-web service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/dcaas-web +``` + +The command deploys dcaas-web on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following tables lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repository is pulled from. 
| `qliktech-docker.jfrog.io` | +| `image.repository` | image name | `dcaas-web`| +| `image.tag` | image version | `1.1.88` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | DCaaS Web external port | `6384` | +| `ingress.dcaasweb` | Direct route for new iframe connectors | `/dcaas` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | true | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml dcaas-web +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/dcaas-web/templates/_helpers.tpl b/qliksense/charts/dcaas-web/templates/_helpers.tpl new file mode 100644 index 0000000..68d30d3 --- /dev/null +++ b/qliksense/charts/dcaas-web/templates/_helpers.tpl @@ -0,0 +1,60 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "dcaas-web.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "dcaas-web.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "dcaas-web.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return dcaas-web image name */}} +{{- define "dcaas-web.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/dcaas-web/templates/deployment.yaml b/qliksense/charts/dcaas-web/templates/deployment.yaml new file mode 100644 index 0000000..315111d --- /dev/null +++ b/qliksense/charts/dcaas-web/templates/deployment.yaml @@ -0,0 +1,41 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "dcaas-web.fullname" . }} + labels: + app: {{ template "dcaas-web.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "dcaas-web.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "dcaas-web.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ template "dcaas-web.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} diff --git a/qliksense/charts/dcaas-web/templates/ingress.yaml b/qliksense/charts/dcaas-web/templates/ingress.yaml new file mode 100644 index 0000000..7d68a03 --- /dev/null +++ b/qliksense/charts/dcaas-web/templates/ingress.yaml @@ -0,0 +1,34 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "dcaas-web.fullname" . }} + labels: + app: {{ template "dcaas-web.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?returnto=$escaped_request_uri + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/customdata/64/?(.*) /$1 break; + nginx.ingress.kubernetes.io/rewrite-target: "/" + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - http: + paths: + - path: /customdata/64 + backend: + serviceName: {{ template "dcaas-web.fullname" . 
}} + servicePort: {{ .Values.service.port }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} diff --git a/qliksense/charts/dcaas-web/templates/service.yaml b/qliksense/charts/dcaas-web/templates/service.yaml new file mode 100644 index 0000000..c83b2d7 --- /dev/null +++ b/qliksense/charts/dcaas-web/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "dcaas-web.fullname" . }} + labels: + app: {{ template "dcaas-web.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.metrics_port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "dcaas-web.name" . }} + selector: + app: {{ template "dcaas-web.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/dcaas-web/values.yaml b/qliksense/charts/dcaas-web/values.yaml new file mode 100644 index 0000000..e6c3544 --- /dev/null +++ b/qliksense/charts/dcaas-web/values.yaml @@ -0,0 +1,79 @@ +# Default values for DCaaS Web Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## DCaaS Web image. + ## + repository: qlik-download/dcaas-web + + ## DCaaS Web image version. + ## ref: https://hub.docker.com/r/qlik/dcaas-web/tags/ + ## + tag: 1.1.88 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. +## +replicaCount: 1 + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 6384 + metrics_port: 9180 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + + ## class provides an kubernetes.io/ingress.class override of default nginx + class: nginx + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + + ## Annotations to be added to the ingress. + ## + annotations: [] + + ## Default host. Ingress will not work unless this host is resolved. + # host: elastic.example + + ## TLS configuration. 
+ ## + # tls: + # - secretName: elastic-infra-elastic-infra-tls-secret + # hosts: + # - elastic.example + +## deployment resources +resources: {} diff --git a/qliksense/charts/dcaas/.helmignore b/qliksense/charts/dcaas/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/dcaas/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/dcaas/Chart.yaml b/qliksense/charts/dcaas/Chart.yaml new file mode 100644 index 0000000..d658255 --- /dev/null +++ b/qliksense/charts/dcaas/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Connector registry to access custom gRPC connectors +home: https://www.qlik.com +name: dcaas +sources: +- https://github.com/qlik-trial/dcaas +version: 1.7.4 diff --git a/qliksense/charts/dcaas/README.md b/qliksense/charts/dcaas/README.md new file mode 100644 index 0000000..91d5e9e --- /dev/null +++ b/qliksense/charts/dcaas/README.md @@ -0,0 +1,66 @@ +# dcaas + +[dcaas](https://github.com/qlik-trial/dcaas) is a connector registry service for gRPC custom connectors. + +## Introduction + +This chart bootstraps a dcaas service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/dcaas +``` + +The command deploys dcaas on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration +The following tables lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ---------------------------------- | ---------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `dcaas` | +| `image.tag` | image version | `1.8.3` | +| `image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `jwks.uri` | URI where the JWKS to validate JWTs is located | `http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal` | +| `service.type` | Service type | `ClusterIP` | +| `env.node_env` | Environment the service operates under (use enterprise for elastic for enterprise) | `enterprise` | +| `env.loglevel` | log level e.g. 
error, info, debug (from watson logger) | `info` | +| `env.connector_service` | Static list (space separated) of gRPC connector services to be brokered by dcaas | `data-connector-rest:50060` | +| `env.connection_service` | The url of the qix-data-connection service | `http://{.Release.Name}-qix-data-connection:9011` | +| `env.new_data_connection_service` | The url of the new data-connections service | `http://{.Release.Name}-data-connections:9011` | +| `env.space_service` | The url of the space service | `http://{.Release.Name}-spaces:6080` | +| `env.emulateStorageProvider` | Treat file streaming data sources as coming from a separate connector | `true` | +| `env.enableJwt` | Enable JWT authorization | `true` | +| `env.enableFeatureflagFiltering` | Enable datasource filtering using feature-flags | `true` | +| `env.featureflagUrl` | Feature flags service URL | `http://{{.Release.Name}}-feature-flags:8080` | +| `metrics.prometheus.enabled` | whether prometheus metrics are enabled | `true` | +| `dcaas-redis.enabled` | whether enable redis as a dependency | `true` | +| `dcaas-redis.usePassword` | whether use password to authenticate clients | `false` | +| `dcaas-redis.password` | password for redis authentication | | +| `dcaas-redis.cluster.enabled` | whether to use master-secondary topology | `false` | +| `dcaas-redis.customAddr` | A host:port address to use a custom Redis | | +| `dcaas-redis.customPassword` | Password for custom redis | | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml dcaas +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/dcaas/charts/redis/.helmignore b/qliksense/charts/dcaas/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/dcaas/charts/redis/Chart.yaml b/qliksense/charts/dcaas/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/dcaas/charts/redis/README.md b/qliksense/charts/dcaas/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. 
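In this repository the chart is vendored under `qliksense/charts/dcaas/charts/redis`, and the `dcaas-redis.*` keys in the dcaas parameter table above suggest it is pulled in as an aliased subchart. The sketch below shows how such a dependency is typically declared for an `apiVersion: v1` chart; the repository URL, alias and condition are illustrative assumptions and are not taken from this patch.

```yaml
# Illustrative requirements.yaml entry (assumption, not part of this patch).
# The version matches the vendored Chart.yaml; the alias mirrors the
# `dcaas-redis.*` value keys referenced by the parent dcaas chart.
dependencies:
  - name: redis
    version: 10.5.6
    repository: https://charts.helm.sh/stable  # assumed upstream chart repository
    alias: dcaas-redis
    condition: dcaas-redis.enabled
```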
+ +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | 
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | 
`26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain en extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accesing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis you need to create a secret containing the password. 
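A minimal sketch of creating such a secret with `kubectl` (the secret name `redis-password-file` matches the `existingSecret` value used below; the local file name and password are illustrative):

```bash
$ echo -n "secretpassword" > ./redis-password
$ kubectl create secret generic redis-password-file --from-file=./redis-password
```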
+ +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge tables. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. 
For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). 
If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/dcaas/charts/redis/ci/default-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
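The `ci/` values files added in this patch are consumed by the upstream chart-testing CI (as the comment above notes); a quick way to exercise one of them — for example the `dev-values.yaml` added just below — against the vendored chart locally is (Helm 3 syntax, paths assumed relative to the repository root):

```bash
$ helm lint qliksense/charts/dcaas/charts/redis \
    -f qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml
$ helm template my-release qliksense/charts/dcaas/charts/redis \
    -f qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml
```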
diff --git a/qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/dcaas/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/dcaas/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/dcaas/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/dcaas/charts/redis/ci/production-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/production-values.yaml new file mode 100644 index 0000000..6fa9c88 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/production-values.yaml @@ -0,0 +1,525 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/dcaas/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/dcaas/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/dcaas/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/dcaas/charts/redis/templates/NOTES.txt b/qliksense/charts/dcaas/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. 
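+
+As an illustration only (assuming a release named "my-release" that renders the service name "my-release-redis", and the default master set "mymaster"), a client could ask Sentinel for the current master address with:
+
+    redis-cli -h my-release-redis -p 26379 SENTINEL get-master-addr-by-name mymaster
+
+When password authentication is enabled, append "-a $REDIS_PASSWORD" as in the commands below.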
+ +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis server: + +1. Run a Redis pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash + +2. Connect using the Redis CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/_helpers.tpl b/qliksense/charts/dcaas/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
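+As an illustration (assumed registry, not a chart default): with global.imageRegistry set to
+"myregistry.example.com" this helper is expected to render
+"myregistry.example.com/bitnami/redis:5.0.5-debian-9-r36"; without the global override it
+falls back to image.registry (docker.io in this chart's values).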
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
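+As an illustration (assumed release name): with existingSecret unset, a release named
+"my-release" is expected to resolve to the secret "my-release-redis"; setting
+existingSecret makes the chart use that secret name instead.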
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
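+Illustrative examples (assumed values, not chart defaults):
+  slave.persistence.storageClass: "standard"  ->  storageClassName: standard
+  slave.persistence.storageClass: "-"         ->  storageClassName: ""   (disables dynamic provisioning)
+A non-empty global.storageClass takes precedence over the slave-level setting.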
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/templates/configmap.yaml b/qliksense/charts/dcaas/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/headless-svc.yaml b/qliksense/charts/dcaas/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/health-configmap.yaml b/qliksense/charts/dcaas/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" 
!= "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/dcaas/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/dcaas/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/dcaas/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/dcaas/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/dcaas/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/psp.yaml b/qliksense/charts/dcaas/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-role.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/dcaas/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/dcaas/charts/redis/templates/secret.yaml b/qliksense/charts/dcaas/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/dcaas/charts/redis/values-production.yaml b/qliksense/charts/dcaas/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis Sentinel image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled, each sentinel will generate a random id at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set a label on the namespace and, optionally, on the pods. + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create.
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
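To make the master options above concrete, a hedged example override (the flag values below are taken from the comment examples, not chart defaults): entries in master.extraFlags are appended to the redis-server ARGS array built in the master container command, while each entry in master.disableCommands is written into master.conf as a rename-command directive with an empty name.

master:
  extraFlags:
    - "--maxmemory-policy volatile-ttl"   # appended to ARGS in the container command
    - "--repl-backlog-size 1024mb"
  disableCommands:                        # rendered as: rename-command <command> ""
    - FLUSHDB
    - FLUSHALL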
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/dcaas/charts/redis/values.schema.json b/qliksense/charts/dcaas/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": 
"boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/dcaas/charts/redis/values.yaml b/qliksense/charts/dcaas/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/dcaas/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). 
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 =< 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/dcaas/requirements.yaml b/qliksense/charts/dcaas/requirements.yaml new file mode 100644 index 0000000..99552bf --- /dev/null +++ b/qliksense/charts/dcaas/requirements.yaml @@ -0,0 +1,6 @@ +dependencies: + - name: redis + version: 10.5.6 + repository: "@stable" + alias: dcaas-redis + condition: dcaas-redis.enabled diff --git a/qliksense/charts/dcaas/templates/_helpers.tpl b/qliksense/charts/dcaas/templates/_helpers.tpl new file mode 100644 index 0000000..98fcc7b --- /dev/null +++ b/qliksense/charts/dcaas/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "dcaas.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "dcaas.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "dcaas.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return dcaas image name */}} +{{- define "dcaas.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" 
.Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} + diff --git a/qliksense/charts/dcaas/templates/deployment.yaml b/qliksense/charts/dcaas/templates/deployment.yaml new file mode 100644 index 0000000..ad19ced --- /dev/null +++ b/qliksense/charts/dcaas/templates/deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "dcaas.fullname" . }} + labels: + app: {{ template "dcaas.name" . }} + chart: {{ template "dcaas.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "dcaas.name" . }} + release: {{ .Release.Name }} + spec: +{{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "dcaas.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_ENV + value: {{ .Values.env.node_env }} + - name: LOG_LEVEL + value: {{ .Values.env.loglevel }} + - name: CONNECTOR_SERVICE + value: {{ tpl (.Values.env.connector_service) . | quote }} + - name: DATA_CONNECTION_SERVICE + value: {{ tpl (.Values.env.connection_service) . | quote }} + - name: NEW_DATA_CONNECTION_SERVICE + value: {{ tpl (.Values.env.new_data_connection_service) . | quote }} + - name: SPACE_SERVICE + value: {{ tpl (.Values.env.space_service) . | quote }} + - name: EMULATE_STORAGE_PROVIDER + value: {{ .Values.env.emulateStorageProvider | quote }} + - name: ENABLE_JWT_AUTH + value: {{ .Values.env.enableJwt | quote }} + - name: ENABLE_POD_LOADBALANCING + value: {{ .Values.env.enablePodLoadBalancing | quote }} + - name: JWKS_ENDPOINT + value: {{ tpl (.Values.jwks.uri) . | quote }} + - name: ENABLE_FEATUREFLAG_FILTERING + value: {{ .Values.env.enableFeatureflagFiltering | quote }} + - name: FEATUREFLAG_URL + value: {{ tpl (.Values.env.featureflagUrl) . | quote }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: {{ .Values.service.port }} +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + selector: + matchLabels: + app: {{ template "dcaas.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/dcaas/templates/ingress.yaml b/qliksense/charts/dcaas/templates/ingress.yaml new file mode 100644 index 0000000..0ee1602 --- /dev/null +++ b/qliksense/charts/dcaas/templates/ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "dcaas.fullname" . }} + labels: + app: {{ template "dcaas.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/api/dcaas/(.*) /$1 break; + rewrite (?i)/dcaas/(.*) /$1 break; + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} +spec: + rules: + - http: + paths: + - path: /dcaas + backend: + serviceName: {{ template "dcaas.fullname" . }} + servicePort: {{ .Values.service.port }} + - path: /api/dcaas + backend: + serviceName: {{ template "dcaas.fullname" . }} + servicePort: {{ .Values.service.port }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end -}} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} diff --git a/qliksense/charts/dcaas/templates/redis-secret.yaml b/qliksense/charts/dcaas/templates/redis-secret.yaml new file mode 100644 index 0000000..0536e1b --- /dev/null +++ b/qliksense/charts/dcaas/templates/redis-secret.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name }}-dcaas-redis-secret +data: +{{ $val := index .Values "dcaas-redis" }} + {{ if $val.customAddr }} + redis-addr: {{ print $val.customAddr | b64enc }} + {{ if $val.customPassword }} + redis-password: {{ print $val.customPassword | b64enc }} + {{- else }} + redis-password: {{ print "" | b64enc }} + {{- end }} + + {{- else if $val.enabled }} + redis-addr: {{ print .Release.Name "-dcaas-redis-master:6379" | b64enc }} + {{ if not $val.usePassword }} # usePassword=false + redis-password: {{ print "" | b64enc }} + {{- else if $val.password }} # usePassword=true AND password is set + redis-password: {{ print $val.password | b64enc }} + {{- end }} + + {{- end }} diff --git a/qliksense/charts/dcaas/templates/service.yaml b/qliksense/charts/dcaas/templates/service.yaml new file mode 100644 index 0000000..7a40093 --- /dev/null +++ b/qliksense/charts/dcaas/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "dcaas.fullname" . }} + labels: + app: {{ template "dcaas.name" . }} + chart: {{ template "dcaas.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/scrape_high_cardinality: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app: {{ template "dcaas.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/dcaas/values.yaml b/qliksense/charts/dcaas/values.yaml new file mode 100644 index 0000000..6abd556 --- /dev/null +++ b/qliksense/charts/dcaas/values.yaml @@ -0,0 +1,129 @@ +# Default values for DCaaS Helm Chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. 
+ ## + registry: ghcr.io + + ## DCaaS image. + ## + repository: qlik-download/dcaas + + ## DCaaS image version. + ## ref: https://hub.docker.com/r/qlik/dcaas/tags/ + ## + tag: 1.8.3 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config +dnsConfig: + options: + - name: timeout + value: '1' + +## Number of replicas. +## +replicaCount: 1 + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 9026 + +## Metrics configuration +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Annotations to be added to the ingress. + ## + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + + annotations: + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?returnto=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/proxy-read-timeout: 3600 + +## JWKS configuration +jwks: + ## URI where the JWKS to validate JWTs is located. + ## If left blank the service will return 401 on all authenticated endpoints + uri: "http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal" + +## Environment configuration +env: + node_env: enterprise + loglevel: info + connector_service: "{{ .Release.Name }}-data-connector-rest-rld:{{ .Release.Name }}-data-connector-rest-cmd:50060 {{ .Release.Name }}-data-connector-qwc-rld:{{ .Release.Name }}-data-connector-qwc-cmd:50060 {{ .Release.Name }}-data-connector-odbc-rld:{{ .Release.Name }}-data-connector-odbc-cmd:50060 {{ .Release.Name }}-data-connector-sap-sql-rld:{{ .Release.Name }}-data-connector-sap-sql-cmd:50060 {{ .Release.Name }}-data-connector-sap-bw-rld:{{ .Release.Name }}-data-connector-sap-bw-cmd:50060 {{ .Release.Name }}-qix-datafiles:50051" + connection_service: http://{{ .Release.Name }}-qix-data-connection:9011 + new_data_connection_service: http://{{ .Release.Name }}-data-connections:9011 + space_service: http://{{ .Release.Name }}-spaces:6080 + emulateStorageProvider: "true" + enableJwt: "true" + enablePodLoadBalancing: "false" + enableFeatureflagFiltering: "true" + featureflagUrl: http://{{ .Release.Name }}-feature-flags:8080 + +## Redis configuration +## +dcaas-redis: + ## Enables a Redis chart by default (for local development for example) + enabled: true + ## Image pull policy for Redis chart + image: + pullPolicy: IfNotPresent + ## Disable password authentication by default (for local development for example) + usePassword: false + ## Disable master-secondary topology by default (for local development for example) + cluster: + enabled: false + ## master node configurations + master: + securityContext: + enabled: false + statefulset: + ## Updating all Pods in a StatefulSet, in reverse ordinal order, while respecting the StatefulSet guarantees + updateStrategy: RollingUpdate + slave: + 
securityContext: + enabled: false + + ## metrics configurations + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + +## deployment resources +resources: {} diff --git a/qliksense/charts/edge-auth/.helmignore b/qliksense/charts/edge-auth/.helmignore new file mode 100644 index 0000000..4c7bf9b --- /dev/null +++ b/qliksense/charts/edge-auth/.helmignore @@ -0,0 +1,2 @@ +dependencies.yaml +dev-values.yaml diff --git a/qliksense/charts/edge-auth/Chart.yaml b/qliksense/charts/edge-auth/Chart.yaml new file mode 100644 index 0000000..594dca7 --- /dev/null +++ b/qliksense/charts/edge-auth/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Helm chart for edge-auth +home: https://www.qlik.com +name: edge-auth +sources: +- https://github.com/qlik-trial/edge-auth +version: 6.2.3 diff --git a/qliksense/charts/edge-auth/README.md b/qliksense/charts/edge-auth/README.md new file mode 100644 index 0000000..0e1fac0 --- /dev/null +++ b/qliksense/charts/edge-auth/README.md @@ -0,0 +1,175 @@ +# edge-auth + +[edge-auth](https://github.com/qlik-trial/edge-auth) is used to do authentication and exchange external authentication for an internal jwt. + +## Introduction + +This chart bootstraps an edge-auth deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `edge-auth`: + +```console +helm install --name edge-auth qlik/edge-auth +``` + +The command deploys `edge-auth` on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `edge-auth` deployment: + +```console +helm delete --purge edge-auth +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables list the configurable parameters of the `edge-auth` chart and their default values. + +### Chart configuration + +| Parameter | Description | Default | +| ---------------------------- | -------------------------------------------------------------------------------------- | ----------------------------------------------------------| +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io` | +| `image.repository` | image name with no registry | `edge-auth` | +| `image.tag` | image version | `2.67.21` | +| `image.pullPolicy` | image pull policy | `Always` if `image.tag` is `latest`, else `IfNotPresent` | +| `image.pullSecrets` | a list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `deployment.replicas` | number of edge-auth replicas | `1` | +| `service.type` | service type. 
It is recommended to use `ClusterIP` in production systems | `NodePort` | +| `service.port` | edge-auth listen port | `8080` | +| `service.podAlias` | hostname alias for the internal pod ip used to mask hostname to `simple-oidc-provider` | `elastic.example` | +| `ingress.annotations` | ingress annotations for `/login` and `/logout` | `{}` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.host` | host for ingress | `nil` | +| `ingress.tls` | edge-auth ingress TLS configuration | `nil` | +| `apiIngress.annotations` | Additional apiingress annotations | `[]` | +| `apiIngress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `nginx-ingress.enabled` | enable nginx-ingress as a chart dependency | `false` | +| `mongodb.enabled` | Enable MongoDB as a chart dependency. | `true` | +| `hpa.minReplicas` | minimum no. of replicas ensured by the horizontal pod autoscaler | `1` | +| `hpa.maxReplicas` | maximum no.of replicas allowed by the horizontal pod autoscaler | `1` | + +### Oidc configuration +| Parameter | Description | Default | +| ---------------------------- | -------------------------------------------------------------------------------------- | ----------------------------------------------------------| +| `deployment.oidc.enabled` | creates a simple oidc provider _See [OIDC](#OIDC)_ | `false` | +| `deployment.oidc.registry` | The default registry where the oidc image is pulled from | `qlik` | +| `deployment.oidc.repository` | oidc image name with no registry | `simple-oidc-provider` | +| `deployment.oidc.tag` | oidc image version | `0.2.1` | +| `deployment.oidc.configs.port` | oidc service port | `32123` | +| `deployment.oidc.configs.data.redirectUri` | whitelist of where the oidc will all redirect to | `https://elastic.example/login/callback` | + +### Redis configuration + +| Parameter | Description | Default | +| ---------------------------- | -------------------------------------------------------------------------------------- | ---------------------------------------------------------| +| `redis.enabled` | Enable Redis as chart's dependency | `false` | +| `redis.encryption` | Whether to use TLS while connecting to Redis | `false` | +| `redis.usePassword` | Disable password authentication | `false` | +| `redis.password` | Custom password for authentication (needs `usePassword` set to `true`) | `""` | +| `redis.cluster.enabled` | Use master-secondary topology | `false` | +| `redis.master.statefulset.updateStrategy` | Update strategy for Redis StatefulSet | `RollingUpdate` | +| `redis.master.resources.requests.cpu` | Redis master CPU reservation | `100m` | +| `redis.master.resources.requests.memory` | Redis master memory reservation | `256Mi` | +| `redis.master.limits.requests.cpu` | Redis master CPU reservation | `400m` | +| `redis.master.limits.requests.memory` | Redis master memory reservation | `5Gi` | +| `redis.slave.resources.requests.cpu` | Redis master CPU reservation | `100m` | +| `redis.slave.resources.requests.memory` | Redis master memory reservation | `256Mi` | +| `redis.slave.limits.requests.cpu` | Redis master CPU reservation | `400m` | +| `redis.slave.limits.requests.memory` | Redis master memory reservation | `5Gi` | + + +### Edge-auth configuration + +| Parameter | Description | Default | +| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------ |
+| `configs.mongoSsl` | Enables/disables SSL for the MongoDB connection. Can be overridden using the `ssl` query parameter in the URI (?ssl=true or ?ssl=false). | `false` |
+| `configs.mongoSslValidate` | Validate mongo server certificate against CA. Untrusted certificates will be rejected. | `false` |
+| `configs.mongoCheckServerIdentity` | Enforces that mongo server certificate CN matches mongo URI hostname/IP address. | `false` |
+| `configs.natsEnabled` | Enable NATS? | `true` |
+| `configs.environment` | The environment name | `example` |
+| `configs.region` | Deployed region | `example` |
+| `configs.enforceTLS` | When enabled, edge-auth will reject non-TLS requests on its external endpoints | `true` |
+| `configs.loginStateLifetime` | The maximum length of time allowed between initiating and completing a login | `5m` |
+| `configs.secureCookies` | Restrict cookies to only be sent over SSL | `true` |
+| `configs.sessionTTLSeconds` | The length of time in seconds that a session will live past the last interaction | `1800` |
+| `configs.sessionMaxLifetimeSeconds` | The maximum length of time in seconds that a session can exist | `86400` (one day) |
+| `configs.cacheRedisEnabled` | Toggle to enable Redis caching | `false` |
+| `configs.cacheMaxAgeMilliseconds` | The maximum age of items kept in the in-memory cache, in milliseconds. Default is 600000 (10 mins) | `600000` |
+| `configs.cacheMaxEntries` | Global setting for the maximum number of entries each cache will hold. | `250` |
+| `configs.rollbarEnabled` | Enable rollbar to track server errors | `nil` |
+| `configs.rollbarToken` | The rollbar token | |
+| `configs.data.featureFlagsUri` | URI for the feature-flags service | `http://{{ .Release.Name }}-feature-flags.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.keysUri` | URI for the keys service | `http://{{ .Release.Name }}-keys.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.usersUri` | URI for the users service | `http://{{ .Release.Name }}-users.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.tenantsUri` | URI for the tenants service | `http://{{ .Release.Name }}-tenants.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.groupsUri` | URI for the groups service | `http://{{ .Release.Name }}-groups.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.apiKeysUri` | URI for the api-keys service | `http://{{ .Release.Name }}-api-keys.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.identityProvidersUri` | URI for the identity-providers service | `http://{{ .Release.Name }}-identity-providers.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.identityProvidersExtUri` | URI for the identity-providers-ext service | `http://{{ .Release.Name }}-identity-providers-ext.{{ .Release.Namespace }}.svc.cluster.local:8080` |
+| `configs.data.natsUri` | NATS URL | `nats://{{ .Release.Name }}-nats-client:4222` |
+| `configs.data.natsStreamingClusterId` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` |
+| `configs.data.nodeExtraCaCerts` | Path to a file with additional certificates to add to the trust chain | `nil` |
+| `configs.data.logLevel` | Log level (silly, debug, verbose, info, warn, error) | `verbose` |
+| `configs.data.redisUri` | Full Redis URI (port included) | `{{.Release.Name}}-redis-master:6379` |
+| `secrets.cookieKeys` | Array of strings used for signing cookies | `["A secret key"]` |
diff --git a/qliksense/charts/edge-auth/charts/messaging/.helmignore b/qliksense/charts/edge-auth/charts/messaging/.helmignore
new file mode 100644
index 0000000..dd3e638
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/.helmignore
@@ -0,0 +1 @@
+dependencies.yaml
diff --git a/qliksense/charts/edge-auth/charts/messaging/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/Chart.yaml
new file mode 100644
index 0000000..8f4c1e1
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+description: |
+  Messaging system services. NATS and NATS Streaming are supported. Other services can communicate with each other and orchestrate their work using the services provided by this chart.
+home: https://www.qlik.com +keywords: +- messaging +- queue +- nats +- nats-streaming +name: messaging +sources: +- https://github.com/nats-io/gnatsd +- https://github.com/nats-io/nats-streaming-server +- https://github.com/helm/charts/tree/master/stable/nats +- https://github.com/nats-io/prometheus-nats-exporter +- https://github.com/qlik-trial/nats-prom-exporter +version: 2.0.29 diff --git a/qliksense/charts/edge-auth/charts/messaging/README.md b/qliksense/charts/edge-auth/charts/messaging/README.md new file mode 100644 index 0000000..9627063 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/README.md @@ -0,0 +1,288 @@ +# messaging + +This charts provides **messaging system** (a.k.a. message queue, message bus, etc.) capabilities for services. +Currently, [NATS](https://www.nats.io) and [NATS Streaming](https://nats.io/documentation/streaming/nats-streaming-intro/) +are included in this chart, but in the future, other message systems like RabbitMQ can also be added. + +## Installing the Chart + +To install the chart with the release name `messaging`: + +```console +helm install --name messaging qlik/messaging +``` + +## Uninstalling the Chart + +To uninstall/delete the `messaging` deployment: + +```console +helm delete messaging +``` + +## Configuration + +### NATS + +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------------- | ------------------------------------- | +| `nats.enabled` | enable NATS messaging system | `true` | +| `nats.image.registry` | NATS image registry | `qliktech-docker.jfrog.io` | +| `nats.image.repository` | NATS Image name | `qnatsd` | +| `nats.image.tag` | NATS Image tag | `0.3.1` | +| `nats.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats.image.pullSecrets` | specify image pull secrets | `artifactory-docker-secret` | +| `nats.replicaCount` | number of nats replicas | `1` | +| `nats.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats.auth.enabled` | enable authentication for nats clients | `true` | +| `nats.auth.user` | username for nats client authentication | `nats_client` | +| `nats.auth.password` | password for nats client authentication | `T0pS3cr3t` | +| `auth.users` | Client authentication users | `[]` See [Rotation](#how-to-rotate) | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `nats.auth.jwtUsers` | array of jwt authenticated users | See [Authentication](#authentication) | +| `nats.clusterAuth.enabled` | enable authentication for nats clustering | `false` | +| `nats.clusterAuth.user` | username for nats clustering authentication | `nats_cluster` | +| `nats.clusterAuth.password` | password for nats clustering authentication | random string | +| `nats.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats.client.service.type` | nats-client service type | `ClusterIP` | +| `nats.client.service.port` | nats-client service port | `4222` | +| `nats.cluster.service.type` | nats-cluster service type | `ClusterIP` | +| `nats.cluster.service.port` | nats-cluster service port | `6222` | +| `nats.monitoring.service.type` | nats-monitoring service type | `ClusterIP` | +| `nats.monitoring.service.port` | nats-monitoring service port | `8222` | +| `nats.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats.resources` | CPU and memory 
requests and limits for nats | `{}` | +| `extraArgs` | Optional flags for NATS | See [values.yaml](./values.yaml) | + +### NATS Streaming + +| Parameter | Description | Default | +| ------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | +| `nats-streaming.enabled` | enable NATS messaging system | `false` | +| `nats-streaming.image.registry` | NATS streaming image registry | `qliktech-docker.jfrog.io` | +| `nats-streaming.image.repository` | NATS streaming image name | `nats-streaming` | +| `nats-streaming.image.tag` | NATS Streaming image tag | `0.14.1` | +| `nats-streaming.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats-streaming.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats-streaming.replicaCount` | number of nats replicas | `3` | +| `nats-streaming.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats-streaming.auth.enabled` | enable authentication for nats clients | `true` | +| `nats-streaming.auth.user` | username for nats client authentication | `nats_client` | +| `nats-streaming.auth.password` | password for nats client authentication | `nil` (Uses Secret below for password) | +| `nats-streaming.auth.secretName` | secretName for nats client authentication | `{{ .Release.Name }}-nats-secret` | +| `nats-streaming.auth.secretKey` | secretKey for nats client authentication | `client-password` | +| `nats-streaming.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats-streaming.monitoring.service.type` | nats-streaming-monitoring service type | `ClusterIP` | +| `nats-streaming.monitoring.service.port` | nats-streaming-monitoring service port | `8222` | +| `nats-streaming.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats-streaming.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats-streaming.resources` | CPU and memory requests and limits for nats | `{}` | +| `nats-streaming.clusterID` | nats streaming cluster name id | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats-streaming.natsSvc` | external nats server url | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats-streaming.hbInterval` | Interval at which server sends heartbeat to a client | `10s` | +| `nats-streaming.hbTimeout` | How long server waits for a heartbeat response | `10s` | +| `nats-streaming.hbFailCount` | Number of failed heartbeats before server closes the client connection | `5` | +| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` | +| `cluster_raft_logging` | Used for raft related debugging | `false` | +| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` | +| `store` | Storage options (Support values are `memory` and `file`) | `file` | +| `nats-streaming.persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `nats-streaming.persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `nats-streaming.persistence.size` | Persistence volume size | `nil` | +| `nats-streaming.persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `nats-streaming.persistence.internalStorageClass.definition` | Definition of the internal 
StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +### Network Policy for NATS and NATS Streaming + +| Parameter | Description | Default | +| -------------------------------------- | ---------------------------------------------------------------- | --------------------- | +| `networkPolicy.nats.enabled` | enable custom network policy for NATS messaging system | `false` | +| `networkPolicy.nats-streaming.enabled` | enable custom network policy for NATS Streaming messaging system | `false` | +| `networkPolicy.keys.release` | keys service release name for egress rules | `{{ .Release.Name }}` | + +## Requirements + +### Network Plugin to enable Network Policies in Kubernetes cluster + +This chart include options to enable [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the created +`nats` and `nats-streaming` clusters. + +Network policies are implemented by the network plugin, so the Kubernetes cluster must be configured with a networking solution which supports NetworkPolicy - +simply creating the resource without a controller to implement it will have no effect. + +For local development, please refer to [Setting Up a Minikube Cluster - Configuring Network Plugin to support Network Policies](https://github.com/qlik-trial/elastic-charts/blob/master/docs/prerequisites/minikube-cluster.md#configuring-network-plugin-to-support-network-policies) +for detailed instructions. + +### Secrets + +For deploying this chart to **stage**/**prod**, you need the following secrets written to **vault**. + +*The passwords should not start with a number!* + +| Secret | Key | Purpose | +| -------------------------------------------------------------- | ------- | ----------------------------------- | +| `/secret/{environment}/messaging/{region}/natsClientPassword` | `value` | password for client authentication | +| `/secret/{environment}/messaging/{region}/natsClusterPassword` | `value` | password for cluster authentication | + +## Connecting to NATS / NATS Streaming + +### From the command line: +#### Port-forward NATS Client Service: +```sh + > kubectl port-forward messaging-nats-0 4222 +``` +#### Connect via `telnet`: +```sh + > telnet localhost 4222 +``` +#### Connect with no auth: +```sh + CONNECT {} +``` +#### Connect with auth: +```sh + CONNECT {"user":"my-user","pass":"T0pS3cr3t"} +``` +#### Subscribing to channel, publishing to a channel, and receiving the published message: +```sh + SUB foo 1 + +OK + PUB foo 11 + Hello World + +OK + MSG foo 1 11 + Hello World +``` + +### Using [go-nats](https://github.com/nats-io/go-nats/) and [go-nats-streaming](https://github.com/nats-io/go-nats-streaming) clients: +```golang +package main + +import ( + "log" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming" +) + +func main() { + nc, err := nats.Connect("nats://nats_client:asdf@localhost:4222") + if err != nil { + log.Fatal(err) + } + sc, err := stan.Connect("messaging-nats-streaming-cluster", "client-123", stan.NatsConn(nc)) + if err != nil { + log.Fatal(err) + } + sc.Publish("hello", []byte("msg1")) + + sc.Subscribe("hello", func(m *stan.Msg) { + log.Printf("[Received] %+v", m) + }, stan.StartWithLastReceived()) + + sc.Publish("hello", []byte("msg2")) + + select{} +} +``` + +### With Network Policies enabled + +To connect to `NATS` as a client with Network Policies enabled , the pod in which the service client is in must have the label +`{{ .Release.Name 
}}-nats-client=true`.
+
+Otherwise, if enabled, the `ingress` `Network Policy` for `NATS` will block incoming traffic from any pod without the appropriate label.
+
+`Network Policy` is enabled in `stage` and `production` environments.
+
+## Authentication
+
+It's important to know that when using NATS Streaming, a NATS connection is also required, and it is the NATS connection that handles authentication and authorization, not the NATS Streaming connection.
+
+### NATS to NATS-Streaming password rotation
+
+The nats chart supports an array of users (`nats.auth.users`) used for authenticating NATS-Streaming to NATS. NATS-Streaming will use the first entry in the array to authenticate to NATS. Any additional entries remain valid and can still be used for authentication.
+
+#### How to rotate
+
+In this example we have a deployed cluster with a NATS-Streaming that authenticates using user `user1` with password `password1` from the following config. We want to update this to use `password2`.
+```yaml
+auth:
+  users:
+    - user: user1
+      password: password1
+```
+
+1) Add the new user/password as the first entry in the array, but leave the old entry as *second* in the list. Then `helm upgrade` your release (an example command is shown after these steps).
+```yaml
+auth:
+  users:
+    - user: user2
+      password: password2
+    - user: user1
+      password: password1
+```
+2) NATS will now have both user/password pairs configured, but NATS-Streaming will still be using the original entry to authenticate. The NATS-Streaming servers will need to be restarted to pick up the new password from the first entry in `nats.auth.users`.
+```sh
+kubectl delete pod {Release.Name}-nats-streaming-2 # wait for new pod to become ready
+kubectl delete pod {Release.Name}-nats-streaming-1 # wait for new pod to become ready
+kubectl delete pod {Release.Name}-nats-streaming-0 # wait for new pod to become ready
+```
+3) Finally, remove the old user from the `nats.auth.users` array and `helm upgrade` to remove authentication for the old user.
+```yaml
+auth:
+  users:
+    - user: user2
+      password: password2
+```
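+
+For reference, the `helm upgrade` used in steps 1 and 3 might look like the following; the release name (`messaging`) and the values file name are placeholders, so substitute your own:
+
+```console
+helm upgrade messaging qlik/messaging -f messaging-values.yaml
+```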
+
+### JWT Authentication
+
+NATS has been configured to allow authentication using service-to-service (S2S) JWTs, but in order to be authenticated, a service must be whitelisted.
+The `nats.auth.jwtUsers` value can be used to provide a whitelist of users that should be authenticated using an S2S JWT.
+**Note**: when using an S2S JWT, both the NATS username and the JWT `subject` must match.
+
+Adding a new service to the whitelist is as simple as updating the `nats.auth.jwtUsers` value:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+      - user: "my-other-service"
+      # ...etc
+```
+
+### Authorization
+
+The above method of adding a JWT authentication whitelist also allows for setting authorization rules.
+NATS [authorization rules](https://nats.io/documentation/managing_the_server/authorization/) can be configured on a per-subject basis.
+
+The following is an example of adding publish/subscribe authorization rules:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+        stanPermissions:
+          publish:
+            - "events.mysubject.>" # service can publish to any subject that starts with `events.mysubject.`
+            - "system-events.mysubject" # service can publish to the `system-events.mysubject` subject
+          subscribe:
+            - "events.somesubject" # service can subscribe to the `events.somesubject` subject
+        natsPermissions:
+          publish:
+            - "events.mysubject1" # service can publish to the `events.mysubject1` subject
+          subscribe:
+            - "events.somesubject1" # service can subscribe to the `events.somesubject1` subject
+```
+Wildcard support works as follows:
+
+The dot character `.` is the token separator.
+
+The asterisk character `*` is a token wildcard match.
+`e.g. foo.* matches foo.bar, foo.baz, but not foo.bar.baz.`
+
+The greater-than symbol `>` is a full wildcard match.
+`e.g. foo.> matches foo.bar, foo.baz, foo.bar.baz, foo.bar.1, etc.`
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/Chart.yaml
new file mode 100644
index 0000000..32bfb9d
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: |
+  Service that monitors NATS/NATS-Streaming message delivery metrics
+home: https://www.qlik.com
+name: message-delivery-monitor
+sources:
+- https://github.com/qlik-trial/message-delivery-monitor
+version: 0.1.0
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/README.md b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/README.md
new file mode 100644
index 0000000..a770466
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/README.md
@@ -0,0 +1,70 @@
+# message-delivery-monitor
+
+[message-delivery-monitor](https://github.com/qlik-trial/message-delivery-monitor) is responsible for measuring delivery and latency of NATS/NATS-Streaming.
+
+## Introduction
+
+This chart bootstraps a message-delivery-monitor deployment on a [Kubernetes](http://kubernetes.io) cluster using the
+[Helm](https://helm.sh) package manager.
+
+## Installing the chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/message-delivery-monitor
+```
+
+The command deploys message-delivery-monitor on the Kubernetes cluster in the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the message-delivery-monitor chart and their default values.
+
+| Parameter | Description | Default |
+| -------------------------------- | ------------------------------------------------------------------ | --------------------------------------------------- |
+| `image.registry` | Image registry | `qliktech-docker.jfrog.io` |
+| `image.repository` | Image repository | `message-delivery-monitor` |
+| `image.tag` | Image version | `0.1.0` |
+| `image.pullPolicy` | Image pull policy\* | `IfNotPresent` |
+| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` |
+| `logLevel` | Level of logging | `info` |
+| `nats.server` | NATS server address | `nats://{{ .Release.Name }}-nats-client:4222` |
+| `nats.auth.enabled` | Enable authentication to NATS | `false` |
+| `nats.auth.user` | Username to authenticate to NATS | `nil` |
+| `nats.auth.password` | Password to authenticate to NATS | `nil` |
+| `nats.auth.secretName` | Read the user/password from the K8s secret with this name | `nil` |
+| `nats.auth.secretClientUser` | Key in the K8s secret from which to read the username | `nil` |
+| `nats.auth.secretClientPassword` | Key in the K8s secret from which to read the password | `nil` |
+| `stan.clusterID` | NATS Streaming cluster ID | `{{ .Release.Name }}-nats-streaming-cluster` |
+| `stan.monitorChannel` | NATS Streaming channel to monitor on | `monitor-channel` |
+| `resources` | CPU/Memory resource requests/limits | {} |
+| `service.type` | Service type | `ClusterIP` |
+| `service.port` | message-delivery-monitor listen port | `8080` |
+| `metrics.prometheus.enabled` | Whether Prometheus metrics are enabled | `true` |
+
+(\*) If setting `image.tag` to `latest`, it is recommended to change `image.pullPolicy` to `Always`
+
+### Setting Parameters
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
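+
+For example, to point the monitor at NATS credentials stored in an existing Kubernetes secret, the `nats.auth.*` parameters from the table above could be set on the command line. This is only a sketch; the secret name and key names are illustrative and must match a secret that actually exists in your cluster:
+
+```console
+helm install --name my-release qlik/message-delivery-monitor \
+  --set nats.auth.enabled=true \
+  --set nats.auth.secretName=messaging-nats-secret \
+  --set nats.auth.secretClientUser=client-user \
+  --set nats.auth.secretClientPassword=client-password
+```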
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
+For example,
+
+```console
+helm install --name my-release -f values.yaml qlik/message-delivery-monitor
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl
new file mode 100644
index 0000000..1ac7ca5
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/_helper.tpl
@@ -0,0 +1,47 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "message-delivery-monitor.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name.
+*/}} +{{- define "message-delivery-monitor.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper message-delivery-monitor image name +*/}} +{{- define "message-delivery-monitor.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml new file mode 100644 index 0000000..a48a06f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} + {{- range $key, $val := .Values.podLabels }} + {{- if tpl ($val) $}} + {{ tpl ($key) $ }}: {{ tpl ($val) $ | quote }} + {{- end }} + {{- end}} + spec: + containers: + - name: {{ template "message-delivery-monitor.name" . }} + image: {{ template "message-delivery-monitor.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + env: + {{- if .Values.nats.auth.enabled }} + - name: NATS_USER + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientUser }} + {{- else }} + value: {{ .Values.nats.auth.user }} + {{- end }} + - name: NATS_PASSWORD + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.nats.auth.password }} + {{- end }} + {{- end }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: NATS_ADDR + value: {{ tpl (.Values.nats.server) . | quote }} + - name: NATS_STREAMING_CLUSTER_ID + value: {{ tpl (.Values.stan.clusterID) . 
| quote }}
+          - name: NATS_STREAMING_MONITORING_ENDPOINT
+            value: {{ .Values.stan.monitorChannel | quote }}
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: {{ .Values.service.port }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+      {{- end }}
+  selector:
+    matchLabels:
+      app: {{ template "message-delivery-monitor.name" . }}
+      release: {{ .Release.Name }}
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/service.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/service.yaml
new file mode 100644
index 0000000..b45beeb
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/templates/service.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.metrics.prometheus.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "message-delivery-monitor.fullname" . }}
+  labels:
+    app: {{ template "message-delivery-monitor.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.port }}
+      protocol: TCP
+      name: {{ template "message-delivery-monitor.name" . }}
+  selector:
+    app: {{ template "message-delivery-monitor.name" . }}
+    release: {{ .Release.Name }}
+{{- end }}
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/values.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/values.yaml
new file mode 100644
index 0000000..b0ee34d
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/message-delivery-monitor/values.yaml
@@ -0,0 +1,90 @@
+## Default values for the message-delivery-monitor Helm chart.
+## This is a YAML-formatted file.
+## Declare variables to be passed into your templates.
+
+## Sets service log level
+logLevel: info
+
+## NATS configuration
+##
+nats:
+  ## Comma separated list of NATS servers
+  server: "nats://{{ .Release.Name }}-nats-client:4222"
+  ## For localdev use this configuration instead
+  # server: "nats://messaging-nats-client:4222"
+
+  auth:
+    enabled: false
+    ## NATS client authentication user
+    ## user:
+
+    ## password:
+
+    # secretName: "{{ .Release.Name }}-message-delivery-monitor-secret"
+    # secretClientUser: "client-user"
+    # secretClientPassword: "client-password"
+
+## NATS Streaming configuration
+##
+stan:
+  ## NATS Streaming cluster ID
+  clusterID: "{{ .Release.Name }}-nats-streaming-cluster"
+  ## For localdev use this configuration instead
+  # clusterID: "messaging-nats-streaming-cluster"
+
+  ## Channel to send monitoring messages on
+  monitorChannel: "monitor-channel"
+
+image:
+  registry: ghcr.io
+  repository: qlik-download/message-delivery-monitor
+  tag: 0.1.1
+
+  ## Specify an imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'.
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  # pullPolicy: IfNotPresent
+
+## Secrets for pulling images from a private Docker registry.
+##
+imagePullSecrets:
+  - name: artifactory-docker-secret
+
+## Number of replicas.
+## +replicaCount: 1 + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: + ## Pod label required to allow communication with NATS + "{{ .Release.Name }}-nats-client": "true" + ## Pod label required to allow communication with NATS Streaming Monitoring endpoint + "{{ .Release.Name }}-nats-streaming-admin": "true" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services +## +service: + type: ClusterIP + port: 8080 + +## Metrics configuration +## +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/Chart.yaml new file mode 100644 index 0000000..869289d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +appVersion: 0.6.0 +description: A NATS Streaming cluster setup +home: https://nats.io/ +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +name: nats-streaming +version: 0.4.0 diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/README.md b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/README.md new file mode 100644 index 0000000..bbb2ccb --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/README.md @@ -0,0 +1,137 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that will form a clusters can connect to. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster on the `nats-io` namespace. Then, to +install a NATS Streaming cluster the URL to the NATS cluster can be +specified as follows (using `my-release` for a name label for the +cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod which is +configured to be in bootstrapping mode, which will start as the leader +of the Raft group as soon as it joins. 
+ +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that in case the bootstrapping Pod fails then it will not be +recreated and instead one of the extra follower Pods will take over +the leadership. The follower Pods are part of a Deployment so those +in case of failure they will be recreated. + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` | +| `cluster_raft_logging` | Used for raft related debugging | `false` | +| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` | +| `store` | Storage options (Support values are `memory` and `file`) | `file` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| 
`antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `file.compactEnabled` | Enable compaction | true | +| `file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `file.crc` | Enable file CRC-32 checksum | true | +| `file.sync` | Enable File.Sync on Flush | true | +| `file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.size` | Persistence volume size | `nil` | +| `persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. 
Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..69ba8e6 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . -}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + + +{{/* Return nats-streaming storage class name */}} +{{- define "nats-streaming.StorageClassName" -}} +{{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.persistence.storageClass }} + {{- end -}} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/pvc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/pvc.yaml new file mode 100644 index 0000000..dacfc5d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nats-streaming.fullname" . }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- include "nats-streaming.StorageClassName" . 
| nindent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/sc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..6b08ff8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.storageClass }} +{{ toYaml .Values.persistence.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/statefulset.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..d270f30 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,256 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . 
}} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + {{- if .Values.clustered }} + "-clustered", + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- end }} + + {{- if .Values.ftGroup }} + "--ft_group", "{{.Values.ftGroup}}", + {{- end}} + + "--store", "{{ .Values.store }}", + {{- if eq .Values.store "file" }} + {{- if .Values.clustered }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + {{- else }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . 
}}/{{ .Release.Name }}-nats-streaming-0/data", + {{- end }} + + {{- if .Values.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.file.bufferSize }}", + {{- if .Values.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.file.crcPoly }}", + {{- end }} + {{- if .Values.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.file.sliceMaxAge }}", + {{- if ne .Values.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.file.parallelRecovery }}", + {{- end}} + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if .Values.persistence.enabled }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "nats-streaming.fullname" . 
}} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/values.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/values.yaml new file mode 100644 index 0000000..1c32da9 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats-streaming/values.yaml @@ -0,0 +1,328 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Run NATS Streaming in clustered mode (incompatible with ftGroup value) +# https://github.com/nats-io/nats-streaming-server#clustering +clustered: false + +# Use for raft related debugging +cluster_raft_logging: false + +# Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) +# https://github.com/nats-io/nats-streaming-server#fault-tolerance +ftGroup: ~ + +store: "file" + +file: + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. 
+ ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + +persistence: + # If false, emptyDir will be used as a volume. + enabled: false + + ## Persistence volume default size + # size: 10Gi + + ## nats-streaming Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + internalStorageClass: + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/Chart.yaml new file mode 100644 index 0000000..07f566c --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +home: https://nats.io/ +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png +keywords: +- nats +- messaging +- addressing +- discovery +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: nats +sources: +- https://github.com/bitnami/bitnami-docker-nats +version: 2.4.1 diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/README.md b/qliksense/charts/edge-auth/charts/messaging/charts/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
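+
+A quick way to sanity-check how a particular override affects the rendered manifests is to template the chart locally without installing it. This is a sketch only, assuming Helm 2 (which this chart targets) and that the chart has been fetched into the current directory; `my-release` and the overrides shown are illustrative:
+
+```bash
+# Fetch and unpack the chart locally, then render the templates without installing
+$ helm fetch stable/nats --untar
+$ helm template --name my-release \
+    --set auth.enabled=true,replicaCount=3 \
+    ./nats | less
+```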
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+    --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \
+    stable/nats
+```
+
+The above command enables NATS client authentication with `my-user` as the user and `T0pS3cr3t` as the password.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/nats
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Sidecars
+
+If you need additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file contains a configuration for deploying a scalable, highly available NATS setup for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/nats
+```
+
+To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set.
+
+```console
+$ kubectl scale statefulset my-release-nats --replicas=3
+```
+
+## Upgrading
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is nats:
+
+```console
+$ kubectl delete statefulset nats-nats --cascade=false
+```
diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/NOTES.txt
new file mode 100644
index 0000000..224df8f
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/NOTES.txt
@@ -0,0 +1,88 @@
+** Please be patient while the chart is being deployed **
+
+{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }}
+{{- if not .Values.auth.enabled }}
+{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false"
+    you have most likely exposed the NATS service externally without any authentication
+    mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP". As an
+    alternative, you can also switch to "auth.enabled=true" and provide a valid
+    password via the "auth.password" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+{{- end }}
+
+NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster:
+
+    {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
+{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/client-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/cluster-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/configmap.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/configmap.yaml new file mode 100644 index 0000000..072826b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/configmap.yaml @@ -0,0 +1,114 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + { + user: {{ .user | quote }}, + {{- if .password }} + password: {{ .password | quote }}, + {{- end }} + {{- if .permissions }} + permissions: {{ toJson .permissions | replace "\\u003e" ">"}} + {{- end }} + } + {{- end }} + {{- if .Values.auth.monitor.enabled }} + { + user: {{ .Values.auth.monitor.user | quote }}, + password: {{ .Values.auth.monitor.password | quote }}, + permissions: {{ toJson .Values.auth.monitor.permissions | replace "\\u003e" ">"}} + } + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/headless-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . }} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/ingress.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/monitoring-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/networkpolicy.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/statefulset.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/statefulset.yaml new file mode 100644 index 0000000..3c7d71a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/statefulset.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) $ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/tls-secret.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/charts/nats/values.yaml b/qliksense/charts/edge-auth/charts/messaging/charts/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/charts/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistrKeySecretName
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+  ## Metrics exporter port
+  port: 7777
+  ## Metrics exporter annotations
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "7777"
+  ## Metrics exporter flags
+  args:
+    - -connz
+    - -routez
+    - -subz
+    - -varz
+
+sidecars:
+## Add sidecars to the pod.
+## e.g.
+# - name: your-image-name
+#   image: your-image
+#   imagePullPolicy: Always
+#   ports:
+#     - name: portname
+#       containerPort: 1234
diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/Chart.yaml
new file mode 100644
index 0000000..66a9006
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: >
+  Service that monitors NATS/NATS-Streaming message delivery metrics
+name: message-delivery-monitor
+version: 0.1.0
+home: https://www.qlik.com
+sources:
+  - https://github.com/qlik-trial/message-delivery-monitor
diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/README.md b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/README.md
new file mode 100644
index 0000000..a770466
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/README.md
@@ -0,0 +1,70 @@
+# message-delivery-monitor
+
+[message-delivery-monitor](https://github.com/qlik-trial/message-delivery-monitor) is responsible for measuring delivery and latency of NATS/NATS-Streaming.
+
+## Introduction
+
+This chart bootstraps a message-delivery-monitor deployment on a [Kubernetes](http://kubernetes.io) cluster using the
+[Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/message-delivery-monitor
+```
+
+The command deploys message-delivery-monitor on the Kubernetes cluster in the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the message-delivery-monitor chart and their default values.
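+
+The `nats.auth.secret*` parameters in the table below refer to a pre-existing Kubernetes secret holding the NATS credentials. For example (the secret and key names here are illustrative, not chart defaults), such a secret could be created with:
+
+```console
+kubectl create secret generic my-release-nats-secret \
+  --from-literal=client-user=nats_client \
+  --from-literal=client-password=T0pS3cr3t
+```
+
+With that secret in place, `nats.auth.secretName` would be set to `my-release-nats-secret`, `nats.auth.secretClientUser` to `client-user`, and `nats.auth.secretClientPassword` to `client-password`.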
+ +| Parameter                        | Description                                                          | Default                                              | +| -------------------------------- | ------------------------------------------------------------------ | --------------------------------------------------- | +| `image.registry`                 | Image registry                                                      | `qliktech-docker.jfrog.io`                           | +| `image.repository`               | Image repository                                                    | `message-delivery-monitor`                           | +| `image.tag`                      | Image version                                                       | `0.1.0`                                              | +| `image.pullPolicy`               | Image pull policy\*                                                 | `IfNotPresent`                                       | +| `imagePullSecrets`               | A list of secret names for accessing private image registries      | `[{name: "artifactory-docker-secret"}]`              | +| `logLevel`                       | Level of logging                                                    | `info`                                               | +| `nats.server`                    | NATS server address                                                 | `nats://{{ .Release.Name }}-nats-client:4222`        | +| `nats.auth.enabled`              | Enable authentication to NATS                                       | `false`                                              | +| `nats.auth.user`                 | Username to authenticate to NATS                                    | `nil`                                                | +| `nats.auth.password`             | Password to authenticate to NATS                                    | `nil`                                                | +| `nats.auth.secretName`           | Read the username/password from the K8s secret with this name       | `nil`                                                | +| `nats.auth.secretClientUser`     | Key in the K8s secret that holds the username                       | `nil`                                                | +| `nats.auth.secretClientPassword` | Key in the K8s secret that holds the password                       | `nil`                                                | +| `stan.clusterID`                 | NATS Streaming cluster ID                                           | `{{ .Release.Name }}-nats-streaming-cluster`         | +| `stan.monitorChannel`            | NATS Streaming channel to monitor on                                | `monitor-channel`                                    | +| `resources`                      | CPU/Memory resource requests/limits                                 | {}                                                   | +| `service.type`                   | Service type                                                        | `ClusterIP`                                          | +| `service.port`                   | message-delivery-monitor listen port                                | `8080`                                               | +| `metrics.prometheus.enabled`     | Whether Prometheus metrics are enabled                              | `true`                                               | + +(\*) If setting `image.tag` to `latest`, it is recommended to change `image.pullPolicy` to `Always` + +### Setting Parameters + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. +For example, + +```console +helm install --name my-release -f values.yaml qlik/message-delivery-monitor +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/_helper.tpl b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/_helper.tpl new file mode 100644 index 0000000..1ac7ca5 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/_helper.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "message-delivery-monitor.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name.
+*/}} +{{- define "message-delivery-monitor.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper message-delivery-monitor image name +*/}} +{{- define "message-delivery-monitor.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/deployment.yaml b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/deployment.yaml new file mode 100644 index 0000000..a48a06f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} + {{- range $key, $val := .Values.podLabels }} + {{- if tpl ($val) $}} + {{ tpl ($key) $ }}: {{ tpl ($val) $ | quote }} + {{- end }} + {{- end}} + spec: + containers: + - name: {{ template "message-delivery-monitor.name" . }} + image: {{ template "message-delivery-monitor.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.service.port }} + env: + {{- if .Values.nats.auth.enabled }} + - name: NATS_USER + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientUser }} + {{- else }} + value: {{ .Values.nats.auth.user }} + {{- end }} + - name: NATS_PASSWORD + {{- if .Values.nats.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.nats.auth.secretName) . }} + key: {{ .Values.nats.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.nats.auth.password }} + {{- end }} + {{- end }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: NATS_ADDR + value: {{ tpl (.Values.nats.server) . | quote }} + - name: NATS_STREAMING_CLUSTER_ID + value: {{ tpl (.Values.stan.clusterID) . 
| quote }} + - name: NATS_STREAMING_MONITORING_ENDPOINT + value: {{ .Values.stan.monitorChannel | quote }} + livenessProbe: + httpGet: + path: /metrics + port: {{ .Values.service.port }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + selector: + matchLabels: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/service.yaml b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/service.yaml new file mode 100644 index 0000000..b45beeb --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/templates/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.metrics.prometheus.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "message-delivery-monitor.fullname" . }} + labels: + app: {{ template "message-delivery-monitor.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "message-delivery-monitor.name" . }} + selector: + app: {{ template "message-delivery-monitor.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/values.yaml b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/values.yaml new file mode 100644 index 0000000..b0ee34d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/message-delivery-monitor/values.yaml @@ -0,0 +1,90 @@ +## Default values for the message-delivery-monitor Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## Sets service log level +logLevel: info + +## NATS configuration +## +nats: + ## Comma-separated list of NATS servers + server: "nats://{{ .Release.Name }}-nats-client:4222" + ## For localdev use this configuration instead + # server: "nats://messaging-nats-client:4222" + + auth: + enabled: false + ## NATS client authentication user + ## user: + + ## password: + + # secretName: "{{ .Release.Name }}-message-delivery-monitor-secret" + # secretClientUser: "client-user" + # secretClientPassword: "client-password" + +## NATS Streaming configuration +## +stan: + ## NATS Streaming cluster ID + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + ## For localdev use this configuration instead + # clusterID: "messaging-nats-streaming-cluster" + + ## Channel to send monitoring messages on + monitorChannel: "monitor-channel" + +image: + registry: ghcr.io + repository: qlik-download/message-delivery-monitor + tag: 0.1.1 + + ## Specify an imagePullPolicy: 'Always' if the image tag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: IfNotPresent + +## Secrets for pulling images from a private Docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas.
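+## One replica is normally sufficient (assumption: the monitor itself keeps no state); the chart ships a single-replica Deployment by default.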
+## +replicaCount: 1 + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: + ## Pod label required to allow communication with NATS + "{{ .Release.Name }}-nats-client": "true" + ## Pod label required to allow communication with NATS Streaming Monitoring endpoint + "{{ .Release.Name }}-nats-streaming-admin": "true" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services +## +service: + type: ClusterIP + port: 8080 + +## Metrics configuration +## +metrics: + ## Prometheus configuration + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included + enabled: true diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/Chart.yaml new file mode 100644 index 0000000..af41960 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +description: A NATS Streaming cluster setup +name: nats-streaming +version: 0.4.0 +appVersion: 0.6.0 +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +home: https://nats.io/ diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/README.md b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/README.md new file mode 100644 index 0000000..bbb2ccb --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/README.md @@ -0,0 +1,137 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that form the cluster can connect. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster in the `nats-io` namespace. Then, to +install a NATS Streaming cluster, specify the URL of the NATS cluster +as follows (using `my-release` as the release name for the +cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod which is +configured to be in bootstrapping mode, which will start as the leader +of the Raft group as soon as it joins. + +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that if the bootstrapping Pod fails, it will not be +recreated; instead, one of the extra follower Pods will take over +the leadership.
The follower Pods are part of a Deployment so those +in case of failure they will be recreated. + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `clustered` | Run NATS Streaming in clustered mode (incompatible with ftGroup value) | `false` | +| `cluster_raft_logging` | Used for raft related debugging | `false` | +| `ftGroup` | Enable Fault Tolerance mode with this group name (incompatible with clustered value) | `nil` | +| `store` | Storage options (Support values are `memory` and `file`) | `file` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful 
after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `file.compactEnabled` | Enable compaction | true | +| `file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `file.crc` | Enable file CRC-32 checksum | true | +| `file.sync` | Enable File.Sync on Flush | true | +| `file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.size` | Persistence volume size | `nil` | +| `persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . 
}}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..69ba8e6 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . 
-}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + + +{{/* Return nats-streaming storage class name */}} +{{- define "nats-streaming.StorageClassName" -}} +{{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.persistence.storageClass }} + {{- end -}} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} +storageClassName: "" + {{- else -}} +storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . 
}} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/pvc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/pvc.yaml new file mode 100644 index 0000000..dacfc5d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nats-streaming.fullname" . }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- include "nats-streaming.StorageClassName" . | nindent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/sc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..6b08ff8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.storageClass }} +{{ toYaml .Values.persistence.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/statefulset.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..d270f30 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,256 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . 
}}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + {{- if .Values.clustered }} + "-clustered", + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- end }} + + {{- if .Values.ftGroup }} + "--ft_group", "{{.Values.ftGroup}}", + {{- end}} + + "--store", "{{ .Values.store }}", + {{- if eq .Values.store "file" }} + {{- if .Values.clustered }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . 
}}/$(POD_NAME)/data", + {{- else }} + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/{{ .Release.Name }}-nats-streaming-0/data", + {{- end }} + + {{- if .Values.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.file.bufferSize }}", + {{- if .Values.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.file.crcPoly }}", + {{- end }} + {{- if .Values.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.file.sliceMaxAge }}", + {{- if ne .Values.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.file.parallelRecovery }}", + {{- end}} + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if .Values.persistence.enabled }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "nats-streaming.fullname" . 
}} + {{- else }} + - name: datadir + emptyDir: {} + {{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats-streaming/values.yaml b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/values.yaml new file mode 100644 index 0000000..1c32da9 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats-streaming/values.yaml @@ -0,0 +1,328 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Run NATS Streaming in clustered mode (incompatible with ftGroup value) +# https://github.com/nats-io/nats-streaming-server#clustering +clustered: false + +# Use for raft related debugging +cluster_raft_logging: false + +# Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) +# https://github.com/nats-io/nats-streaming-server#fault-tolerance +ftGroup: ~ + +store: "file" + +file: + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. 
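+ ## These values map to the nats-streaming-server --file_* flags passed in templates/statefulset.yaml (e.g. --file_buffer_size, --file_compact_frag, --file_slice_max_bytes).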
+ ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + +persistence: + # If false, emptyDir will be used as a volume. + enabled: false + + ## Persistence volume default size + # size: 10Gi + + ## nats-streaming Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + internalStorageClass: + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/Chart.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/Chart.yaml new file mode 100644 index 0000000..f8596f4 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 2.4.1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addressing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/README.md b/qliksense/charts/edge-auth/charts/messaging/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/messaging/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring UI by browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser.
+{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/messaging/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/client-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/cluster-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/configmap.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/configmap.yaml new file mode 100644 index 0000000..072826b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/configmap.yaml @@ -0,0 +1,114 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + { + user: {{ .user | quote }}, + {{- if .password }} + password: {{ .password | quote }}, + {{- end }} + {{- if .permissions }} + permissions: {{ toJson .permissions | replace "\\u003e" ">"}} + {{- end }} + } + {{- end }} + {{- if .Values.auth.monitor.enabled }} + { + user: {{ .Values.auth.monitor.user | quote }}, + password: {{ .Values.auth.monitor.password | quote }}, + permissions: {{ toJson .Values.auth.monitor.permissions | replace "\\u003e" ">"}} + } + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/headless-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/ingress.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/monitoring-svc.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/networkpolicy.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/statefulset.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/statefulset.yaml new file mode 100644 index 0000000..3c7d71a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/statefulset.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: + {{- range .Values.metrics.args }} + - {{ . }} + {{- end }} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/templates/tls-secret.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/nats/values.yaml b/qliksense/charts/edge-auth/charts/messaging/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
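+## A minimal override sketch (illustrative values, not the chart defaults): switching to a
+## partitioned rolling update would look like the commented block below.
+# statefulset:
+#   updateStrategy: RollingUpdate
+#   rollingUpdatePartition: 1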
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/edge-auth/charts/messaging/requirements.yaml b/qliksense/charts/edge-auth/charts/messaging/requirements.yaml new file mode 100644 index 0000000..87ba686 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/requirements.yaml @@ -0,0 +1,19 @@ +dependencies: + - name: message-delivery-monitor + version: 0.1.0 + repository: "file://./message-delivery-monitor" + # message-delivery-monitor.monitor.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.message-delivery-monitor.enabled,message-delivery-monitor.enabled + - name: nats + version: 2.4.1 + repository: "file://./nats" + # messaging.nats.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats.enabled,nats.enabled + - name: nats-streaming + version: 0.4.0 + repository: "file://./nats-streaming" + # messaging.nats-streaming.enabled is used by services that depend on the messaging chart to enable or disable nats streaming + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats-streaming.enabled,nats-streaming.enabled diff --git a/qliksense/charts/edge-auth/charts/messaging/templates/_helper.tpl b/qliksense/charts/edge-auth/charts/messaging/templates/_helper.tpl new file mode 100644 index 0000000..d03e4d7 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/templates/_helper.tpl @@ -0,0 +1,38 @@ +{{- define "messaging.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "messaging.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "messaging.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.name" -}} +{{- "nats" -}} +{{- end -}} + +{{- define "nats.fullname" -}} +{{- $name := "nats" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming.name" -}} +{{- "nats-streaming" -}} +{{- end -}} + +{{- define "nats-streaming.fullname" -}} +{{- $name := "nats-streaming" 
-}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/templates/message-delivery-monitor-secret.yaml b/qliksense/charts/edge-auth/charts/messaging/templates/message-delivery-monitor-secret.yaml new file mode 100644 index 0000000..f17e47d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/templates/message-delivery-monitor-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name }}-message-delivery-monitor-secret +data: + {{ if .Values.nats.auth.monitor.enabled }} + client-user: {{ print .Values.nats.auth.monitor.user | b64enc }} + client-password: {{ print .Values.nats.auth.monitor.password | b64enc }} + {{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/templates/nats-secret.yaml b/qliksense/charts/edge-auth/charts/messaging/templates/nats-secret.yaml new file mode 100644 index 0000000..92ffbe4 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/templates/nats-secret.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ .Release.Name }}-nats-secret +data: + {{ if .Values.nats.auth.enabled }} + {{ if .Values.nats.auth.user }} + client-user: {{ print .Values.nats.auth.user | b64enc }} + client-password: {{ print .Values.nats.auth.password | b64enc }} + {{ else if .Values.nats.auth.users }} + client-user: {{ print (index .Values.nats.auth.users 0).user | b64enc }} + client-password: {{ print (index .Values.nats.auth.users 0).password | b64enc }} + {{- end -}} + {{- end -}} diff --git a/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats-streaming.yaml b/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats-streaming.yaml new file mode 100644 index 0000000..cd855c0 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats-streaming.yaml @@ -0,0 +1,51 @@ +{{- if and (index .Values "nats-streaming" "enabled") (index .Values "networkPolicy" "nats-streaming" "enabled") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats-streaming.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ index .Values "nats-streaming" "monitoring" "service" "port" }} + from: + - podSelector: + matchLabels: + {{ template "nats-streaming.fullname" . }}-admin: "true" + - ports: + - port: {{ index .Values "nats-streaming" "metrics" "port" }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . 
}}" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats.yaml b/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats.yaml new file mode 100644 index 0000000..df645c6 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/templates/networkpolicy-nats.yaml @@ -0,0 +1,51 @@ +{{- if and (.Values.nats.enabled) (.Values.networkPolicy.nats.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ .Values.nats.client.service.port }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + - ports: + - port: {{ .Values.nats.metrics.port }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "keys" + release: {{ tpl ( .Values.networkPolicy.keys.release ) . | quote }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/messaging/values.yaml b/qliksense/charts/edge-auth/charts/messaging/values.yaml new file mode 100644 index 0000000..3cf8fbd --- /dev/null +++ b/qliksense/charts/edge-auth/charts/messaging/values.yaml @@ -0,0 +1,474 @@ +## Default values for the messaging Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. 
+ +## NATS configuration +## +nats: + ## Enables NATS chart by default + enabled: true + + securityContext: + enabled: false + + ## Image pull policy for NATS chart + image: + registry: ghcr.io + repository: qlik-download/qnatsd + tag: 0.3.1 + pullPolicy: IfNotPresent + pullSecrets: + - name: artifactory-docker-secret + + ## Number of NATS nodes + replicaCount: 1 + + ## NATS statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS svc used for client connections + ## ref: https://github.com/nats-io/gnatsd#running + ## + client: + service: + type: ClusterIP + port: 4222 + + clientAdvertise: "{{.Release.Name}}-nats-client:4222" + + ## Kubernetes svc used for clustering + ## ref: https://github.com/nats-io/gnatsd#clustering + ## + cluster: + service: + type: ClusterIP + port: 6222 + # noAdvertise: false + + ## NATS svc used for monitoring + ## ref: https://github.com/nats-io/gnatsd#monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + ## Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## Client Authentication + auth: + enabled: true + + monitor: + enabled: false + user: "delivery-monitor" + password: password + permissions: + publish: + - "monitor-channel" + - "_STAN.pub.*.monitor-channel" + - "_STAN.discover.>" + - "_STAN.close.>" + - "_STAN.discover.*.ping" + - "_STAN.sub.>" + - "_STAN.unsub.>" + - "_STAN.subclose.>" + - "_INBOX.>" + subscribe: + - "monitor-channel" + - "_STAN.acks.>" + - "_INBOX.>" + + users: + - user: "nats_client" + password: T0pS3cr3t + + ## Configuration of users that are authenticated used JWTs + ## Users can be configured with permissions to allow or deny publish/subscribe access to subjects + ## ref: https://nats.io/documentation/managing_the_server/authorization/ + ## + jwtUsers: + - user: "audit" + stanPermissions: + subscribe: + - "system-events.odag.request" + - "system-events.engine.app" + - "system-events.user-session" + - "system-events.spaces" + - "system-events.licenses" + - "system-events.generic-links" + - "system-events.api-keys" + - "system-events.user-identity" + - "system-events.web-security" + - user: "chronos-worker" + stanPermissions: + publish: + - "chronos-worker.>" + - user: "data-engineering-exporter" + stanPermissions: + subscribe: + - "system-events.>" + - user: "edge-auth" + stanPermissions: + publish: + - "system-events.user-session" + - "system-events.user-identity" + subscribe: + - "system-events.users" + - "system-events.user-session" + - "system-events.identity-providers" + - "private.idp-sync" + - user: "engine" + stanPermissions: + publish: + - "com.qlik.app" + - "com.qlik.engine.session" + - "system-events.engine.app" + - "system-events.engine.session" + - user: "identity-providers" + stanPermissions: + publish: + - "private.idp-sync" + - "system-events.identity-providers" + - user: "invite" + stanPermissions: + subscribe: + - "system-events.users" + publish: + - "system-events.invite" + - user: "nl-parser" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "odag" + stanPermissions: + publish: + - "system-events.odag.request" + - "odag.>" + subscribe: + - "odag.>" + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-data-reload" + stanPermissions: + publish: + - "reload" + - "system-events.reloadResults" + subscribe: + - "reload" + - user: "resource-library" + stanPermissions: + publish: + - "system-events.resource-library" + - user: "tenants" + stanPermissions: + publish: + - 
"system-events.tenants" + - "system-events.web-integrations" + - user: "users" + stanPermissions: + publish: + - "system-events.users" + - user: "api-keys" + stanPermissions: + publish: + - "system-events.api-keys" + - user: "collections" + stanPermissions: + publish: + - "system-events.items" + subscribe: + - "system-events.engine.app" + - user: "licenses" + stanPermissions: + publish: + - "system-events.licenses" + subscribe: + - "system-events.licenses" + - user: "spaces" + stanPermissions: + publish: + - "system-events.spaces" + subscribe: + - "system-events.tenants" + - user: "precedents" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "eventing" + stanPermissions: + publish: + - "system-events.notification-request" + subscribe: + - "system-events.engine.app" + - user: "qix-sessions" + stanPermissions: + subscribe: + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-datafiles" + stanPermissions: + subscribe: + - "system-events.engine.app" + - "system-events.spaces" + - user: "sharing" + stanPermissions: + subscribe: + - "system-events.engine.app" + publish: + - "system-events.notification-request" + - "system-events.sharing" + - user: "subscriptions" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "notification-prep" + stanPermissions: + subscribe: + - "system-events.notification-request" + publish: + - "system-events.transport-request" + - user: "web-notifications" + stanPermissions: + subscribe: + - "system-events.transport-request" + publish: + - "system-events.web-notifications" + - user: "generic-links" + stanPermissions: + publish: + - "system-events.generic-links" + - user: "data-connections" + stanPermissions: + subscribe: + - "system-events.spaces" + - user: "transport" + stanPermissions: + subscribe: + - "system-events.transport-request" + publish: + - "system-events.transport-response" + - user: "web-security" + stanPermissions: + publish: + - "system-events.web-security" + - user: "reload-tasks" + stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "reporting" + stanPermissions: + publish: + - "reporting.>" + + extraArgs: + - --jwt_users_file=/opt/bitnami/nats/users.json + - --jwt_auth_url=http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## for localdev use this configuration instead + # - --jwt_auth_url=http://keys:8080/v1/keys/qlik.api.internal + + ## Cluster Authentication + clusterAuth: + enabled: false + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +## NATS Streaming configuration +## +nats-streaming: + enabled: true + + securityContext: + enabled: false + + ## NATS Streaming image + image: + registry: ghcr.io + repository: qlik-download/nats-streaming + tag: 0.14.2 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + + ## NATS Streaming replicas + replicaCount: 3 + + ## NATS Streaming statefulset configurations + # statefulset: + # updateStrategy: RollingUpdate + + ## NATS Streaming extra options for liveness and readiness probes + readinessProbe: + enabled: true + initialDelaySeconds: 30 + + ## NATS Streaming svc used for monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + # Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## NATS Streaming cluster id + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + + ## NATS server + natsSvc: "nats://{{ .Release.Name }}-nats-client:4222" + + ## NATS server client Authentication + auth: + enabled: true + secretName: "{{ .Release.Name }}-nats-secret" + secretClientUser: "client-user" + secretClientPassword: "client-password" + + ## Use for general debugging. Enabling this will negatively affect performance. + debug: true + + # Interval at which server sends heartbeat to a client + hbInterval: 10s + + # How long server waits for a heartbeat response + hbTimeout: 10s + + # Number of failed heartbeats before server closes the client connection + hbFailCount: 5 + + # Run NATS Streaming in clustered mode (incompatible with ftGroup value) + # https://github.com/nats-io/nats-streaming-server#clustering + clustered: true + + # Run NATS Streaming in fault tolerance mode with this group name (incompatible with clustered value) + # https://github.com/nats-io/nats-streaming-server#fault-tolerance + # ftGroup: "myGroupName" + + persistence: + ## If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + ## Normally the storage class should be created outside this helm chart + ## If we want to deploy a storage class as part of the helm chart + ## - Provide a storageClassName above. + ## - set enabled true + ## - provide a storage class definition. + + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. 
+ ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + maxAge: "2h" + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -channelz + - -serverz + + +## NATS and NATS Streaming Network Policy +## +networkPolicy: + ## NATS + nats: + enabled: false + ## NATS Streaminng + nats-streaming: + enabled: false + ## Keys + keys: + ## Set keys release name for egress rules + release: "{{ .Release.Name }}" + +message-delivery-monitor: + enabled: false + + nats: + auth: + enabled: true + secretName: "{{ .Release.Name }}-message-delivery-monitor-secret" + secretClientUser: "client-user" + secretClientPassword: "client-password" diff --git a/qliksense/charts/edge-auth/charts/mongodb/.helmignore b/qliksense/charts/edge-auth/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/edge-auth/charts/mongodb/Chart.yaml b/qliksense/charts/edge-auth/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. 
+home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/edge-auth/charts/mongodb/OWNERS b/qliksense/charts/edge-auth/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/edge-auth/charts/mongodb/README.md b/qliksense/charts/edge-auth/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. 
+
+| Parameter | Description | Default |
+|-----------------------------------------|------------------------------------------------------------------------------------------------|---------------------------------------------|
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `image.registry` | MongoDB image registry | `docker.io` |
+| `image.repository` | MongoDB image name | `bitnami/mongodb` |
+| `image.tag` | MongoDB image tag | `{VERSION}` |
+| `image.pullPolicy` | Image pull policy | `Always` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` |
+| `usePassword` | Enable password authentication | `true` |
+| `existingSecret` | Existing secret with MongoDB credentials | `nil` |
+| `mongodbRootPassword` | MongoDB admin password | `random alphanumeric string (10)` |
+| `mongodbUsername` | MongoDB custom user | `nil` |
+| `mongodbPassword` | MongoDB custom user password | `random alphanumeric string (10)` |
+| `mongodbDatabase` | Database to create | `nil` |
+| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` |
+| `mongodbExtraFlags` | MongoDB additional command line flags | `[]` |
+| `service.annotations` | Kubernetes service annotations | `{}` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.nodePort` | Port to bind to for NodePort service type | `nil` |
+| `service.port` | MongoDB service port | `27017` |
+| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` |
+| `replicaSet.name` | Name of the replica set | `rs0` |
+| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` |
+| `replicaSet.key` | Key used for authentication in the replica set | `nil` |
+| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` |
+| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` |
+| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` |
+| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` |
+| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` |
+| `podAnnotations` | Annotations to be added to pods | `{}` |
+| `resources` | Pod resources | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `affinity` | Affinity for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `{}` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `persistence.enabled` | Use a PVC to persist data | `true` |
+| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
+| `persistence.accessModes` | Persistent Volume access modes | `[ReadWriteOnce]` |
+| `persistence.size` | Size of data volume | `8Gi` |
+| `persistence.annotations` | Persistent Volume annotations | `{}` |
+| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `configmap` | MongoDB configuration file to be used | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+    --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \
+    stable/mongodb
+```
+
+The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user` with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mongodb
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Replication
+
+You can start the MongoDB chart in replica set mode with the following command:
+
+```bash
+$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true
+```
+
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file contains a configuration for deploying a scalable, highly available MongoDB setup for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/mongodb
+```
+
+To horizontally scale this chart, run the following command to scale the number of secondary nodes in your MongoDB replica set:
+
+```console
+$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3
+```
+
+Some characteristics of this chart are:
+
+* Each participant in the replica set has a dedicated StatefulSet, so you always know where to find the primary, secondary or arbiter nodes.
+* The number of secondary and arbiter nodes can be scaled out independently.
+* It is easy to move an application from a standalone MongoDB server to a replica set.
+
+## Initialize a fresh instance
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+The allowed extensions are `.sh` and `.js`.
+
+## Persistence
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
diff --git a/qliksense/charts/edge-auth/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/edge-auth/charts/mongodb/files/docker-entrypoint-initdb.d/README.md
new file mode 100644
index 0000000..a929990
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/mongodb/files/docker-entrypoint-initdb.d/README.md
@@ -0,0 +1,3 @@
+You can copy your custom .sh or .js files here so that they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository.
\ No newline at end of file
diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/mongodb/templates/NOTES.txt
new file mode 100644
index 0000000..af81001
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/mongodb/templates/NOTES.txt
@@ -0,0 +1,66 @@
+{{- if contains .Values.service.type "LoadBalancer" }}
+{{- if not .Values.mongodbRootPassword }}
+-------------------------------------------------------------------------------
+ WARNING
+
+ By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword"
+ you have most likely exposed the MongoDB service externally without any
+ authentication mechanism.
+
+ For security reasons, we strongly suggest that you switch to "ClusterIP" or
+ "NodePort". As an alternative, you can also specify a valid password in the
+ "mongodbRootPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+MongoDB can be accessed via port 27017 on the following DNS name from within your cluster:
+
+    {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.usePassword -}}
+
+To get the root password run:
+
+    export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
+
+{{- end }}
+{{- if and .Values.mongodbUsername .Values.mongodbDatabase }}
+{{- if .Values.mongodbPassword }}
+
+To get the password for "{{ .Values.mongodbUsername }}" run:
+
+    export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode)
+
+{{- end }}
+{{- end }}
+
+To connect to your database run the following command:
+
+    kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }})
+    mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }}
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/configmap.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/secrets.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/edge-auth/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/mongodb/values-production.yaml b/qliksense/charts/edge-auth/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/edge-auth/charts/mongodb/values.yaml b/qliksense/charts/edge-auth/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/.helmignore b/qliksense/charts/edge-auth/charts/nginx-ingress/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/Chart.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/Chart.yaml new file mode 100644 index 0000000..2ead610 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: 0.11.0 +description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +maintainers: +- email: jack.zampolin@gmail.com + name: jackzampolin +- email: mgoodness@gmail.com + name: mgoodness +- email: chance.zibolski@coreos.com + name: chancez +name: nginx-ingress +sources: +- https://github.com/kubernetes/ingress-nginx +version: 0.11.3 diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/README.md b/qliksense/charts/edge-auth/charts/nginx-ingress/README.md new file mode 100644 index 0000000..743af6b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/README.md @@ -0,0 +1,187 @@ +# nginx-ingress + +[nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller that uses ConfigMap to store the nginx configuration. + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +## TL;DR; + +```console +$ helm install stable/nginx-ingress +``` + +## Introduction + +This chart bootstraps an nginx-ingress deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + - Kubernetes 1.4+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/nginx-ingress +``` + +The command deploys nginx-ingress on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
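+
+Note that Helm 2 (which these `--name`-style commands assume) keeps deleted releases in its history, so the release name remains reserved. If you also want to free the name for reuse, the release can be purged as well; a minimal example, assuming a Helm 2 client:
+
+```console
+$ helm delete --purge my-release
+```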
+ +## Configuration + +The following table lists the configurable parameters of the nginx-ingress chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`controller.name` | name of the controller component | `controller` +`controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` +`controller.image.tag` | controller container image tag | `0.11.0` +`controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` +`controller.config` | nginx ConfigMap entries | none +`controller.hostNetwork` | If the nginx deployment / daemonset should run on the host's network namespace. Do not set this when `controller.service.externalIPs` is set and `kube-proxy` is used as there will be a port-conflict for port `80` | false +`controller.defaultBackendService` | default 404 backend service; required only if `defaultBackend.enabled = false` | `""` +`controller.electionID` | election ID to use for the status update | `ingress-controller-leader` +`controller.extraEnvs` | any additional environment variables to set in the pods | `{}` +`controller.ingressClass` | name of the ingress class to route through this controller | `nginx` +`controller.scope.enabled` | limit the scope of the ingress controller | `false` (watch all namespaces) +`controller.scope.namespace` | namespace to watch for ingress | `""` (use the release namespace) +`controller.extraArgs` | Additional controller container arguments | `{}` +`controller.kind` | install as Deployment or DaemonSet | `Deployment` +`controller.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`controller.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`controller.minReadySeconds` | how many seconds a pod needs to be ready before killing the next, during update | `0` +`controller.nodeSelector` | node labels for pod assignment | `{}` +`controller.podAnnotations` | annotations to be added to pods | `{}` +`controller.replicaCount` | desired number of controller pods | `1` +`controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` +`controller.resources` | controller pod resource requests & limits | `{}` +`controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.service.annotations` | annotations for controller service | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` +`controller.service.clusterIP` | internal controller cluster service IP | `""` +`controller.service.externalIPs` | controller service external IP addresses. 
Do not set this when `controller.hostNetwork` is set to `true` and `kube-proxy` is used as there will be a port-conflict for port `80` | `[]` +`controller.service.externalTrafficPolicy` | If `controller.service.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable [source IP preservation](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) | `"Cluster"` +`controller.service.healthCheckNodePort` | If `controller.service.type` is `NodePort` or `LoadBalancer` and `controller.service.externalTrafficPolicy` is set to `Local`, set this to [the managed health-check port the kube-proxy will expose](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport). If blank, a random port in the `NodePort` range will be assigned | `""` +`controller.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.service.targetPorts.http` | Sets the targetPort that maps to the Ingress' port 80 | `80` +`controller.service.targetPorts.https` | Sets the targetPort that maps to the Ingress' port 443 | `443` +`controller.service.type` | type of controller service to create | `LoadBalancer` +`controller.service.nodePorts.http` | If `controller.service.type` is `NodePort` and this is non-empty, it sets the nodePort that maps to the Ingress' port 80 | `""` +`controller.service.nodePorts.https` | If `controller.service.type` is `NodePort` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` +`controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 +`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`controller.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`controller.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`controller.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 3 +`controller.stats.enabled` | if `true`, enable "vts-status" page | `false` +`controller.stats.service.annotations` | annotations for controller stats service | `{}` +`controller.stats.service.clusterIP` | internal controller stats cluster service IP | `""` +`controller.stats.service.externalIPs` | controller service stats external IP addresses | `[]` +`controller.stats.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.stats.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.stats.service.type` | type of controller stats service to create | `ClusterIP` +`controller.metrics.enabled` | if `true`, enable Prometheus metrics (`controller.stats.enabled` must be `true` as well) | `false` +`controller.metrics.service.annotations` | annotations for Prometheus metrics service | `{}` +`controller.metrics.service.clusterIP` | cluster IP address to assign to service | `""` +`controller.metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` +`controller.metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.metrics.service.servicePort` | Prometheus metrics service port | `9913` +`controller.metrics.service.targetPort` | Prometheus metrics target port | `10254` +`controller.metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` +`controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` +`controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` +`controller.headers` | configMap key:value pairs containing the [custom headers](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers) for Nginx | `{}` +`controller.updateStrategy` | allows setting of RollingUpdate strategy | `{}` +`defaultBackend.name` | name of the default backend component | `default-backend` +`defaultBackend.image.repository` | default backend container image repository | `k8s.gcr.io/defaultbackend` +`defaultBackend.image.tag` | default backend container image tag | `1.3` +`defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` +`defaultBackend.extraArgs` | Additional default backend container arguments | `{}` +`defaultBackend.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`defaultBackend.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`defaultBackend.nodeSelector` | node labels for pod assignment | `{}` +`defaultBackend.podAnnotations` | annotations to be added to pods | `{}` +`defaultBackend.replicaCount` | desired number of default backend pods | `1` +`defaultBackend.minAvailable` | minimum number of available default backend pods for PodDisruptionBudget | `1` +`defaultBackend.resources` | default backend pod resource requests & limits | `{}` +`defaultBackend.service.annotations` | annotations for default backend service | `{}` +`defaultBackend.service.clusterIP` | internal default backend cluster service IP | `""` +`defaultBackend.service.externalIPs` | default backend service external IP addresses | `[]` +`defaultBackend.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`defaultBackend.service.loadBalancerSourceRanges` | list of IP CIDRs 
allowed access to load balancer (if supported) | `[]` +`defaultBackend.service.type` | type of default backend service to create | `ClusterIP` +`rbac.create` | If true, create & use RBAC resources | `false` +`rbac.serviceAccountName` | ServiceAccount to be used (ignored if rbac.create=true) | `default` +`revisionHistoryLimit` | The number of old history to retain to allow rollback. | `10` +`tcp` | TCP service key:value pairs | `{}` +`udp` | UDP service key:value pairs | `{}` + +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.stats.enabled=true +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/nginx-ingress --name my-release -f values.yaml +``` + +A useful trick to debug issues with ingress is to increase the logLevel +as described [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md#debug) + +```console +$ helm install stable/nginx-ingress --set controller.extraArgs.v=2 +``` + +## Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics. In order for this to work, the VTS dashboard must be enabled as well. + +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.stats.enabled=true \ + --set controller.metrics.enabled=true +``` + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you need to create a ServiceMonitor as follows: + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: nginx-ingress-service-monitor +spec: + jobLabel: nginx-ingress + selector: + matchLabels: + app: nginx-ingress + release: + namespaceSelector: + matchNames: + - + endpoints: + - port: metrics + interval: 30s +``` +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/NOTES.txt new file mode 100644 index 0000000..2b73f87 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/NOTES.txt @@ -0,0 +1,64 @@ +The nginx-ingress controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." 
+{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ template "nginx-ingress.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "nginx-ingress.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/_helpers.tpl new file mode 100644 index 0000000..606f5f1 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/_helpers.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.controller.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "nginx-ingress.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" .Release.Namespace (include "nginx-ingress.controller.fullname" .) 
-}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.defaultBackend.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrole.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrole.yaml new file mode 100644 index 0000000..a1571fe --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrole.yaml @@ -0,0 +1,69 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrolebinding.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..d1487af --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-configmap.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-configmap.yaml new file mode 100644 index 0000000..ef9372f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-configmap.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }} +data: + enable-vts-status: "{{ .Values.controller.stats.enabled }}" +{{- if .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-headers +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-daemonset.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-daemonset.yaml new file mode 100644 index 0000000..e0c2c2c --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-daemonset.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.controller.kind "DaemonSet" }} +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/controller-configmap.yaml") . | sha256sum }} + {{- if .Values.controller.podAnnotations }} +{{ toYaml .Values.controller.podAnnotations | indent 8}} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8}} + {{- end }} + spec: + dnsPolicy: {{ .Values.controller.dnsPolicy }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}" + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + - --default-backend-service={{ if .Values.defaultBackend.enabled }}{{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }}{{ else }}{{ .Values.controller.defaultBackendService }}{{ end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ .Values.controller.ingressClass }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . 
}} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range $key, $value := .Values.controller.extraEnvs }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + - name: http + containerPort: 80 + protocol: TCP + {{- if .Values.controller.daemonset.useHostPort }} + hostPort: 80 + {{- end }} + - name: https + containerPort: 443 + protocol: TCP + {{- if .Values.controller.daemonset.useHostPort }} + hostPort: 443 + {{- end }} + {{- if .Values.controller.stats.enabled }} + - name: stats + containerPort: 18080 + protocol: TCP + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: 10254 + protocol: TCP + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if .Values.controller.customTemplate.configMapName }} + volumeMounts: + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ if .Values.rbac.create }}{{ template "nginx-ingress.fullname" . 
}}{{ else }}"{{ .Values.rbac.serviceAccountName }}"{{ end }} + terminationGracePeriodSeconds: 60 +{{- if .Values.controller.customTemplate.configMapName }} + volumes: + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-deployment.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-deployment.yaml new file mode 100644 index 0000000..90194dd --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-deployment.yaml @@ -0,0 +1,167 @@ +{{- if eq .Values.controller.kind "Deployment" }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + replicas: {{ .Values.controller.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/controller-configmap.yaml") . | sha256sum }} + {{- if .Values.controller.podAnnotations }} +{{ toYaml .Values.controller.podAnnotations | indent 8}} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + spec: + dnsPolicy: {{ .Values.controller.dnsPolicy }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}" + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + - --default-backend-service={{ if .Values.defaultBackend.enabled }}{{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }}{{ else }}{{ .Values.controller.defaultBackendService }}{{ end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ .Values.controller.ingressClass }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range $key, $value := .Values.controller.extraEnvs }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + {{- if .Values.controller.stats.enabled }} + - name: stats + containerPort: 18080 + protocol: TCP + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: 10254 + protocol: TCP + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if .Values.controller.customTemplate.configMapName }} + volumeMounts: + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ if .Values.rbac.create }}{{ template "nginx-ingress.fullname" . 
}}{{ else }}"{{ .Values.rbac.serviceAccountName }}"{{ end }} + terminationGracePeriodSeconds: 60 +{{- if .Values.controller.customTemplate.configMapName }} + volumes: + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-hpa.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-hpa.yaml new file mode 100644 index 0000000..422d846 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-hpa.yaml @@ -0,0 +1,22 @@ +{{- if eq .Values.controller.kind "Deployment" }} +{{- if .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1beta1 + kind: Deployment + name: {{ template "nginx-ingress.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + targetCPUUtilizationPercentage: {{ .Values.controller.autoscaling.targetCPUUtilizationPercentage }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-metrics-service.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-metrics-service.yaml new file mode 100644 index 0000000..eeaf297 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-metrics-service.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.controller.stats.enabled .Values.controller.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: +{{ toYaml .Values.controller.metrics.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }}-metrics +spec: + clusterIP: "{{ .Values.controller.metrics.service.clusterIP }}" +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.metrics.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.metrics.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.controller.metrics.service.servicePort }} + targetPort: metrics + selector: + app: {{ template "nginx-ingress.name" . 
}} + component: "{{ .Values.controller.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.controller.metrics.service.type }}" +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..f2922f2 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,17 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ .Release.Name }} + component: "{{ .Values.controller.name }}" + minAvailable: {{ .Values.controller.minAvailable }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-service.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-service.yaml new file mode 100644 index 0000000..4b20dda --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-service.yaml @@ -0,0 +1,65 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: +{{ toYaml .Values.controller.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . 
}} +spec: + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- if .Values.controller.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- if and (ge .Capabilities.KubeVersion.Minor "7") (.Values.controller.service.externalTrafficPolicy) }} + externalTrafficPolicy: "{{ .Values.controller.service.externalTrafficPolicy }}" +{{- end }} +{{- if and (ge .Capabilities.KubeVersion.Minor "7") (.Values.controller.service.healthCheckNodePort) }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + ports: + - name: http + port: 80 + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and (eq .Values.controller.service.type "NodePort") (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + - name: https + port: 443 + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and (eq .Values.controller.service.type "NodePort") (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + port: {{ $key }} + protocol: TCP + targetPort: {{ $key }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + port: {{ $key }} + protocol: UDP + targetPort: {{ $key }} + {{- end }} + selector: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.controller.service.type }}" diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-stats-service.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-stats-service.yaml new file mode 100644 index 0000000..37486ac --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/controller-stats-service.yaml @@ -0,0 +1,38 @@ +{{- if .Values.controller.stats.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.stats.service.annotations }} + annotations: +{{ toYaml .Values.controller.stats.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.controller.fullname" . 
}}-stats +spec: + clusterIP: "{{ .Values.controller.stats.service.clusterIP }}" +{{- if .Values.controller.stats.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.stats.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.stats.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.stats.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.stats.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.stats.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: stats + port: {{ .Values.controller.stats.service.servicePort }} + targetPort: 18080 + selector: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.controller.stats.service.type }}" +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-deployment.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fa7a54a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-deployment.yaml @@ -0,0 +1,66 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: +{{ toYaml .Values.defaultBackend.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.defaultBackend.name }}" + release: {{ .Release.Name }} + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + spec: + containers: + - name: {{ template "nginx-ingress.name" . 
}}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: "{{ .Values.defaultBackend.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + protocol: TCP + resources: +{{ toYaml .Values.defaultBackend.resources | indent 12 }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: +{{ toYaml .Values.defaultBackend.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: +{{ toYaml .Values.defaultBackend.tolerations | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: +{{ toYaml .Values.defaultBackend.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..5350595 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,17 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ .Release.Name }} + component: "{{ .Values.defaultBackend.name }}" + minAvailable: {{ .Values.defaultBackend.minAvailable }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-service.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-service.yaml new file mode 100644 index 0000000..aea23c3 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/default-backend-service.yaml @@ -0,0 +1,37 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: +{{ toYaml .Values.defaultBackend.service.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . 
}} +spec: + clusterIP: "{{ .Values.defaultBackend.service.clusterIP }}" +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: +{{ toYaml .Values.defaultBackend.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.defaultBackend.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.defaultBackend.service.servicePort }} + targetPort: 8080 + selector: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.defaultBackend.name }}" + release: {{ .Release.Name }} + type: "{{ .Values.defaultBackend.service.type }}" +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/headers-configmap.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/headers-configmap.yaml new file mode 100644 index 0000000..c85f008 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/headers-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.headers }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }}-custom-headers +data: +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/role.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/role.yaml new file mode 100644 index 0000000..fe24731 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/role.yaml @@ -0,0 +1,44 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - namespaces + - pods + - secrets + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ .Values.controller.ingressClass }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/rolebinding.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/rolebinding.yaml new file mode 100644 index 0000000..f608545 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/serviceaccount.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/serviceaccount.yaml new file mode 100644 index 0000000..23fdf00 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.rbac.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/tcp-configmap.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/tcp-configmap.yaml new file mode 100644 index 0000000..fdbf282 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/tcp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.tcp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }}-tcp +data: +{{ toYaml .Values.tcp | indent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/templates/udp-configmap.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/udp-configmap.yaml new file mode 100644 index 0000000..75ce163 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/templates/udp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.udp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . }}-udp +data: +{{ toYaml .Values.udp | indent 2 }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/nginx-ingress/values.yaml b/qliksense/charts/edge-auth/charts/nginx-ingress/values.yaml new file mode 100644 index 0000000..436659a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/nginx-ingress/values.yaml @@ -0,0 +1,305 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + tag: "0.11.0" + pullPolicy: IfNotPresent + + config: {} + # Will add custom header to Nginx https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + headers: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: false + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + ## + extraEnvs: {} + + ## DaemonSet or Deployment + ## + kind: Deployment + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + autoscaling: + enabled: false + # minReplicas: 1 + # maxReplicas: 11 + # targetCPUUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + annotations: {} + clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "" + + healthCheckNodePort: 0 + + targetPorts: + http: 80 + https: 443 + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + nodePorts: + http: "" + https: "" + + stats: + enabled: false + + service: + annotations: {} + clusterIP: "" + + ## List of IP addresses at which the stats service is available + ## Ref: 
https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 18080 + type: ClusterIP + + ## If controller.stats.enabled = true and controller.metrics.enabled = true, Prometheus metrics will be exported + ## + metrics: + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + lifecycle: {} + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: k8s.gcr.io/defaultbackend + tag: "1.3" + pullPolicy: IfNotPresent + + extraArgs: {} + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: false + serviceAccountName: default + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/.helmignore b/qliksense/charts/edge-auth/charts/qlikcommon/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/Chart.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/Chart.yaml new file mode 100644 index 0000000..ceaf085 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: 1.0.14 +description: Qlik resource contract chartbuilding components and helpers +home: https://github.com/qlik-trial/resource-contract +maintainers: +- email: boris.kuschel@qlik.com + name: bkuschel +name: qlikcommon +version: 1.5.0 diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/README.md b/qliksense/charts/edge-auth/charts/qlikcommon/README.md new file mode 100644 index 0000000..664b529 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/README.md @@ -0,0 +1,837 @@ +# Qlik Common + +This chart is based off of the Common helper chart hosts in the kubernetes incubator +helm chart repo. Documentation below. + +## Common: The Helm Helper Chart + +This chart is designed to make it easier for you to build and maintain Helm +charts. + +It provides utilities that reflect best practices of Kubernetes chart development, +making it faster for you to write charts. + +## Tips + +A few tips for working with Common: + +- Be careful when using functions that generate random data (like `common.fullname.unique`). + They may trigger unwanted upgrades or have other side effects. + +In this document, we use `RELEASE-NAME` as the name of the release. + +## Resource Kinds + +Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`. +We define some of the most common kinds in a way that lets you easily work with +them. + +The resource kind templates are designed to make it much faster for you to +define _basic_ versions of these resources. They allow you to extend and modify +just what you need, without having to copy around lots of boilerplate. + +To make use of these templates you must define a template that will extend the +base template (though it can be empty). The name of this template is then passed +to the base template, for example: + +```yaml +{{- template "common.service" (list . "mychart.service") -}} +{{- define "mychart.service" -}} +## Define overrides for your Service resource here, e.g. +# metadata: +# labels: +# custom: label +# spec: +# ports: +# - port: 8080 +{{- end -}} +``` + +Note that the `common.service` template defines two parameters: + + - The root context (usually `.`) + - A template name containing the service definition overrides + +A limitation of the Go template library is that a template can only take a +single argument. The `list` function is used to workaround this by constructing +a list or array of arguments that is passed to the template. + +The `common.service` template is responsible for rendering the templates with +the root context and merging any overrides. As you can see, this makes it very +easy to create a basic `Service` resource without having to copy around the +standard metadata and labels. + +Each implemented base resource is described in greater detail below. 
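+As a concrete illustration of that list-passing convention, here is a minimal sketch of the merge helper a base template typically delegates to. The `mychart.util.merge` name and its exact behavior are illustrative assumptions, not part of this chart's public interface: it renders the named override template and the named base template against the root context, then deep-merges the two so the override wins.
+
+```yaml
+{{- /* Illustrative sketch only: deep-merge a named override template onto a named base template. */ -}}
+{{- define "mychart.util.merge" -}}
+{{- $top := first . -}}
+{{- /* Render each named template against the root context, then parse it back into a dict. */ -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict) -}}
+{{- $base := fromYaml (include (index . 2) $top) | default (dict) -}}
+{{- /* merge gives precedence to its first argument, so the caller's overrides win over the base defaults. */ -}}
+{{- toYaml (merge $overrides $base) -}}
+{{- end -}}
+```
+
+A base resource template such as `common.service` would then typically call a helper like this with the caller's override template name plus the name of its own built-in definition.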
+ +### `common.service` + +The `common.service` template creates a basic `Service` resource with the +following defaults: + +- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type` +- Named port `http` configured on port 80 +- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource + +Example template: + +```yaml +{{- template "common.service" (list . "mychart.mail.service") -}} +{{- define "mychart.mail.service" -}} +metadata: + name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: mail +spec: + ports: # composes the `ports` section of the service definition. + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: # this is appended to the default selector + protocol: mail +{{- end -}} +--- +{{ template "common.service" (list . "mychart.web.service") -}} +{{- define "mychart.web.service" -}} +metadata: + name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix + labels: # appended to the labels section + protocol: www +spec: + ports: # composes the `ports` section of the service definition. + - name: www + port: 80 + targetPort: 8080 +{{- end -}} +``` + +The above template defines _two_ services: a web service and a mail service. + +The most important part of a service definition is the `ports` object, which +defines the ports that this service will listen on. Most of the time, +`selector` is computed for you. But you can replace it or add to it. + +The output of the example above is: + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: mail + release: release-name + name: release-name-service-mail +spec: + ports: + - name: smtp + port: 25 + targetPort: 25 + - name: imaps + port: 993 + targetPort: 993 + selector: + app: service + release: release-name + protocol: mail + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: service + chart: service-0.1.0 + heritage: Tiller + protocol: www + release: release-name + name: release-name-service-www +spec: + ports: + - name: www + port: 80 + targetPort: 8080 + type: ClusterIP +``` + +## `common.deployment` + +The `common.deployment` template defines a basic `Deployment`. Underneath the +hood, it uses `common.container` (see next section). + +By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}` +and `release: {{ .Release.Name | quote }}` as this is also used as the selector. The +standard set of labels is not used as some of these can change during upgrades, +which would cause the replica sets and pods to no longer match correctly. + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + replicas: {{ .Values.replicaCount }} +{{- end -}} +``` + +## `common.container` + +The `common.container` template creates a basic `Container` spec to be used +within a `Deployment` or `ReplicaSet`.
It holds the following defaults: + +- The name is set to `main` +- Uses `.Values.image` to describe the image to run, with the following spec: + ```yaml + image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + ``` +- Exposes the named port `http` as port 80 +- Lays out the compute resources using `.Values.resources` + +Example use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +## Define overrides for your Deployment resource here, e.g. +spec: + template: + spec: + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "mychart.deployment.container") | indent 8}} +{{- end -}} +{{- define "mychart.deployment.container" -}} +## Define overrides for your Container here, e.g. +livenessProbe: + httpGet: + path: / + port: 80 +readinessProbe: + httpGet: + path: / + port: 80 +{{- end -}} +``` + +The above example creates a `Deployment` resource which makes use of the +`common.container` template to populate the PodSpec's container list. The usage +of this template is similar to the other resources, you must define and +reference a template that contains overrides for the container object. + +The most important part of a container definition is the image you want to run. +As mentioned above, this is derived from `.Values.image` by default. It is a +best practice to define the image, tag and pull policy in your charts' values as +this makes it easy for an operator to change the image registry, or use a +specific tag or version. Another example of configuration that should be exposed +to chart operators is the container's required compute resources, as this is +also very specific to an operators environment. An example `values.yaml` for +your chart could look like: + +```yaml +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +The output of running the above values through the earlier template is: + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: deployment + chart: deployment-0.1.0 + heritage: Tiller + release: release-name + name: release-name-deployment +spec: + template: + metadata: + labels: + app: deployment + spec: + containers: + - image: nginx:stable + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 80 + name: deployment + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: 80 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +``` + +## `common.configmap` + +The `common.configmap` template creates an empty `ConfigMap` resource that you +can override with your configuration. + +Example use: + +```yaml +{{- template "common.configmap" (list . "mychart.configmap") -}} +{{- define "mychart.configmap" -}} +data: + zeus: cat + athena: cat + julius: cat + one: |- + {{ .Files.Get "file1.txt" }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: cat + julius: cat + one: This is a file. + zeus: cat +kind: ConfigMap +metadata: + labels: + app: configmap + chart: configmap-0.1.0 + heritage: Tiller + release: release-name + name: release-name-configmap +``` + +## `common.secret` + +The `common.secret` template creates an empty `Secret` resource that you +can override with your secrets. + +Example use: + +```yaml +{{- template "common.secret" (list . 
"mychart.secret") -}} +{{- define "mychart.secret" -}} +data: + zeus: {{ print "cat" | b64enc }} + athena: {{ print "cat" | b64enc }} + julius: {{ print "cat" | b64enc }} + one: |- + {{ .Files.Get "file1.txt" | b64enc }} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +data: + athena: Y2F0 + julius: Y2F0 + one: VGhpcyBpcyBhIGZpbGUuCg== + zeus: Y2F0 +kind: Secret +metadata: + labels: + app: secret + chart: secret-0.1.0 + heritage: Tiller + release: release-name + name: release-name-secret +type: Opaque +``` + +## `common.ingress` + +The `common.ingress` template is designed to give you a well-defined `Ingress` +resource, that can be configured using `.Values.ingress`. An example values file +that can be used to configure the `Ingress` resource is: + +```yaml +ingress: + hosts: + - chart-example.local + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + tls: + - secretName: chart-example-tls + hosts: + - chart-example.local +``` + +Example use: + +```yaml +{{- template "common.ingress" (list . "mychart.ingress") -}} +{{- define "mychart.ingress" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: + app: ingress + chart: ingress-0.1.0 + heritage: Tiller + release: release-name + name: release-name-ingress +spec: + rules: + - host: chart-example.local + http: + paths: + - backend: + serviceName: release-name-ingress + servicePort: 80 + path: / + tls: + - hosts: + - chart-example.local + secretName: chart-example-tls +``` + +## `common.persistentvolumeclaim` + +`common.persistentvolumeclaim` can be used to easily add a +`PersistentVolumeClaim` resource to your chart that can be configured using +`.Values.persistence`: + +| Value | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------- | +| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead | +| persistence.storageClass | `StorageClass` name | +| persistence.accessMode | Access mode for persistent volume | +| persistence.size | Size of persistent volume | +| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim | + +An example values file that can be used to configure the +`PersistentVolumeClaim` resource is: + +```yaml +persistence: + enabled: true + storageClass: fast + accessMode: ReadWriteOnce + size: 8Gi +``` + +Example use: + +```yaml +{{- template "common.persistentvolumeclaim" (list . "mychart.persistentvolumeclaim") -}} +{{- define "mychart.persistentvolumeclaim" -}} +{{- end -}} +``` + +Output: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: persistentvolumeclaim + chart: persistentvolumeclaim-0.1.0 + heritage: Tiller + release: release-name + name: release-name-persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "fast" +``` + +## Partial API Objects + +When writing Kubernetes resources, you may find the following helpers useful to +construct parts of the spec. + +### EnvVar + +Use the EnvVar helpers within a container spec to simplify specifying key-value +environment variables or referencing secrets as values. + +Example Use: + +```yaml +{{- template "common.deployment" (list . 
"mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + containers: + - {{ template "common.container" (list . "mychart.deployment.container") }} +{{- end -}} +{{- define "mychart.deployment.container" -}} +{{- $fullname := include "common.fullname" . -}} +env: +- {{ template "common.envvar.value" (list "ZEUS" "cat") }} +- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + containers: + - env: + - name: ZEUS + value: cat + - name: ATHENA + valueFrom: + secretKeyRef: + key: athena + name: secret-name +... +``` + +### Volume + +Use the Volume helpers within a `Deployment` spec to help define ConfigMap and +PersistentVolumeClaim volumes. + +Example Use: + +```yaml +{{- template "common.deployment" (list . "mychart.deployment") -}} +{{- define "mychart.deployment" -}} +spec: + template: + spec: + volumes: + - {{ template "common.volume.configMap" (list "config" "configmap-name") }} + - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }} +{{- end -}} +``` + +Output: + +```yaml +... + spec: + volumes: + - configMap: + name: configmap-name + name: config + - name: data + persistentVolumeClaim: + claimName: pvc-name +... +``` + +The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object: + +| Value | Description | +| ------------------------- | ----------------------------------------------------- | +| persistence.enabled | If false, creates an `emptyDir` instead | +| persistence.existingClaim | If set, uses this instead of the passed in claim name | + +## Utilities + +### `common.fullname` + +The `common.fullname` template generates a name suitable for the `name:` field +in Kubernetes metadata. It is used like this: + +```yaml +name: {{ template "common.fullname" . }} +``` + +The following different values can influence it: + +```yaml +# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +fullnameOverride: "some-name" + +# This adds a prefix +fullnamePrefix: "pre-" +# This appends a suffix +fullnameSuffix: "-suf" + +# Global versions of the above +global: + fullnamePrefix: "pp-" + fullnameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for release "happy-panda" and chart "wordpress" +name: happy-panda-wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.fullname" -}} + {{ template "common.fullname" . }}-my-stuff +{{- end -}} +``` + +### `common.fullname.unique` + +The `common.fullname.unique` variant of fullname appends a unique seven-character +sequence to the end of the common name field. + +This takes all of the same parameters as `common.fullname` + +Example template: + +```yaml +uniqueName: {{ template "common.fullname.unique" . }} +``` + +Example output: + +```yaml +uniqueName: release-name-fullname-jl0dbwx +``` + +It is also impacted by the prefix and suffix definitions, as well as by +`.Values.fullnameOverride` + +Note that the effective maximum length of this function is 63 characters, not 54. + +### `common.name` + +The `common.name` template generates a name suitable for the `app` label. It is used like this: + +```yaml +app: {{ template "common.name" . 
}} +``` + +The following different values can influence it: + +```yaml +# By default, name uses '{{ .Chart.Name }}'. This +# overrides that and uses the given string instead. +nameOverride: "some-name" + +# This adds a prefix +namePrefix: "pre-" +# This appends a suffix +nameSuffix: "-suf" + +# Global versions of the above +global: + namePrefix: "pp-" + nameSuffix: "-ps" +``` + +Example output: + +```yaml +--- +# with the values above +name: pp-pre-some-name-suf-ps + +--- +# the default, for chart "wordpress" +name: wordpress +``` + +Output of this function is truncated at 54 characters, which leaves 9 additional +characters for customized overriding. Thus you can easily extend this name +in your own charts: + +```yaml +{{- define "my.name" -}} + {{ template "common.name" . }}-my-stuff +{{- end -}} +``` + +### `common.metadata` + +The `common.metadata` helper generates the `metadata:` section of a Kubernetes +resource. + +This takes three objects: + - .top: top context + - .fullnameOverride: override the fullname with this name + - .metadata + - .labels: key/value list of labels + - .annotations: key/value list of annotations + - .hook: name(s) of hook(s) + +It generates standard labels, annotations, hooks, and a name field. + +Example template: + +```yaml +{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }} +--- +{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }} +``` + +Example values: + +```yaml +bio: + name: example + labels: + first: matt + last: butcher + nick: technosophos + annotations: + format: bio + destination: archive + hook: pre-install + +pet: + fullnameOverride: Zeus + +``` + +Example output: + +```yaml +metadata: + name: release-name-metadata + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + first: "matt" + last: "butcher" + nick: "technosophos" + annotations: + "destination": "archive" + "format": "bio" + "helm.sh/hook": "pre-install" +--- +metadata: + name: Zeus + labels: + app: metadata + heritage: "Tiller" + release: "RELEASE-NAME" + chart: metadata-0.1.0 + annotations: +``` + +Most of the common templates that define a resource type (e.g. `common.configmap` +or `common.job`) use this to generate the metadata, which means they inherit +the same `labels`, `annotations`, `nameOverride`, and `hook` fields. + +### `common.labelize` + +`common.labelize` turns a map into a set of labels. + +Example template: + +```yaml +{{- $map := dict "first" "1" "second" "2" "third" "3" -}} +{{- template "common.labelize" $map -}} +``` + +Example output: + +```yaml +first: "1" +second: "2" +third: "3" +``` + +### `common.labels.standard` + +`common.labels.standard` prints the standard set of labels. + +Example usage: + +``` +{{ template "common.labels.standard" . }} +``` + +Example output: + +```yaml +app: labelizer +heritage: "Tiller" +release: "RELEASE-NAME" +chart: labelizer-0.1.0 +``` + +### `common.hook` + +The `common.hook` template is a convenience for defining hooks. + +Example template: + +```yaml +{{ template "common.hook" "pre-install,post-install" }} +``` + +Example output: + +```yaml +"helm.sh/hook": "pre-install,post-install" +``` + +### `common.chartref` + +The `common.chartref` helper prints the chart name and version, escaped to be +legal in a Kubernetes label field. + +Example template: + +```yaml +chartref: {{ template "common.chartref" . 
}} +``` + +For the chart `foo` with version `1.2.3-beta.55+1234`, this will render: + +```yaml +chartref: foo-1.2.3-beta.55_1234 +``` + +(Note that `+` is an illegal character in label values) diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_certificates.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_certificates.tpl new file mode 100644 index 0000000..d385098 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_certificates.tpl @@ -0,0 +1,32 @@ +{{- define "common.ca-certificates.volume" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +{{- if .Values.global.certs.volume }} +- name: ca-certificates + {{- if .Values.global.certs.volume.hostPath }} + hostPath: + path: {{ .Values.global.certs.volume.hostPath }} + type: Directory + {{- end }} + {{- if .Values.global.certs.volume.existingVolumeClaim }} + persistentVolumeClaim: + claimName: {{ .Values.global.certs.volume.existingVolumeClaim }} + {{- end }} +{{- else }} +- name: ca-certificates + persistentVolumeClaim: + claimName: {{ .Release.Name }}-certs-pvc +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.ca-certificates.volumeMount" -}} +{{- if .Values.certs }} +{{- if .Values.global }}{{- if .Values.global.certs }} +- name: ca-certificates + mountPath: {{ default "/etc/ssl/certs" .Values.certs.mountPath | quote }} + readOnly: true +{{- end -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_chartref.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_chartref.tpl new file mode 100644 index 0000000..e6c1486 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_chartref.tpl @@ -0,0 +1,14 @@ +{{- /* +common.chartref prints a chart name and version. + +It does minimal escaping for use in Kubernetes labels. + +Example output: + + zookeeper-1.2.3 + wordpress-3.2.1_20170219 + +*/ -}} +{{- define "common.chartref" -}} + {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_configmap.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_configmap.yaml new file mode 100644 index 0000000..f04def2 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_configmap.yaml @@ -0,0 +1,32 @@ +{{- define "common.configmap.tpl" -}} +apiVersion: v1 +kind: ConfigMap +{{ template "common.metadata.configs" . }} +data: + {{- $root := . -}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.configs }} + {{- range $key, $value := $container.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.configs -}} + {{- range $key, $value := .Values.configs.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | quote }} + {{- else }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.configmap" -}} +{{- template "common.util.merge" (append . 
"common.configmap.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_container.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_container.yaml new file mode 100644 index 0000000..5a733a3 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_container.yaml @@ -0,0 +1,99 @@ +{{- define "common.container.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $releaseName := .Release.Name -}} +name: {{ include "common.name" . }} +image: {{ include "common.image" (set . "container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +{{- if or .Values.configs .Values.secrets }} +env: +{{- if .Values.configs.data.natsUri }} + - name: NATS_CLIENT_ID + valueFrom: + fieldRef: + fieldPath: metadata.name +{{- end }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +ports: +{{- $port := .Values.service.port }} +{{- if .container }}{{- if .container.port }} + {{- $port = .container.port }} +{{- end }}{{- end }} +- containerPort: {{ $port }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if $root.Values.persistence }}{{- if $root.Values.persistence.enabled }}{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}}{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if or (contains $name $releaseName) (eq $releaseName "messaging") }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . 
}} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /health + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +readinessProbe: + httpGet: + path: /ready + port: {{ $port }} +{{- if $probes }} +{{ toYaml $probes | indent 2 }} +{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.container" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . "common.container.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_deployment.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_deployment.yaml new file mode 100644 index 0000000..73eb7aa --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_deployment.yaml @@ -0,0 +1,94 @@ +{{- define "common.deployment.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $releaseName := .Release.Name -}} +apiVersion: apps/v1 +kind: Deployment +{{ template "common.metadata.workload" . }} +spec: +{{- if .Values.deployment.replicas}} + replicas: {{ .Values.deployment.replicas }} +{{- end}} + template: + metadata: + annotations: + checksum/configs: {{ (print (include "common.configmap.tpl" .)) | sha256sum }} + checksum/secrets: {{ (print (include "common.secret.tpl" .)) | sha256sum }} +{{- if .Values.deployment }}{{- if .Values.deployment.annotations }} +{{ include "common.annote" (dict "annotations" .Values.deployment.annotations "root" . ) | indent 8 }} +{{- end }}{{- end }} + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- if .Values.deployment }}{{- if .Values.deployment.labels }} +{{ include "common.labelize" .Values.deployment.labels | indent 8 }} +{{- end }}{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.natsUri }} + {{ tpl .Values.configs.data.natsUri . | regexFind "//.*:" | trimAll ":" | trimAll "/" }}: "true" +{{- end }}{{- end }}{{- end }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: +{{- if or (contains $name $releaseName) (eq $releaseName "messaging") }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- else }} + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) 
"-secrets") (include "common.fullname" .)) }} + - {{ template "common.volume.secret" (list (printf "%s-secrets" (.Release.Name)) (printf "%s" (.Release.Name))) }} +{{- end }} + +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.persistentVolumeClaim }} +{{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} +{{- if kindIs "map" $claim }} +{{- if eq $name "default" }} + - {{ template "common.volume.pvc" (list (include "common.fullname" $root) (include "common.fullname" $root) $claim) }} +{{- else }} + - {{ template "common.volume.pvc" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $claim) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.hostPath }} +{{- range $name, $hostPath:= .Values.persistence.hostPath }} +{{- if kindIs "map" $hostPath }} + - {{ template "common.volume.hostpath" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $hostPath) }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{- if .Values.persistence }}{{- if .Values.persistence.enabled }}{{- if .Values.persistence.emptyDir }} +{{- range $name, $dir:= .Values.persistence.emptyDir }} +{{- if kindIs "map" $dir }} +{{- if $dir.create }} + - {{ template "common.volume.emptydir" (list (printf "%s-%s" (include "common.fullname" $root) $name) (printf "%s-%s" (include "common.fullname" $root) $name) $root.Values.persistence) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }}{{- end }}{{- end }} +{{ include "common.ca-certificates.volume" . | nindent 6 }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} +{{- if .Values.deployment }}{{- if .Values.deployment.initContainer }} + initContainers: + - +{{ include "common.initContainer.tpl" (set . "container" .Values.deployment.initContainer ) | indent 8 }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.deployment.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.deployment" -}} +{{- $top := first . -}} +{{- if and $top.Values.deployment }} +{{- template "common.util.merge" (append . "common.deployment.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_envvar.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_envvar.tpl new file mode 100644 index 0000000..39a997a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_envvar.tpl @@ -0,0 +1,32 @@ +{{- define "common.envvar.value" -}} + {{- $name := index . 0 -}} + {{- $value := index . 1 -}} + + name: {{ $name }} + value: {{ default "" $value | quote }} +{{- end -}} + +{{- define "common.envvar.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 1 -}} + {{- $configMapKey := index . 2 -}} + + name: {{ $name }} + valueFrom: + configMapKeyRef: + name: {{ $configMapName }}-configs + key: {{ $configMapKey }} +{{- end -}} + +{{- define "common.envvar.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + {{- $secretKey := index . 
2 -}} + + name: {{ $name }} + valueFrom: + secretKeyRef: + name: {{ $secretName }}-secrets + key: {{ $secretKey }} +{{- end -}} + diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_fullname.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_fullname.tpl new file mode 100644 index 0000000..0f6bc77 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_fullname.tpl @@ -0,0 +1,42 @@ +{{- /* +fullname defines a suitably unique name for a resource by combining +the release name and the chart name. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.fullnameOverride: Replaces the computed name with this given name +- .Values.fullnamePrefix: Prefix +- .Values.global.fullnamePrefix: Global prefix +- .Values.fullnameSuffix: Suffix +- .Values.global.fullnameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.fullname" . -}}"' +*/ -}} +{{- define "common.fullname" -}} + {{- $global := default (dict) .Values.global -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +common.fullname.unique adds a random suffix to the unique name. + +This takes the same parameters as common.fullname + +*/ -}} +{{- define "common.fullname.unique" -}} + {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_hpa.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_hpa.yaml new file mode 100644 index 0000000..be4215d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_hpa.yaml @@ -0,0 +1,31 @@ +{{- define "common.hpa.tpl" -}} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +{{ template "common.metadata" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "common.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ if .Values.hpa.targetAverageUtilizationCpu }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationCpu }} +{{- end }} +{{ if .Values.hpa.targetAverageUtilizationMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.targetAverageUtilizationMemory }} +{{- end }} +{{- end -}} +{{- define "common.hpa" -}} +{{- $top := first . -}} +{{- if and $top.Values.hpa }} +{{- template "common.util.merge" (append . "common.hpa.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_image.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_image.tpl new file mode 100644 index 0000000..6a2335a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_image.tpl @@ -0,0 +1,21 @@ +{{/* Return the proper collections image name */}} +{{- define "common.image" -}} + {{/* docker.io is the default registry - e.g. 
"qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{ $image := .Values.image }} + {{- if .container }}{{- if .container.image }} + {{ $image = .container.image }} + {{- end -}}{{- end -}} + {{- $registry := default "docker.io" (default .Values.image.registry $image.registry) -}} + {{- $repository := $image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default .Values.image.tag $image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_ingress.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_ingress.yaml new file mode 100644 index 0000000..ab9a75d --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_ingress.yaml @@ -0,0 +1,49 @@ +{{- define "common.ingress.tpl" -}} +apiVersion: extensions/v1beta1 +kind: Ingress +{{ template "common.metadata" . }} + annotations: + kubernetes.io/ingress.class: {{ template "common.ingress.class" . }} + {{- if .Values.configs }}{{- if .Values.configs.data }}{{- if .Values.configs.data.ingressAuthUrl }} + nginx.ingress.kubernetes.io/auth-url: {{ tpl .Values.configs.data.ingressAuthUrl . | quote }} + {{- end }}{{- end }}{{- end }} + {{- if .Values.ingress}}{{- if .Values.ingress.annotations }} + {{ include "common.annote" (dict "annotations" .Values.ingress.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + {{- if .Values.ingress }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "common.fullname" $ }} + servicePort: 80 + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} + {{- end }} +{{- define "common.ingress" -}} +{{- $top := first . -}} +{{- if and $top.Values.ingress }} +{{- template "common.util.merge" (append . "common.ingress.tpl") -}} +{{- end -}} +{{- end -}} + +{{- define "common.ingress.class" -}} + {{- $ingressClass := "nginx" }} + {{- if .Values.ingress }}{{- if .Values.ingress.class }} + {{- $ingressClass = .Values.ingress.class -}} + {{- end -}}{{- end -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_initContainer.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_initContainer.yaml new file mode 100644 index 0000000..3b12f55 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_initContainer.yaml @@ -0,0 +1,74 @@ +{{- define "common.initContainer.tpl" -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +name: {{ .container.name }} +image: {{ include "common.image" (set . 
"container" .container ) }} +{{- $pullPolicy := .Values.image.pullPolicy }} +{{- if .container }}{{- if .container.image }}{{- if .container.image.pullPolicy }} + {{- $pullPolicy = .container.image.pullPolicy }} +{{- end }}{{- end }}{{- end }} +imagePullPolicy: {{ $pullPolicy }} +env: + - name: SERVICE_NAME + value: {{ .Chart.Name }} +{{- if or .container.configs .container.secrets }} +{{ include "common.transformers" (set . "container" .container ) | indent 2 }} +{{- end }} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end }}{{- end }} +{{- $hasSecret := false -}} +{{- if $secrets.stringData }} +{{- $hasSecret = true -}} +{{- end }} +{{- if $secrets.data }} +{{- $hasSecret = true -}} +{{- end }} +{{- $hasVolumeMounts := false -}} +{{- if .container }}{{- if .container.volumeMounts }} +{{- $hasVolumeMounts = true -}} +{{- end -}}{{- end -}} +{{- if or $hasSecret $hasVolumeMounts }} +volumeMounts: +{{- end }} +{{- if $hasSecret }} +{{- if contains $name .Release.Name }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +{{- else }} +- name: {{ include "common.fullname" . }}-secrets + mountPath: "/run/secrets/qlik.com/{{ include "common.fullname" . }}" + readOnly: true +- name: {{ .Release.Name }}-secrets + mountPath: "/run/secrets/qlik.com/{{ .Release.Name }}" + readOnly: true +{{- end }} +{{- end }} +{{- if $hasVolumeMounts }} +{{- range $key, $val:= .container.volumeMounts }} +{{- if kindIs "map" $val }} +{{- if eq $key "default" }} +{{ include "common.volume.mount" (list (include "common.fullname" $root) $root.Values.deployment.container.volumeMounts.mountPath) }} +{{- else }} +{{ include "common.volume.mount" (list (printf "%s-%s" (include "common.fullname" $root) $key) $val) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- include "common.ca-certificates.volumeMount" . }} +{{- $probes := .Values.probes }} +{{- if .container }}{{- if .container.probes }} + {{- $probes = .container.probes }} +{{- end }}{{- end }} +{{- if .container }}{{- if .container.resources }} +resources: +{{ toYaml .container.resources | indent 2 }} +{{- end -}}{{- end -}} +{{- end -}} +{{- define "common.initContainer" -}} +{{- /* clear new line so indentation works correctly */ -}} +{{- println "" -}} +{{- include "common.util.merge" (append . "common.initContainer.tpl") | indent 8 -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata.yaml new file mode 100644 index 0000000..83c42d5 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata.yaml @@ -0,0 +1,35 @@ +{{- /* +common.metadata creates a standard metadata header. +It creates a 'metadata:' section with name and labels. +*/ -}} +{{ define "common.metadata" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.configs" -}} +metadata: + name: {{ template "common.fullname" . }}-configs + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} + +{{ define "common.metadata.secrets" -}} +metadata: + name: {{ template "common.fullname" . }}-secrets + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . 
| indent 4 -}} +{{- end -}} + +{{ define "common.metadata.workload" -}} +metadata: + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_annotations.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_annotations.tpl new file mode 100644 index 0000000..ed28474 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_annotations.tpl @@ -0,0 +1,23 @@ +{{- /* +common.hook defines a hook. + +This is to be used in a 'metadata.annotations' section. + +This should be called as 'template "common.metadata.hook" "post-install"' + +Any valid hook may be passed in. Separate multiple hooks with a ",". +*/ -}} +{{- define "common.hook" -}} +"helm.sh/hook": {{printf "%s" . | quote}} +{{- end -}} + +{{- define "common.annote" -}} +{{ $root := .root}} +{{- range $k, $v := .annotations }} +{{- if kindIs "string" $v }} +{{ $k | quote }}: {{ tpl $v $root | quote }} +{{- else -}} +{{ $k | quote }}: {{ $v }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_labels.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_labels.tpl new file mode 100644 index 0000000..15fe00c --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_metadata_labels.tpl @@ -0,0 +1,28 @@ +{{- /* +common.labelize takes a dict or map and generates labels. + +Values will be quoted. Keys will not. + +Example output: + + first: "Matt" + last: "Butcher" + +*/ -}} +{{- define "common.labelize" -}} +{{- range $k, $v := . }} +{{ $k }}: {{ $v | quote }} +{{- end -}} +{{- end -}} + +{{- /* +common.labels.standard prints the standard Helm labels. + +The standard labels are frequently used in metadata. +*/ -}} +{{- define "common.labels.standard" -}} +app: {{ template "common.name" . }} +chart: {{ template "common.chartref" . }} +heritage: {{ .Release.Service | quote }} +release: {{ .Release.Name | quote }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_name.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_name.tpl new file mode 100644 index 0000000..1d42fb0 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_name.tpl @@ -0,0 +1,29 @@ +{{- /* +name defines a template for the name of the chart. It should be used for the `app` label. +This is common practice in many Kubernetes manifests, and is not Helm-specific. + +The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should +not exceed 63 characters. + +Parameters: + +- .Values.nameOverride: Replaces the computed name with this given name +- .Values.namePrefix: Prefix +- .Values.global.namePrefix: Global prefix +- .Values.nameSuffix: Suffix +- .Values.global.nameSuffix: Global suffix + +The applied order is: "global prefix + prefix + name + suffix + global suffix" + +Usage: 'name: "{{- template "common.name" . 
-}}"' +*/ -}} +{{- define "common.name"}} + {{- $global := default (dict) .Values.global -}} + {{- $base := default .Chart.Name .Values.nameOverride -}} + {{- $gpre := default "" $global.namePrefix -}} + {{- $pre := default "" .Values.namePrefix -}} + {{- $suf := default "" .Values.nameSuffix -}} + {{- $gsuf := default "" $global.nameSuffix -}} + {{- $name := print $gpre $pre $base $suf $gsuf -}} + {{- $name | lower | trunc 54 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.tpl new file mode 100644 index 0000000..21dad10 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.tpl @@ -0,0 +1,32 @@ +{{- define "common.networkpolicy.podSelection" -}} + {{- $root := index . 0 -}} + {{- $value := index . 1 -}} + {{- $appRelease := $root.Release.Name }} + {{- $appNamespace := $root.Release.Namespace }} + {{- $serviceName := printf "%s" ( tpl $value $root ) | regexFind "//.*:" | trimAll ":" | trimAll "/" }} + {{- $appLabel := printf "%s" $serviceName | replace (printf ".%s.svc.cluster.local" $appNamespace) "" | replace (printf "%s-" $appRelease) "" | replace "-client" "" | replace "-master" "" }} + {{- $appPort := printf "%s" ( tpl $value $root ) | regexFind "[0-9]+" }} + {{- $additionalPodLabels := "" }} + + {{- if $root.Values.networkPolicy.podSelections }}{{- if index $root.Values.networkPolicy.podSelections $appLabel }} + {{- $appRelease = index $root.Values.networkPolicy.podSelections $appLabel "release" | default $appRelease }} + {{- $appNamespace = index $root.Values.networkPolicy.podSelections $appLabel "namespace" | default $appNamespace }} + {{- $additionalPodLabels = index $root.Values.networkPolicy.podSelections $appLabel "additionalPodLabels" | default $additionalPodLabels }} + {{- end }}{{- end }} + - to: + - podSelector: + matchLabels: + app: {{ $appLabel }} + release: {{ $appRelease | quote }} + {{- if $additionalPodLabels -}} + {{- range $key, $val := $additionalPodLabels }} + {{ $key }}: {{ $val | quote }} + {{- end}} + {{- end}} + namespaceSelector: + matchLabels: + name: {{ $appNamespace | quote }} + ports: + - protocol: TCP + port: {{ $appPort }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.yaml new file mode 100644 index 0000000..c4c46b9 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_networkpolicy.yaml @@ -0,0 +1,89 @@ +{{- define "common.networkpolicy.tpl" -}} +{{- $root := . -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +{{ template "common.metadata" . }} +spec: + podSelector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} + policyTypes: + - Egress + - Ingress + ingress: + - {} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.networkPolicy.ipBlock }} + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.ipBlock.allowedExtCidr }} + {{- if .Values.networkPolicy.ipBlock.blockedCidrs }} + except: + {{- if .Values.networkPolicy.ipBlock.blockedCidrs.defaultBlock }} + - 100.64.0.0/10 + - 10.0.0.0/8 + - 169.254.0.0/16 + - 172.16.0.0/12 + - 192.168.0.0/16 + - 127.0.0.0/8 + {{- end }} + {{- range .Values.networkPolicy.ipBlock.blockedCidrs.additionalBlockedCidrs }} + - {{ . 
}} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ipBlock.allowedPorts }} + ports: + {{- range $ports := .Values.networkPolicy.ipBlock.allowedPorts }} + - port: {{ $ports.port }} + protocol: {{ $ports.protocol }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.configs.data }} + {{- if contains "Uri" $key -}} + {{ include "common.networkpolicy.podSelection" (list $root $value) }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.secrets.stringData }} + {{- if contains "Uri" $key -}} + {{ include "common.networkpolicy.podSelection" (list $root $value) }} + {{- end }} + {{- end }} + + {{- if .Values.networkPolicy.podSelections }}{{- if .Values.networkPolicy.podSelections.additionalPodSelections }} + {{- range $appLabel, $value := .Values.networkPolicy.podSelections.additionalPodSelections }} + - to: + - podSelector: + matchLabels: + app: {{ $appLabel }} + release: {{ $value.release | default $root.Release.Name | quote }} + {{- if $value.additionalPodLabels -}} + {{- range $key, $val := $value.additionalPodLabels }} + {{ $key }}: {{ $val | quote }} + {{- end}} + {{- end}} + namespaceSelector: + matchLabels: + name: {{ $value.namespace | default $root.Release.Namespace | quote }} + {{- if $value.port }} + ports: + - protocol: TCP + port: {{ $value.port }} + {{- end }} + {{- end }} + {{- end }}{{- end }} +{{- end }} + +{{- define "common.networkpolicy" -}} +{{- $top := first . -}} +{{- if and $top.Values.networkPolicy }} +{{- template "common.util.merge" (append . "common.networkpolicy.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaim.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaim.yaml new file mode 100644 index 0000000..4c2ed62 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaim.yaml @@ -0,0 +1,47 @@ +{{- define "common.persistentvolumeclaim.tpl" -}} +{{- $persistence := default .Values.persistence .claim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +{{ template "common.metadata" . }} +spec: + accessModes: + - {{ $persistence.accessMode | quote }} + resources: + requests: + storage: {{ $persistence.size | quote }} +{{- if $persistence.matchLabels }} + selector: + matchLabels: +{{- include "common.labelize" $persistence.matchLabels | indent 6 -}} +{{- end -}} +{{- if $persistence.storageClass }} +{{- if (eq "-" $persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ $persistence.storageClass }}" +{{- end }} +{{- else -}} + {{- if .Values.global }} + {{- if .Values.global.persistence }} + {{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) -}} + storageClassName: "" + {{- else -}} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- define "common.persistentvolumeclaim" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if $top.Values.persistence.enabled -}}{{- if $top.Values.persistence.persistentVolumeClaim -}} + {{- if not $top.claim -}} + {{- $top = set $top "claim" $top.Values.persistence.persistentVolumeClaim.default -}} + {{- end -}} + {{- if not $top.claim.existingClaim -}} + {{- template "common.util.merge" (append . 
"common.persistentvolumeclaim.tpl") -}} + {{- end -}} +{{- end -}}{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaims.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaims.yaml new file mode 100644 index 0000000..2cb894b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_persistentvolumeclaims.yaml @@ -0,0 +1,27 @@ +{{- define "common.persistentvolumeclaims" -}} +{{- $root := . -}} +{{- if .Values.persistence -}}{{- if .Values.persistence.enabled -}} + {{- if .Values.persistence.persistentVolumeClaim -}} + {{- range $name, $claim:= .Values.persistence.persistentVolumeClaim }} + {{- if kindIs "map" $claim }} + {{- if eq $name "default" }} + {{- $root = set $root "claim" $claim -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- else -}} + {{- $values := set $root.Values "fullnameOverride" (printf "%s-%s" (include "common.fullname" $root) $name) -}} + {{- $root = set (set $root "claim" $claim) "Values" $values -}} + {{- include "common.persistentvolumeclaim" (list $root "mychart.persistentvolumeclaim") -}} + {{- end -}} + {{- end -}} + {{- printf "\n" -}}{{- printf "\n" -}} + {{- printf "---" -}} + {{- printf "\n" -}} + {{- $_:= unset $root.Values "fullnameOverride" -}} + {{- end -}} + {{- end -}} +{{- end -}}{{- end -}} +{{- end -}} + +## No override templates are needed for the case of defining multiple PVCs +{{- define "mychart.persistentvolumeclaim" -}} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_podSecurityPolicy.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_podSecurityPolicy.yaml new file mode 100644 index 0000000..c06f607 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_podSecurityPolicy.yaml @@ -0,0 +1,55 @@ +{{- define "common.podsecuritypolicy.tpl" -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +{{ template "common.metadata" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end -}} +{{- define "common.podsecuritypolicy" -}} +{{- $top := first . 
-}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}}{{- if ne ($top.Values.podSecurityPolicy | default false) false -}} +{{- template "common.util.merge" (append . "common.podsecuritypolicy.tpl") -}} +{{- end -}}{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_role.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_role.yaml new file mode 100644 index 0000000..cf1d6f6 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_role.yaml @@ -0,0 +1,23 @@ +{{- define "common.role.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +{{ template "common.metadata" . }} +rules: +{{- if .Values.podSecurityPolicy | default false }} +- apiGroups: + - policy + resourceNames: + - {{ template "common.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} +{{- end -}} +{{- define "common.role" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.role.tpl") -}} +{{- end -}} +{{- end -}} + diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_rolebinding.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_rolebinding.yaml new file mode 100644 index 0000000..021e896 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_rolebinding.yaml @@ -0,0 +1,19 @@ +{{- define "common.rolebinding.tpl" -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +{{ template "common.metadata" . }} +roleRef: + kind: Role + name: {{ template "common.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "common.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} +{{- define "common.rolebinding" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.rolebinding.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_secret.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_secret.yaml new file mode 100644 index 0000000..45ec55f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_secret.yaml @@ -0,0 +1,45 @@ +{{- define "common.secret.tpl" -}} +apiVersion: v1 +kind: Secret +{{ template "common.metadata.secrets" . }} +type: Opaque +data: + {{- $root := . 
-}} + {{- if .Values.deployment -}} + {{- range $name, $container:= .Values.deployment }} + {{- if kindIs "map" $container }}{{- if $container.secrets }} + {{- range $key, $value := $container.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- range $key, $value := $container.secrets.data }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }}{{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets -}} + {{- if .Values.secrets.stringData -}} + {{- range $key, $value := .Values.secrets.stringData }} + {{- if kindIs "string" $value }} + {{ $key }}: {{ tpl ( $value ) $root | b64enc }} + {{- else }} + {{ $key }}: {{ $value | b64enc }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.secrets.data -}} + {{- range $key, $value := .Values.secrets.data }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- define "common.secret" -}} +{{- template "common.util.merge" (append . "common.secret.tpl") -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_service.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_service.yaml new file mode 100644 index 0000000..fb4a9e8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_service.yaml @@ -0,0 +1,25 @@ +{{- define "common.service.tpl" -}} +apiVersion: v1 +kind: Service +{{ template "common.metadata" . }} + annotations: + {{- if .Values.service }}{{- if .Values.service.annotations }} + {{ include "common.annote" (dict "annotations" .Values.service.annotations "root" . ) | indent 4 }} + {{- end }}{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ template "common.name" . }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} +{{- end -}} +{{- define "common.service" -}} +{{- $top := first . -}} +{{- if and $top.Values.service}} +{{- template "common.util.merge" (append . "common.service.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_serviceaccount.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_serviceaccount.yaml new file mode 100644 index 0000000..534a4bf --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- define "common.serviceaccount.tpl" -}} +apiVersion: v1 +kind: ServiceAccount +{{ template "common.metadata" . }} +{{- end -}} +{{- define "common.serviceaccount" -}} +{{- $top := first . -}} +{{- if ne ($top.Values.rbacEnabled | default false) false -}} +{{- template "common.util.merge" (append . "common.serviceaccount.tpl") -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_statefulset.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_statefulset.yaml new file mode 100644 index 0000000..7521b15 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_statefulset.yaml @@ -0,0 +1,44 @@ +{{- define "common.statefulset.tpl" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +apiVersion: apps/v1 +kind: StatefulSet +{{ template "common.metadata.workload" . 
}} +spec: +{{- if .Values.statefulset.replicas}} + replicas: {{ .Values.statefulset.replicas }} +{{- end}} + template: + metadata: + labels: + app: {{ template "common.name" . }} + release: {{ .Release.Name | quote }} + spec: +{{- if ne (.Values.rbacEnabled | default false) false }} + serviceAccountName: {{ template "common.fullname" . }} +{{- end }} +{{- if .Values.image }}{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }}{{- end }} + volumes: + - {{ template "common.volume.secret" (list (print (include "common.fullname" .) "-secrets") (include "common.fullname" .)) }} +{{- if .Values.persistence }} + - {{ template "common.volume.pvc" (list (include "common.fullname" .) (include "common.fullname" .) .Values.persistence) }} +{{- end }} +{{- if .Values.configs }}{{- if .Values.configs.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} +{{- end }}{{- end }} + containers: + - +{{ include "common.container.tpl" (set . "container" .Values.statefulset.container ) | indent 8 }} + selector: + matchLabels: + app: {{ template "common.name" . }} + release: {{ .Release.Name }} +{{- end -}} +{{- define "common.statefulset" -}} +{{- $top := first . -}} +{{- if and $top.Values.statefulset }} +{{- template "common.util.merge" (append . "common.statefulset.tpl") -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_storageclass.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_storageclass.yaml new file mode 100644 index 0000000..32838f8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_storageclass.yaml @@ -0,0 +1,38 @@ +{{- define "common.storageclass.tpl" -}} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ .Values.persistence.storageClass.name | default (include "common.name" .) }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "common.labels.standard" . | indent 4 }} +provisioner: {{ .Values.persistence.storageClass.provisioner }} +{{- if .Values.persistence.storageClass.reclaimPolicy }} +reclaimPolicy: {{ .Values.persistence.storageClass.reclaimPolicy }} +{{- end }} +{{- if .Values.persistence.storageClass.volumeBindingMode }} +volumeBindingMode: {{ .Values.persistence.storageClass.volumeBindingMode }} +{{- end }} +{{- if .Values.persistence.storageClass.allowVolumeExpansion }} +allowVolumeExpansion: {{ .Values.persistence.storageClass.allowVolumeExpansion }} +{{- end }} +{{- if .Values.persistence.storageClass.parameters }} +parameters: +{{- range $key, $val := .Values.persistence.storageClass.parameters }} + {{ $key }}: {{ $val | quote }} +{{- end}} +{{- end }} +{{- if .Values.persistence.storageClass.mountOptions }} +mountOptions: +{{- range .Values.persistence.storageClass.mountOptions }} + - {{ . }} +{{- end }} +{{- end }} +{{- end -}} + +{{- define "common.storageclass" -}} +{{- $top := first . -}} +{{- if $top.Values.persistence -}}{{- if $top.Values.persistence.enabled -}}{{- if $top.Values.persistence.storageClass -}} +{{- template "common.util.merge" (append . 
"common.storageclass.tpl") -}} +{{- end -}}{{- end -}}{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_transformers.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_transformers.tpl new file mode 100644 index 0000000..f42e742 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_transformers.tpl @@ -0,0 +1,41 @@ +{{- define "common.transformers" -}} +{{- $fullname := include "common.fullname" . -}} +{{- $root := . -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $release := .Release.Name -}} +{{- $commonSecretList := list "mongodbUri" "redisUri" "redisPassword" -}} +{{ $secrets := .Values.secrets}} +{{- if .container }}{{- if .container.secrets }} +{{ $secrets = .container.secrets}} +{{- end -}}{{- end -}} +{{- if $secrets -}} +{{- range $key, $value := $secrets.stringData }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- range $key, $value := $secrets.data }} +{{- if has $key $commonSecretList -}}{{- if not (contains $name $release) }} +{{- $fullname = $release -}} +{{- end }}{{- end }} +- {{ template "common.envvar.value" (list (print $key "_FILE" | snakecase | upper) ( print "/run/secrets/qlik.com/" $fullname ( print "/" $key ) )) }} +{{- $fullname = include "common.fullname" $root -}} +{{- end }} +{{- end }} +{{ $configs := .Values.configs}} +{{- if .container }}{{- if .container.configs }} +{{ $configs = .container.configs}} +{{- end -}}{{- end -}} +{{- if $configs -}} +{{- range $key, $value := $configs.data }} +- {{ template "common.envvar.configmap" (list (print $key | snakecase | upper) $fullname $key ) }} +{{- end }} +{{- range $key, $value := $configs }} +{{- if ne $key "data" }} +- {{ template "common.envvar.value" (list (print $key | snakecase | upper) $value ) }} +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_util.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_util.tpl new file mode 100644 index 0000000..6abeec0 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_util.tpl @@ -0,0 +1,15 @@ +{{- /* +common.util.merge will merge two YAML templates and output the result. + +This takes an array of three values: +- the top context +- the template name of the overrides (destination) +- the template name of the base (source) + +*/ -}} +{{- define "common.util.merge" -}} +{{- $top := first . -}} +{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} +{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}} +{{- regexReplaceAll ".*: null|.*: nil" (toYaml (merge $overrides $tpl)) "${1}" -}} +{{- end -}} \ No newline at end of file diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/templates/_volume.tpl b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_volume.tpl new file mode 100644 index 0000000..360e239 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/templates/_volume.tpl @@ -0,0 +1,62 @@ +{{- define "common.volume.configmap" -}} + {{- $name := index . 0 -}} + {{- $configMapName := index . 
1 -}} + + name: {{ $name }} + configMap: + name: {{ $configMapName }}-configs +{{- end -}} + +{{- define "common.volume.secret" -}} + {{- $name := index . 0 -}} + {{- $secretName := index . 1 -}} + + name: {{ $name }} + secret: + secretName: {{ $secretName }}-secrets +{{- end -}} + +{{- define "common.volume.pvc" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $claim := index . 2 -}} + + name: {{ $name }} + {{- if $claim }} + persistentVolumeClaim: + claimName: {{ $claim.existingClaim | default $claimName }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.emptydir" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + {{- if $persistence.emptyDir }} + name: {{ $name }} + emptyDir: {} + {{- end -}} +{{- end -}} + +{{- define "common.volume.hostpath" -}} + {{- $name := index . 0 -}} + {{- $claimName := index . 1 -}} + {{- $persistence := index . 2 -}} + + name: {{ $name }} + hostPath: + path: {{ $persistence.path }} + type: {{ $persistence.type }} +{{- end -}} + + +{{- define "common.volume.mount" -}} +{{- $volume := index . 0 -}} +{{- $mountPath := index . 1 -}} +- name: {{ $volume }} + mountPath: {{ default "/tmp" $mountPath.mountPath | quote }} + readOnly: {{ default false $mountPath.readOnly }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/qlikcommon/values.yaml b/qliksense/charts/edge-auth/charts/qlikcommon/values.yaml new file mode 100644 index 0000000..b7cf514 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/qlikcommon/values.yaml @@ -0,0 +1,4 @@ +# Default values for commons. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value diff --git a/qliksense/charts/edge-auth/charts/redis/.helmignore b/qliksense/charts/edge-auth/charts/redis/.helmignore new file mode 100644 index 0000000..b2767ae --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS diff --git a/qliksense/charts/edge-auth/charts/redis/Chart.yaml b/qliksense/charts/edge-auth/charts/redis/Chart.yaml new file mode 100644 index 0000000..0b1ce8a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.7 +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. +home: http://redis.io/ +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +version: 10.5.6 diff --git a/qliksense/charts/edge-auth/charts/redis/README.md b/qliksense/charts/edge-auth/charts/redis/README.md new file mode 100644 index 0000000..72eb836 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/README.md @@ -0,0 +1,497 @@ + +# Redis + +[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. 
+ +## TL;DR; + +```bash +# Testing configuration +$ helm install my-release stable/redis +``` + +```bash +# Production configuration +$ helm install my-release stable/redis --values values-production.yaml +``` + +## Introduction + +This chart bootstraps a [Redis](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release stable/redis +``` + +The command deploys Redis on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Redis chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | Redis password (overrides `password`) | `nil` | +| `image.registry` | Redis Image registry | `docker.io` | +| `image.repository` | Redis Image name | `bitnami/redis` | +| `image.tag` | Redis Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `1` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | Redis password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common Redis node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | 
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.runAsUser` | User ID for the container (both redis master and slave pods) | `1001` | +| `securityContext.sysctls` | Set namespaced sysctls for the container (both redis master and slave pods) | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Redis exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | {} | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.podLabels` | Additional labels for Redis master pod | {} | +| `master.podAnnotations` | Additional annotations for Redis master pod | {} | +| `redisPort` | Redis port (in both master and slaves) | `6379` | +| `master.command` | Redis master entrypoint string. The command `redis-server` is executed if this is not provided. | `/run.sh` | +| `master.configmap` | Additional Redis configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of Redis commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | Redis master additional command line flags | [] | +| `master.nodeSelector` | Redis master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for Redis master pod assignment | [] | +| `master.affinity` | Affinity settings for Redis master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | Redis master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `30` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `30` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.priorityClassName` | Redis Master pod priorityClassName | {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if Redis slave service type is `LoadBalancer` | `nil` | +| `slave.command` | Redis slave entrypoint array. The docker image's ENTRYPOINT is used if this is not provided. | `/run.sh` | +| `slave.configmap` | Additional Redis configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of Redis commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | Redis slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `30` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `10` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `10` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.podLabels` | Additional labels for Redis slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for Redis slave pod | `master.podAnnotations` | +| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | Redis slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.priorityClassName` | Redis Slave pod priorityClassName | {} | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | Redis Sentinel port | `26379` | +| `sentinel.configmap` | Additional Redis configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for Redis read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for Redis sentinel | 
`26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for Redis read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for Redis sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if Redis sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | Redis sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | Redis Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | Redis Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + stable/redis +``` + +The above command sets the Redis server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install my-release -f values.yaml stable/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults causes pod failure for the Redis pod as it attempts to write to the `/bitnami` directory. Consider installing Redis with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of slaves: +```diff +- cluster.slaveCount: 2 ++ cluster.slaveCount: 3 +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a Redis master StatefulSet (only one master node allowed) and a Redis slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - Redis Master service: Points to the master, where read-write operations can be performed + - Redis Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a Redis master StatefulSet (only one master allowed) and a Redis slave StatefulSet. In this case, the pods will contain an extra container with Redis Sentinel. This container will form a cluster of Redis Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed: + + - Redis service: Exposes port 6379 for Redis read-only operations and port 26379 for accessing Redis Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name <master-set-name> +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for Redis you need to create a secret containing the password. 
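+
+For instance, a minimal sketch of creating such a secret with `kubectl` (the secret name `redis-password-file` matches the deployment example below; the local file path is illustrative, and the `redis-password` key becomes the mounted file name):
+
+```bash
+$ kubectl create secret generic redis-password-file --from-file=redis-password=./redis-password.txt
+```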
+ +> *NOTE*: The file containing the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinel.enabled=true +metrics.enabled=true +``` + +### Metrics + +The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings +Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. +To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME stable/redis +``` + +## NetworkPolicy + +To enable network policy for Redis, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to Redis. This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to Redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. 
For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: +* Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +* Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the Redis Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + + - Recommended: Create a clone of the Redis Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release stable/redis --set persistence.existingClaim= + ``` + + - Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install stable/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + + - `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) + - `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). 
If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis StatefulSet before upgrading: +```bash +$ kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` +And edit the Redis slave (and metrics if enabled) deployment: +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Notable changes + +### 9.0.0 +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### 7.0.0 +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. + +This version also allows enabling Redis Sentinel containers inside of the Redis Pods (feature disabled by default). In case the master crashes, a new Redis node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/qliksense/charts/edge-auth/charts/redis/ci/default-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/qliksense/charts/edge-auth/charts/redis/ci/dev-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/dev-values.yaml new file mode 100644 index 0000000..be01913 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/dev-values.yaml @@ -0,0 +1,9 @@ +master: + persistence: + enabled: false + +cluster: + enabled: true + slaveCount: 1 + +usePassword: false diff --git a/qliksense/charts/edge-auth/charts/redis/ci/extra-flags-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/qliksense/charts/edge-auth/charts/redis/ci/insecure-sentinel-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/insecure-sentinel-values.yaml new file mode 100644 index 0000000..2e9174f --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/insecure-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: false + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/edge-auth/charts/redis/ci/production-sentinel-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..36a00e3 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,524 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r36 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.5-debian-9-r37 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Redis config file
+## ref: https://redis.io/topics/config
+##
+configmap: |-
+  # maxmemory-policy volatile-lru
+
+## Sysctl InitContainer
+## used to perform sysctl operations to modify kernel settings (sometimes needed to avoid warnings)
+sysctlImage:
+  enabled: false
+  command: []
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  mountHostSys: false
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
diff --git a/qliksense/charts/edge-auth/charts/redis/ci/production-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/production-values.yaml
new file mode 100644
index 0000000..6fa9c88
--- /dev/null
+++ b/qliksense/charts/edge-auth/charts/redis/ci/production-values.yaml
@@ -0,0 +1,525 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+
+## Bitnami Redis image version
+## ref: https://hub.docker.com/r/bitnami/redis/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/redis
+  ## Bitnami Redis image tag
+  ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links
+  ##
+  tag: 5.0.5-debian-9-r36
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## Redis pod Security Context
+securityContext:
+  enabled: true
+  fsGroup: 1001
+  runAsUser: 1001
+
+## Cluster settings
+cluster:
+  enabled: true
+  slaveCount: 3
+
+## Use Redis Sentinel in the Redis pod. This will disable the master and slave services and
+## create one Redis service with ports for the Sentinel and the Redis instances
+sentinel:
+  enabled: false
+  ## Require password authentication on the sentinel itself
+  ## ref: https://redis.io/topics/sentinel
+  usePassword: true
+  ## Bitnami Redis Sentinel image version
+  ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/redis-sentinel
+    ## Bitnami Redis Sentinel image tag
+    ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
+    ##
+    tag: 5.0.5-debian-9-r37
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. 
This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + loadBalancerIP: + + ## Redis slave port + port: 6379 + + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
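    ## Illustrative sketch only (not a chart default): each slave replica claims its own
    ## volume of this size, so something like
    ##   helm upgrade --install my-redis . --set cluster.slaveCount=2 --set slave.persistence.size=4Gi
    ## would request two 4Gi slave volumes in addition to the master volume.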
+ subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.0.3-debian-9-r0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
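    ## For a private registry, the referenced secret can be created up front, for example
    ## (server and credentials below are placeholders):
    ##   kubectl create secret docker-registry myRegistryKeySecretName \
    ##     --docker-server=registry.example.com --docker-username=<user> --docker-password=<password> \
    ##     --namespace <release-namespace>
    ## and then listed under pullSecrets or global.imagePullSecrets.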
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # maxmemory-policy volatile-lru + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m diff --git a/qliksense/charts/edge-auth/charts/redis/ci/redis-lib-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/redis-lib-values.yaml new file mode 100644 index 0000000..e03382b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/redis-lib-values.yaml @@ -0,0 +1,13 @@ +## Redis library image +## ref: https://hub.docker.com/r/library/redis/ +## +image: + registry: docker.io + repository: redis + tag: '5.0.5' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/edge-auth/charts/redis/ci/redisgraph-module-values.yaml b/qliksense/charts/edge-auth/charts/redis/ci/redisgraph-module-values.yaml new file mode 100644 index 0000000..8096020 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/ci/redisgraph-module-values.yaml @@ -0,0 +1,10 @@ +image: + registry: docker.io + repository: redislabs/redisgraph + tag: '1.0.0' + +master: + command: "redis-server" + +slave: + command: "redis-server" diff --git a/qliksense/charts/edge-auth/charts/redis/templates/NOTES.txt b/qliksense/charts/edge-auth/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..5b1089e --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/NOTES.txt @@ -0,0 +1,104 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. 
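As a minimal sketch (assuming the default masterSet name "mymaster", the default Sentinel port 26379 and the service name rendered above; adjust to your release), a client can ask Sentinel which node is currently the master before issuing writes:

    redis-cli -h <redis-service> -p 26379 -a "$REDIS_PASSWORD" SENTINEL get-master-addr-by-name mymaster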
+
+{{- else }}
+Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster:
+
+{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations
+{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations
+{{- end }}
+
+{{- else }}
+Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster:
+
+{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- end }}
+
+{{ if .Values.usePassword }}
+To get your password run:
+
+    export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode)
+{{- end }}
+
+To connect to your Redis server:
+
+1. Run a Redis pod that you can use as a client:
+
+   kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \
+    {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }}
+   {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }}
+   --image {{ template "redis.image" . }} -- bash
+
+2. Connect using the Redis CLI:
+
+{{- if .Values.cluster.enabled }}
+   {{- if .Values.sentinel.enabled }}
+   redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Read-only operations
+   redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} # Sentinel access
+   {{- else }}
+   redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}
+   redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}
+   {{- end }}
+{{- else }}
+   redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}
+{{- end }}
+
+{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+Note: Since NetworkPolicy is enabled, only pods with the label
+{{ template "redis.fullname" . }}-client=true
+will be able to connect to Redis.
+{{- else -}}
+
+To connect to your database from outside the cluster, execute the following commands:
+
+{{- if contains "NodePort" .Values.master.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master)
+    redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}
+
+{{- else if contains "LoadBalancer" .Values.master.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" .
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/_helpers.tpl b/qliksense/charts/edge-auth/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..3397a7b --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/_helpers.tpl @@ -0,0 +1,355 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
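As a hedged illustration (names below are placeholders): the secret can also be supplied externally and referenced via existingSecret, e.g.

    kubectl create secret generic my-redis-auth --from-literal=redis-password=<password>
    helm upgrade --install my-redis . --set existingSecret=my-redis-auth

in which case the helper below returns that secret name instead of the release fullname.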
+*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
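For illustration only (storage class names are hypothetical), the precedence implemented below means the global value wins over the per-role one, so

    helm upgrade --install my-redis . --set global.storageClass=fast-ssd --set slave.persistence.storageClass=standard

ends up rendering storageClassName: fast-ssd for the slave volume claims.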
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/configmap.yaml b/qliksense/charts/edge-auth/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..d17ec26 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{ tpl .Values.configmap . | indent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{ tpl .Values.master.configmap . | indent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{ tpl .Values.slave.configmap . | indent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{ tpl .Values.sentinel.configmap . | indent 4 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/headless-svc.yaml b/qliksense/charts/edge-auth/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..909cbce --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis +{{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel +{{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/health-configmap.yaml b/qliksense/charts/edge-auth/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..35c61b5 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_PASSWORD --no-auth-warning \ +{{- end }} + -h localhost \ + -p $REDIS_SENTINEL_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + response=$( + timeout -s 9 $1 \ + redis-cli \ +{{- if .Values.usePassword }} + -a $REDIS_MASTER_PASSWORD --no-auth-warning \ +{{- end }} + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ 
"$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/qliksense/charts/edge-auth/charts/redis/templates/metrics-prometheus.yaml b/qliksense/charts/edge-auth/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..3f33454 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,30 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/metrics-svc.yaml b/qliksense/charts/edge-auth/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..74f6fa8 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.service.labels -}} + {{ toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" -}} {{ if .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/networkpolicy.yaml b/qliksense/charts/edge-auth/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..da05552 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . 
}} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/prometheusrule.yaml b/qliksense/charts/edge-auth/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..500c3b3 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/psp.yaml b/qliksense/charts/edge-auth/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..28ae22a --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/psp.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.runAsUser }} + max: {{ .Values.securityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-master-statefulset.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..b61c539 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,419 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master +{{- if .Values.master.podLabels }} +{{ toYaml .Values.master.podLabels | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . | indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" + {{- end }} + {{- with .Values.master.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: "{{ .Values.master.schedulerName }}" + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: "{{ template "redis.image" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.command }} + {{ .Values.master.command }} ${ARGS[@]} + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + echo "Getting information about current running sentinels" + # Get information from existing sentinels + existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template "redis.fullname" . }} -a "$REDIS_PASSWORD" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }}) + echo "$existing_sentinels" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc/ + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.master.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel-tmp-conf + emptyDir: {} + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.master.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-master-svc.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..3a98e66 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{ toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-role.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..71f75ef --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . 
}}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{ toYaml .Values.rbac.role.rules | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-rolebinding.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..aceb258 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-serviceaccount.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..f027176 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-statefulset.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..d5a8db5 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,437 @@ +{{- if .Values.cluster.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: +{{ toYaml .Values.slave.updateStrategy | indent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} +{{ toYaml .Values.slave.podLabels | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} +{{ toYaml .Values.slave.podAnnotations | indent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- include "redis.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if .Values.securityContext.sysctls }} + sysctls: +{{ toYaml .Values.securityContext.sysctls | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: "{{ template "redis.serviceAccountName" . }}" + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: "{{ .Values.slave.schedulerName }}" + {{- end }} + {{- with .Values.slave.affinity }} + affinity: +{{ tpl (toYaml .) $ | indent 8 }} + {{- end }} + containers: + - name: {{ template "redis.fullname" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + {{- if (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.command }} + {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + redis-server "${ARGS[@]}" + {{- end }} + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.slave.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: "{{ template "sentinel.image" . }}" + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass $REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.sentinel.resources | indent 10 }} + volumeMounts: + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.slave.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.sysctlImage.resources | indent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: +{{ toYaml .Values.sysctlImage.command | indent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: +{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: +{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-svc.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..052ecea --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{ toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: +{{ toYaml .Values.slave.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/redis-with-sentinel-svc.yaml b/qliksense/charts/edge-auth/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..5017c22 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{ toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: +{{ toYaml .Values.sentinel.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/edge-auth/charts/redis/templates/secret.yaml b/qliksense/charts/edge-auth/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..ead9c61 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/templates/secret.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/qliksense/charts/edge-auth/charts/redis/values-production.yaml b/qliksense/charts/edge-auth/charts/redis/values-production.yaml new file mode 100644 index 0000000..cae2af1 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/values-production.yaml @@ -0,0 +1,630 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just example rules, please adapt them to your needs. + ## Make sure to constrain the rules to the current Redis service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 <= 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/edge-auth/charts/redis/values.schema.json b/qliksense/charts/edge-auth/charts/redis/values.schema.json new file mode 100644 index 0000000..2138e45 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "condition": false, + "value": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "master.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "condition": false, + "value": "cluster.enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + 
"type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "slave.persistence.enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/qliksense/charts/edge-auth/charts/redis/values.yaml b/qliksense/charts/edge-auth/charts/redis/values.yaml new file mode 100644 index 0000000..2649466 --- /dev/null +++ b/qliksense/charts/edge-auth/charts/redis/values.yaml @@ -0,0 +1,631 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r32 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.7-debian-10-r27 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled, each sentinel will generate a random id at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional).
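+  ##
+  ## For example (the label names and values below are purely illustrative), to only
+  ## admit traffic from pods labelled "redis-client: true" running in namespaces
+  ## labelled "redis-access: true", one could set something like:
+  ##
+  ## ingressNSMatchLabels:
+  ##   redis-access: "true"
+  ## ingressNSPodMatchLabels:
+  ##   redis-client: "true"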
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: {} + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + +# Redis port +redisPort: 6379 + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + # priorityClassName: {} + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.4.0-debian-10-r3 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just example rules, please adapt them to your needs. + ## Make sure to constrain the rules to the current Redis service. + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} down + # description: Redis instance {{ "{{ $instance }}" }} is down. + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 <= 100 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} is using too much memory + # description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis instance {{ "{{ $instance }}" }} has evicted keys + # description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/qliksense/charts/edge-auth/requirements.yaml b/qliksense/charts/edge-auth/requirements.yaml new file mode 100644 index 0000000..49aa32f --- /dev/null +++ b/qliksense/charts/edge-auth/requirements.yaml @@ -0,0 +1,26 @@ +dependencies: + - name: qlikcommon + version: "1.5.0" + repository: "@qlik" + condition: global.component-common-imports + - name: nginx-ingress + repository: https://kubernetes-charts.storage.googleapis.com/ + version: ~0.11.3 + condition: nginx-ingress.enabled + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: global.component-common-imports,mongodb.enabled + - name: redis + version: 10.5.6 + repository: "@stable" + condition: global.component-common-imports,redis.enabled + - name: redis + version: 10.5.6 + repository: "@stable" + alias: edge-auth-redis + condition: edge-auth-redis.enabled + - name: messaging + version: 2.0.29 + repository: "@qlik" + condition: global.component-common-imports,messaging.enabled diff --git a/qliksense/charts/edge-auth/templates/manifest.yaml b/qliksense/charts/edge-auth/templates/manifest.yaml new file mode 100644 index 0000000..650adfd --- /dev/null +++ b/qliksense/charts/edge-auth/templates/manifest.yaml @@ -0,0 +1,136 @@ +{{- template "common.configmap" (list . "edge-auth.configmap") -}} +{{- define "edge-auth.configmap" -}} +{{- end }} + +--- +{{ template "common.secret" (list . "edge-auth.secret") -}} +{{- define "edge-auth.secret" -}} +{{- end }} + +--- +{{ template "common.ingress" (list . "edge-auth.ingress") -}} +{{- define "edge-auth.ingress" -}} +metadata: + annotations: + nginx.ingress.kubernetes.io/auth-url: nil +spec: + rules: + - http: + paths: + - path: /login + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} + - path: /logout + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} +{{- end }} + +--- +{{ $root := . }} +{{ $values := set $root.Values "ingress" $root.Values.apiIngress }} +{{ $root = set $root "Values" $values }} +{{- template "common.ingress" (list $root "edge-auth.apiIngress") -}} +{{- define "edge-auth.apiIngress" -}} +metadata: + name: {{ template "common.fullname" . 
}}-api +spec: + rules: + - http: + paths: + - path: /api/v1/diagnose-claims + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} + - path: /api/v1/csrf-token + backend: + serviceName: {{ template "common.fullname" . }} + servicePort: {{ .Values.service.port }} +{{- end }} + +--- +{{ template "common.service" (list . "edge-auth.service") -}} +{{- define "edge-auth.service" -}} +spec: + ports: + - name: {{ template "common.fullname" . }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.port }} +{{- if .Values.deployment.oidc.enabled }} + - name: oidc + port: {{ .Values.deployment.oidc.configs.port }} + targetPort: {{ .Values.deployment.oidc.configs.port }} + nodePort: {{ .Values.deployment.oidc.configs.port }} + protocol: TCP +{{- end }} +{{- end }} + +--- +{{ template "common.deployment" (list . "edge-auth.deployment") -}} +{{- define "edge-auth.deployment" -}} +spec: + template: + spec: + dnsConfig: + options: + - name: timeout + value: "1" + hostAliases: + - ip: "127.0.0.1" + hostnames: + - "{{ .Values.service.podAlias }}" + containers: + - +{{ include "common.container" (list (set . "container" .Values.deployment.container) "edge-auth.deployment.main") | indent 8 }} +{{- if .Values.deployment.oidc.enabled }} + - +{{ include "common.container" (list (set . "container" .Values.deployment.oidc) "edge-auth.deployment.oidc") | indent 8 }} +{{- end }} +{{- end }} + +{{- define "edge-auth.deployment.main" -}} +{{- if .Values.deployment }}{{- if .Values.deployment.container }}{{- if .Values.deployment.container.command }} +command: ["{{ .Values.deployment.container.command }}"] +{{- end }}{{- end }}{{- end }} +livenessProbe: + httpGet: + path: /live + port: http +readinessProbe: + httpGet: + path: /ready + port: http +ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP +{{- end }} + +{{- define "edge-auth.deployment.oidc" -}} +{{- if .Values.deployment.oidc.enabled }} +name: oidc +livenessProbe: nil +readinessProbe: nil +ports: + - name: http + containerPort: {{ .Values.deployment.oidc.configs.port }} + protocol: TCP +{{- end }} +{{- end }} + +--- +{{ template "common.hpa" (list . "edge-auth.hpa") -}} +{{- define "edge-auth.hpa" -}} +{{- end }} + +--- +{{ template "common.persistentvolumeclaims" . -}} + +--- +{{ template "common.networkpolicy" (list . "edge-auth.networkpolicy") -}} +{{- define "edge-auth.networkpolicy" -}} +{{- end }} + +--- diff --git a/qliksense/charts/edge-auth/values.yaml b/qliksense/charts/edge-auth/values.yaml new file mode 100644 index 0000000..05f01ae --- /dev/null +++ b/qliksense/charts/edge-auth/values.yaml @@ -0,0 +1,250 @@ +## Default values for edge-auth Service Helm Chart. +image: + ## Default registry where this repository should be pulled from. + ## Will be overridden by `global.imageRegistry` if set + registry: ghcr.io + ## edge-auth image name. + repository: qlik-download/edge-auth + ## edge-auth image version. + ## + tag: 4.0.8 + + ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + pullPolicy: IfNotPresent + ## Secrets for pulling images from a private docker registry. 
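+  ## Each secret listed under pullSecrets must already exist in the release namespace.
+  ## A minimal, purely illustrative secret of the expected type looks roughly like:
+  ##
+  ##   apiVersion: v1
+  ##   kind: Secret
+  ##   metadata:
+  ##     name: artifactory-docker-secret
+  ##   type: kubernetes.io/dockerconfigjson
+  ##   data:
+  ##     .dockerconfigjson: <base64-encoded Docker registry credentials>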
+ ## + pullSecrets: + - name: artifactory-docker-secret + +configs: + environment: qseok + region: example + natsEnabled: "true" + ## When enabled, edge-auth will reject non-TLS requests on its external endpoints + enforceTLS: "true" + ## When enabled, cookies can only be sent over SSL + ## An error will be thrown if you try to send secure cookies over an insecure socket. + ## + secureCookies: "true" + # Toggle for enabling Redis caching + cacheRedisEnabled: "false" + # The maximum age of internal call cache in ms, default is 600000 (10 mins) + cacheMaxAge: 7200000 + # Global setting for cache count limits + cacheMaxSize: 250 + ## The length of time in seconds that a cookie and session are valid for + sessionTTL: 1800 + ## The maximum length of time in seconds that a session can exist + sessionMaxLifetime: 86400 + ## The length of time allowed between initiating and completing login + stateLifetime: "7d" + data: + ## Log level (silly|debug|verbose|info|warn|error) + logLevel: "verbose" + # Feature flags service URL + featureFlagsUri: "http://{{ .Release.Name }}-feature-flags.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Users service URL + usersUri: "http://{{ .Release.Name }}-users.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Groups service URL + groupsUri: "http://{{ .Release.Name }}-groups.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Tenants service URL + tenantsUri: "http://{{ .Release.Name }}-tenants.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Keys service URL + keysUri: "http://{{ .Release.Name }}-keys.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Apikeys service URL + apiKeysUri: "http://{{ .Release.Name }}-api-keys.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Identity Providers service URL + identityProvidersUri: "http://{{ .Release.Name }}-identity-providers.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Identity Providers Ext service URL + identityProvidersExtUri: "http://{{ .Release.Name }}-identity-providers-ext.{{ .Release.Namespace }}.svc.cluster.local:8080" + # Address of NATS server + natsUri: nats://{{ .Release.Name }}-nats-client:4222 + # NATS Streaming cluster ID + natsStreamingClusterId: "{{ .Release.Name }}-nats-streaming-cluster" + # Ingress auth URL + ingressAuthUrl: "http://{{ .Release.Name }}-edge-auth.{{ .Release.Namespace }}.svc.cluster.local:8080/v1/auth" + ## Custom URI (port included) where Redis can be found + redisUri: "redis://{{ .Release.Name }}-redis-master:6379" + +secrets: + ## Array of strings used to create keys for signing cookies (These keys can be rotated) + ## + cookieKeys: + - "UiL6YePEcUMk2MqiGk1BuGsqMWfoHz+b1SuX13lKuvU=" + stringData: + ## JSON value for cookieKeys - don't alter this value directly; use `secrets.cookieKeys` to configure the cookie keys + cookiesKeys: "{{ toJson .Values.secrets.cookieKeys }}" + ## Specify a custom MongoDB URI.
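+    ## For example, to point at an external MongoDB instead of the bundled subchart
+    ## (hostname, credentials and database name below are placeholders):
+    ## mongodbUri: "mongodb://edge-auth:<password>@mongodb.example.com:27017/edge-auth?ssl=true"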
+ mongodbUri: "mongodb://{{ .Release.Name }}-mongodb:27017/{{ .Release.Name }}?ssl=false" + ## The key with which to sign the state parameter (encoded in base64), must be larger than 256 bits + ## openssl rand -base64 32 + ## + loginStateKey: rLk9dMkw1nmcnRPma+0CQ8v6PBZAPxQaeenoQPJc4PI= + ## RSA or EC Private signing key used to sign internal JWTs + ## ssh-keygen -t ecdsa -b 384 -f jwtPrivateKey -N '' + ## + tokenAuthPrivateKey: | + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDDaSeK3WLOpOG9NxvocZ5Eot/Vo7n6Q5jkkessw3Wwxw3b7xSC5dWMV + XutbEWBhZ8ygBwYFK4EEACKhZANiAARqB/ngEZXNDXKigrwm9hV4JcHU6FqbrhC9 + V7feiuqPwuvGUS6tmfJ8V1QorVD7SFf+xNWCSGl4CJzhqc7al92gFOCd1EmCbcZb + 9nz0VSNKEjnl9yCcT3Thdr75wRPZ6zo= + -----END EC PRIVATE KEY----- + +## Service configuration. +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: NodePort + port: 8080 + ## Metrics configuration + ## Prometheus configuration + ## The annotations for prometheus scraping are included + annotations: + prometheus.io/scrape: "true" + prometheus.io/scrape_high_cardinality: "true" + prometheus.io/port: "{{ .Values.service.port }}" + # Hostname Alias for the pod internal ip + podAlias: elastic.example + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + # class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + annotations: + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + set $host_and_uri $http_host$request_uri; + if ( $host_and_uri ~* "^.{2049,}$" ) { return 414; } + more_clear_input_headers "X-Forwarded-Host" "X-Forwarded-For" "X-Forwarded-Proto" "X-Original-URI" "X-Original-URL"; + more_set_headers 'Access-Control-Allow-Origin: $http_origin'; + # host: elastic.example + +## Api Ingress configuration. +## +apiIngress: + ## class provides an kubernetes.io/ingress.class override of default nginx + class: "nginx" + ## Annotations to be added to the ingress. + ## + annotations: + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/configuration-snippet: | + set $host_and_uri $http_host$request_uri; + if ( $host_and_uri ~* "^.{2049,}$" ) { return 414; } + rewrite (?i)/api/(.*) /$1 break; + more_clear_input_headers "X-Forwarded-Host" "X-Forwarded-For" "X-Forwarded-Proto" "X-Original-URI" "X-Original-URL"; + + ## Currently templates a "main" container +deployment: + ## Number of replicas. 
+ ## + replicas: 1 + ## A simple OIDC compliant identity provider will be spun up as a sidecar to edge-auth + ## Check the reference page for the default users + ## ref: https://github.com/qlik-trial/simple-oidc-provider + oidc: + enabled: false + image: + registry: qlik + ## oidc image + repository: simple-oidc-provider + # Tag of the docker image + tag: 0.2.2 + ## oidc configs + configs: + ## oidc port + ## + port: 32123 + data: + ## whitelist for where the OIDC will allow post logout redirects to + redirects: "https://elastic.example/login/callback" + ## A url for where the OIDC will allow post logout redirects to + # postLogoutRedirects: http://elastic.example + ## oidc secrets + secrets: + data: + +## Subcharts +## MongoDB configuration +mongodb: + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 (tag: 4.0.3-debian-9) + tag: 3.6.12 + ## Enables a local mongo chart + enabled: false + ## disable password for local dev mode + usePassword: false + +## Redis configuration +redis: + enabled: false + ## Image pull policy for Redis chart + image: + pullPolicy: IfNotPresent + ## Disable password authentication by default (for local development for example) + usePassword: false + ## Disable master-secondary topology by default (for local development for example) + cluster: + enabled: false + ## master node configurations + master: + securityContext: + enabled: false + statefulset: + ## Updating all Pods in a StatefulSet, in reverse ordinal order, while respecting the StatefulSet guarantees + updateStrategy: RollingUpdate + slave: + securityContext: + enabled: false + ## metrics configurations + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + +## Do not enable here, for QSEoK only +edge-auth-redis: + enabled: false + +## nginx-ingress sub-chart configuration. 
+## +nginx-ingress: + enabled: false + +## Messaging sub-chart configuration +## (Only for purposes of CI & local-dev) +messaging: + ## Set messaging.enabled to true for localdev and CI builds + enabled: false + nats: + enabled: true + replicaCount: 1 + auth: + enabled: false + clusterAuth: + enabled: false + nats-streaming: + enabled: true + replicaCount: 3 + auth: + enabled: false + +# Included if certs are required to be mounted into the pod +certs: + mountPath: "/etc/ssl/certs" + +## Horizontal Pod Autoscaler setting +hpa: + minReplicas: 1 + maxReplicas: 1 diff --git a/qliksense/charts/elastic-infra/.helmignore b/qliksense/charts/elastic-infra/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/elastic-infra/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/elastic-infra/Chart.yaml b/qliksense/charts/elastic-infra/Chart.yaml new file mode 100644 index 0000000..f0a5ffb --- /dev/null +++ b/qliksense/charts/elastic-infra/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: Base infrastructure resources for elastic +home: https://www.qlik.com +name: elastic-infra +sources: +- https://github.com/qlik-trial/elastic-infra +version: 3.0.5 diff --git a/qliksense/charts/elastic-infra/README.md b/qliksense/charts/elastic-infra/README.md new file mode 100644 index 0000000..b358f2a --- /dev/null +++ b/qliksense/charts/elastic-infra/README.md @@ -0,0 +1,61 @@ +# elastic-infra + +## Introduction + +This chart bootstraps an elastic-infra deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It stands up the "base" resources needed to connect all the dots and create a functioning Elastic environment. + +Today, this includes: + +1. `nginx-ingress` so we can do routing and exposure of the environment +2. `traefik` so services in the environment can be exposed at `/api` and internally for service-to-service calls +3. An ingress rule for `/api` to ensure all requests that get that far are authenticated and proxied to `traefik`, which then proxies them to the backend service responsible for the resource. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install --name my-release qlik/elastic-infra +``` + +The command deploys elastic-infra on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the elastic-infra chart and their default values.
+ +| Parameter | Description | Default | +|-------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------| +| `config.qixSessionsURI` | overrides the generated qix-sessions URI for engine load balancing rules | `http://{.Release.Name}-qix-sessions.{.Release.Namespace}.svc.cluster.local:8080/engine-selection` | +| `config.dataprepURI` | overrides the generated data-prep URI for session stickiness rules | `http://{.Release.Name}-data-prep.{.Release.Namespace}.svc.cluster.local:9072/session/route` | +| `ingress.annotations` | elastic-infra additional annotations | `` | +| `ingress.host` | host for ingress | `nil` | +| `ingress.tls` | ingress TLS configuration | `nil` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `ingress.forceHttps` | forces all HTTP requests to HTTPS if set to true | `false` | +| `nginx-ingress.enabled` | whether nginx-ingress dependency is enabled | `true` | +| `mongodb.enabled` | whether mongodb dependency is enabled | `true` | +| `mongodb.password` | the mongodb password | `""` | +| `tlsCert.fqdn` | creates certificate resource for the fqdn | `` | +| `tlsCert.fqdnList` | creates certificate resource for the list of fqdn's, overrides `tlsCert.fqdn` | `` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install --name my-release -f values.yaml qlik/elastic-infra +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/qliksense/charts/elastic-infra/charts/mongodb/.helmignore b/qliksense/charts/elastic-infra/charts/mongodb/.helmignore new file mode 100644 index 0000000..6b8710a --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/.helmignore @@ -0,0 +1 @@ +.git diff --git a/qliksense/charts/elastic-infra/charts/mongodb/Chart.yaml b/qliksense/charts/elastic-infra/charts/mongodb/Chart.yaml new file mode 100644 index 0000000..cc8038a --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 4.0.3 +description: NoSQL document-oriented database that stores JSON-like documents with + dynamic schemas, simplifying the integration of data in content-driven applications. 
+home: https://mongodb.org +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mongodb +sources: +- https://github.com/bitnami/bitnami-docker-mongodb +version: 4.5.0 diff --git a/qliksense/charts/elastic-infra/charts/mongodb/OWNERS b/qliksense/charts/elastic-infra/charts/mongodb/OWNERS new file mode 100644 index 0000000..2c3e9fa --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/OWNERS @@ -0,0 +1,12 @@ +approvers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 +reviewers: +- prydonius +- tompizmor +- sameersbn +- carrodher +- juan131 diff --git a/qliksense/charts/elastic-infra/charts/mongodb/README.md b/qliksense/charts/elastic-infra/charts/mongodb/README.md new file mode 100644 index 0000000..1b9d003 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/README.md @@ -0,0 +1,158 @@ +# MongoDB + +[MongoDB](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster. + +## TL;DR; + +```bash +$ helm install stable/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/mongodb +``` + +The command deploys MongoDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the MongoDB chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------|----------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | MongoDB image registry | `docker.io` | +| `image.repository` | MongoDB Image name | `bitnami/mongodb` | +| `image.tag` | MongoDB Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `usePassword` | Enable password authentication | `true` | +| `existingSecret` | Existing secret with MongoDB credentials | `nil` | +| `mongodbRootPassword` | MongoDB admin password | `random alhpanumeric string (10)` | +| `mongodbUsername` | MongoDB custom user | `nil` | +| `mongodbPassword` | MongoDB custom user password | `random alhpanumeric string (10)` | +| `mongodbDatabase` | Database to create | `nil` | +| `mongodbEnableIPv6` | Switch to enable/disable IPv6 on MongoDB | `true` | +| `mongodbExtraFlags` | MongoDB additional command line flags | [] | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `port` | MongoDB service port | `27017` | +| `replicaSet.enabled` | Switch to enable/disable replica set configuration | `false` | +| `replicaSet.name` | Name of the replica set | `rs0` | +| `replicaSet.useHostnames` | Enable DNS hostnames in the replica set config | `true` | +| `replicaSet.key` | Key used for authentication in the replica set | `nil` | +| `replicaSet.replicas.secondary` | Number of secondary nodes in the replica set | `1` | +| `replicaSet.replicas.arbiter` | Number of arbiter nodes in the replica set | `1` | +| `replicaSet.pdb.minAvailable.primary` | PDB for the MongoDB Primary nodes | `1` | +| `replicaSet.pdb.minAvailable.secondary` | PDB for the MongoDB Secondary nodes | `1` | +| `replicaSet.pdb.minAvailable.arbiter` | PDB for the MongoDB Arbiter nodes | `1` | +| `podAnnotations` | Annotations to be added to pods | {} | +| `resources` | Pod resources | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (avoids creating one if this is given) | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `configmap` | MongoDB configuration file to be used | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+  --set mongodbRootPassword=secretpassword,mongodbUsername=my-user,mongodbPassword=my-password,mongodbDatabase=my-database \
+  stable/mongodb
+```
+
+The above command sets the MongoDB `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user` with the password `my-password`, who has access to a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mongodb
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml) as a starting point
+
+## Replication
+
+You can start the MongoDB chart in replica set mode with the following command:
+
+```bash
+$ helm install --name my-release stable/mongodb --set replicaSet.enabled=true
+```
+
+## Production settings and horizontal scaling
+
+The [values-production.yaml](values-production.yaml) file contains a configuration for deploying a scalable, highly available MongoDB setup in production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately.
+
+```console
+$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/mongodb/values-production.yaml
+$ helm install --name my-release -f ./values-production.yaml stable/mongodb
+```
+
+To horizontally scale this chart, scale the number of secondary nodes in your MongoDB replica set:
+
+```console
+$ kubectl scale statefulset my-release-mongodb-secondary --replicas=3
+```
+
+Some characteristics of this chart are:
+
+* Each participant in the replica set has its own fixed StatefulSet, so you always know where to find the primary, secondary and arbiter nodes.
+* The number of secondary and arbiter nodes can be scaled out independently.
+* It is easy to move an application from a standalone MongoDB server to a replica set.
+
+## Initialize a fresh instance
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image allows you to use your custom scripts to initialize a fresh instance. To be executed, the scripts must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+The allowed extensions are `.sh` and `.js`.
+
+## Persistence
+
+The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location.
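+
+If the defaults do not fit your environment, the persistence parameters from the table above can be overridden at install time; a small sketch (the size and storage class below are purely illustrative):
+
+```console
+$ helm install --name my-release stable/mongodb \
+  --set persistence.size=32Gi \
+  --set persistence.storageClass=standard
+```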
The volume is created using dynamic volume provisioning. diff --git a/qliksense/charts/elastic-infra/charts/mongodb/files/docker-entrypoint-initdb.d/README.md b/qliksense/charts/elastic-infra/charts/mongodb/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..a929990 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom .sh, or .js file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-mongodb](https://github.com/bitnami/bitnami-docker-mongodb#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/NOTES.txt b/qliksense/charts/elastic-infra/charts/mongodb/templates/NOTES.txt new file mode 100644 index 0000000..af81001 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.mongodbRootPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" and not specifying "mongodbRootPassword" + you have most likely exposed the MongoDB service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "mongodbRootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB can be accessed via port 27017 on the following DNS name from within your cluster: + + {{ template "mongodb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.usePassword -}} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode) + +{{- end }} +{{- if and .Values.mongodbUsername .Values.mongodbDatabase }} +{{- if .Values.mongodbPassword }} + +To get the password for "{{ .Values.mongodbUsername }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} -o jsonpath="{.data.mongodb-password}" | base64 --decode) + +{{- end }} +{{- end }} + +To connect to your database run the following command: + + kubectl run --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --image bitnami/mongodb --command -- mongo admin --host {{ template "mongodb.fullname" . }} {{- if .Values.usePassword }} -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + mongo --host $SERVICE_IP --port {{ .Values.service.nodePort }} {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "mongodb.fullname" . }} 27017:27017 & + mongo --host 127.0.0.1 {{- if .Values.usePassword }} -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/_helpers.tpl b/qliksense/charts/elastic-infra/charts/mongodb/templates/_helpers.tpl new file mode 100644 index 0000000..855dc29 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mongodb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name for the admin secret. +*/}} +{{- define "mongodb.adminSecret" -}} + {{- if .Values.auth.existingAdminSecret -}} + {{- .Values.auth.existingAdminSecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-admin + {{- end -}} +{{- end -}} + +{{/* +Create the name for the key secret. +*/}} +{{- define "mongodb.keySecret" -}} + {{- if .Values.auth.existingKeySecret -}} + {{- .Values.auth.existingKeySecret -}} + {{- else -}} + {{- template "mongodb.fullname" . -}}-keyfile + {{- end -}} +{{- end -}} + +{{/* +Return the proper MongoDB image name +*/}} +{{- define "mongodb.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/configmap.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..66dc853 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +data: + mongodb.conf: |- +{{ toYaml .Values.configmap | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/deployment-standalone.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/deployment-standalone.yaml new file mode 100644 index 0000000..d8ff01b --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/deployment-standalone.yaml @@ -0,0 +1,143 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "mongodb.chart" . }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end -}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.fullname" . }} + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + {{- if .Values.usePassword }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_USERNAME + value: {{ default "" .Values.mongodbUsername | quote }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_DATABASE + value: {{ default "" .Values.mongodbDatabase | quote }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + ports: + - name: mongodb + containerPort: 27017 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} + {{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/headless-svc-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/headless-svc-rs.yaml new file mode 100644 index 0000000..29fcf34 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/headless-svc-rs.yaml @@ -0,0 +1,24 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }}-headless + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: mongodb + port: {{ .Values.service.port }} + selector: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/initialization-configmap.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 0000000..840e77c --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,13 @@ +{{ if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "mongodb.fullname" . }}-init-scripts + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml new file mode 100644 index 0000000..eb7f14a --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-arbiter-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: arbiter + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.arbiter }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml new file mode 100644 index 0000000..6434e3f --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-primary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + component: primary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.primary }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml new file mode 100644 index 0000000..03f317d --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/poddisruptionbudget-secondary-rs.yaml @@ -0,0 +1,18 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . 
}} + release: {{ .Release.Name }} + component: secondary + minAvailable: {{ .Values.replicaSet.pdb.minAvailable.secondary }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/pvc-standalone.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/pvc-standalone.yaml new file mode 100644 index 0000000..8182ce7 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/pvc-standalone.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.replicaSet.enabled) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/secrets.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/secrets.yaml new file mode 100644 index 0000000..ecbf1eb --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/secrets.yaml @@ -0,0 +1,34 @@ +{{ if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{- if .Values.usePassword }} + {{- if .Values.mongodbRootPassword }} + mongodb-root-password: {{ .Values.mongodbRootPassword | b64enc | quote }} + {{- else }} + mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.mongodbUsername .Values.mongodbDatabase }} + {{- if .Values.mongodbPassword }} + mongodb-password: {{ .Values.mongodbPassword | b64enc | quote }} + {{- else }} + mongodb-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if .Values.replicaSet.enabled }} + {{- if .Values.replicaSet.key }} + mongodb-replica-set-key: {{ .Values.replicaSet.key | b64enc | quote }} + {{- else }} + mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-arbiter-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-arbiter-rs.yaml new file mode 100644 index 0000000..4ed30a1 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-arbiter-rs.yaml @@ -0,0 +1,121 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-arbiter +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: arbiter + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.arbiter }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: arbiter + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-arbiter + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: mongodb + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-primary-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-primary-rs.yaml new file mode 100644 index 0000000..8dcb004 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-primary-rs.yaml @@ -0,0 +1,174 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-primary +spec: + serviceName: {{ template "mongodb.fullname" . }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: primary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-primary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "primary" + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + - name: MONGODB_USERNAME + value: {{ .Values.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.mongodbDatabase | quote }} + {{- if .Values.usePassword }} + {{- if .Values.mongodbPassword }} + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-password + {{- end }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . 
}}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if (.Files.Glob "files/docker-entrypoint-initdb.d/*[sh|js]") }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.fullname" . }}-init-scripts + {{- end }} + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . }} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-secondary-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-secondary-rs.yaml new file mode 100644 index 0000000..d4c4a97 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/statefulset-secondary-rs.yaml @@ -0,0 +1,157 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "mongodb.fullname" . }}-secondary +spec: + selector: + matchLabels: + app: {{ template "mongodb.name" . }} + release: {{ .Release.Name }} + chart: {{ template "mongodb.chart" . }} + component: secondary + podManagementPolicy: "Parallel" + serviceName: {{ template "mongodb.fullname" . 
}}-headless + replicas: {{ .Values.replicaSet.replicas.secondary }} + template: + metadata: + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: {{ .Release.Name }} + component: secondary + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ template "mongodb.name" . }}-secondary + image: {{ template "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: mongodb + env: + - name: MONGODB_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MONGODB_REPLICA_SET_MODE + value: "secondary" + - name: MONGODB_PRIMARY_HOST + value: {{ template "mongodb.fullname" . }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSet.name | quote }} + {{- if .Values.replicaSet.useHostnames }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MONGODB_POD_NAME).{{ template "mongodb.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- if .Values.usePassword }} + - name: MONGODB_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{- else }}{{ template "mongodb.fullname" . }}{{- end }} + key: mongodb-replica-set-key + {{- end }} + - name: MONGODB_ENABLE_IPV6 + {{- if .Values.mongodbEnableIPv6 }} + value: "yes" + {{- else }} + value: "no" + {{- end }} + - name: MONGODB_EXTRA_FLAGS + value: {{ default "" .Values.mongodbExtraFlags | join " " }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - mongo + - --eval + - "db.adminCommand('ping')" + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: /bitnami/mongodb + {{- if .Values.configmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + volumes: + {{- if .Values.configmap }} + - name: config + configMap: + name: {{ template "mongodb.fullname" . 
}} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: datadir + emptyDir: {} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-primary-rs.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-primary-rs.yaml new file mode 100644 index 0000000..fd440c8 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-primary-rs.yaml @@ -0,0 +1,28 @@ +{{- if .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . }} + release: "{{ .Release.Name }}" + component: primary +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-standalone.yaml b/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-standalone.yaml new file mode 100644 index 0000000..4ca9443 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/templates/svc-standalone.yaml @@ -0,0 +1,27 @@ +{{- if not .Values.replicaSet.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "mongodb.fullname" . }} + labels: + app: {{ template "mongodb.name" . }} + chart: {{ template "mongodb.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: mongodb + port: 27017 + targetPort: mongodb +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + selector: + app: {{ template "mongodb.name" . 
}} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/mongodb/values-production.yaml b/qliksense/charts/elastic-infra/charts/mongodb/values-production.yaml new file mode 100644 index 0000000..9070f3b --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/values-production.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: true + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# replication: +# replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/elastic-infra/charts/mongodb/values.yaml b/qliksense/charts/elastic-infra/charts/mongodb/values.yaml new file mode 100644 index 0000000..4b090d4 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/mongodb/values.yaml @@ -0,0 +1,213 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +image: + ## Bitnami MongoDB registry + ## + registry: docker.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: 4.0.3-debian-9 + + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +# mongodbRootPassword: + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +# mongodbUsername: username +# mongodbPassword: password +# mongodbDatabase: database + + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: true + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + annotations: {} + type: ClusterIP + port: 27017 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + minAvailable: + primary: 1 + secondary: 1 + arbiter: 1 + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Entries for the MongoDB config file +configmap: +# # Where and how to store data. +# storage: +# dbPath: /opt/bitnami/mongodb/data/db +# journal: +# enabled: true +# #engine: +# #wiredTiger: +# # where to write logging data. 
+# systemLog: +# destination: file +# logAppend: true +# path: /opt/bitnami/mongodb/logs/mongodb.log +# # network interfaces +# net: +# port: 27017 +# bindIp: 0.0.0.0 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# # replica set options +# #replication: +# # replSetName: replicaset +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: enabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/.helmignore b/qliksense/charts/elastic-infra/charts/nginx-ingress/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/Chart.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/Chart.yaml new file mode 100644 index 0000000..32648e7 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +appVersion: 0.30.0 +description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.10.0-0' +name: nginx-ingress +sources: +- https://github.com/kubernetes/ingress-nginx +version: 1.36.2 diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/README.md b/qliksense/charts/elastic-infra/charts/nginx-ingress/README.md new file mode 100644 index 0000000..87dfdb4 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/README.md @@ -0,0 +1,361 @@ +# nginx-ingress + +[nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller that uses ConfigMap to store the nginx configuration. + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +## TL;DR; + +```console +$ helm install stable/nginx-ingress +``` + +## Introduction + +This chart bootstraps an nginx-ingress deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + + - Kubernetes 1.6+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/nginx-ingress +``` + +The command deploys nginx-ingress on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
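+
+If you also want the release name to be reusable later, purge the release instead of just deleting it (Helm 2 syntax, matching the commands above):
+
+```console
+$ helm delete --purge my-release
+```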
+ +## Configuration + +The following table lists the configurable parameters of the nginx-ingress chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`controller.name` | name of the controller component | `controller` +`controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` +`controller.image.tag` | controller container image tag | `0.30.0` +`controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` +`controller.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. | `101` +`controller.useComponentLabel` | Wether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the controller deployment* | `false` +`controller.containerPort.http` | The port that the controller container listens on for http connections. | `80` +`controller.containerPort.https` | The port that the controller container listens on for https connections. | `443` +`controller.config` | nginx [ConfigMap](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md) entries | none +`controller.hostNetwork` | If the nginx deployment / daemonset should run on the host's network namespace. Do not set this when `controller.service.externalIPs` is set and `kube-proxy` is used as there will be a port-conflict for port `80` | false +`controller.defaultBackendService` | default 404 backend service; needed only if `defaultBackend.enabled = false` and version < 0.21.0| `""` +`controller.dnsPolicy` | If using `hostNetwork=true`, change to `ClusterFirstWithHostNet`. See [pod's dns policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) for details | `ClusterFirst` +`controller.dnsConfig` | custom pod dnsConfig. See [pod's dns config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config) for details | `{}` +`controller.reportNodeInternalIp` | If using `hostNetwork=true`, setting `reportNodeInternalIp=true`, will pass the flag `report-node-internal-ip-address` to nginx-ingress. This sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. +`controller.electionID` | election ID to use for the status update | `ingress-controller-leader` +`controller.extraEnvs` | any additional environment variables to set in the pods | `{}` +`controller.extraContainers` | Sidecar containers to add to the controller pod. See [LemonLDAP::NG controller](https://github.com/lemonldap-ng-controller/lemonldap-ng-controller) as example | `{}` +`controller.extraVolumeMounts` | Additional volumeMounts to the controller main container | `{}` +`controller.extraVolumes` | Additional volumes to the controller pod | `{}` +`controller.extraInitContainers` | Containers, which are run before the app containers are started | `[]` +`controller.ingressClass` | name of the ingress class to route through this controller | `nginx` +`controller.maxmindLicenseKey` | Maxmind license key to download GeoLite2 Databases. 
See [Accessing and using GeoLite2 database](https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/) | `""` +`controller.scope.enabled` | limit the scope of the ingress controller | `false` (watch all namespaces) +`controller.scope.namespace` | namespace to watch for ingress | `""` (use the release namespace) +`controller.extraArgs` | Additional controller container arguments | `{}` +`controller.kind` | install as Deployment, DaemonSet or Both | `Deployment` +`controller.deploymentAnnotations` | annotations to be added to deployment | `{}` +`controller.autoscaling.enabled` | If true, creates Horizontal Pod Autoscaler | false +`controller.autoscaling.minReplicas` | If autoscaling enabled, this field sets minimum replica count | `2` +`controller.autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `11` +`controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization percentage to scale | `"50"` +`controller.autoscaling.targetMemoryUtilizationPercentage` | Target memory utilization percentage to scale | `"50"` +`controller.daemonset.useHostPort` | If `controller.kind` is `DaemonSet`, this will enable `hostPort` for TCP/80 and TCP/443 | false +`controller.daemonset.hostPorts.http` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"80"` +`controller.daemonset.hostPorts.https` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"443"` +`controller.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`controller.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`controller.terminationGracePeriodSeconds` | how many seconds to wait before terminating a pod | `60` +`controller.minReadySeconds` | how many seconds a pod needs to be ready before killing the next, during update | `0` +`controller.nodeSelector` | node labels for pod assignment | `{}` +`controller.podAnnotations` | annotations to be added to pods | `{}` +`controller.deploymentLabels` | labels to add to the deployment metadata | `{}` +`controller.podLabels` | labels to add to the pod container metadata | `{}` +`controller.podSecurityContext` | Security context policies to add to the controller pod | `{}` +`controller.replicaCount` | desired number of controller pods | `1` +`controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` +`controller.resources` | controller pod resource requests & limits | `{}` +`controller.priorityClassName` | controller priorityClassName | `nil` +`controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.service.annotations` | annotations for controller service | `{}` +`controller.service.labels` | labels for controller service | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` +`controller.service.enabled` | if disabled no service will be created. 
This is especially useful when `controller.kind` is set to `DaemonSet` and `controller.daemonset.useHostPorts` is `true` | true +`controller.service.clusterIP` | internal controller cluster service IP (set to `"-"` to pass an empty value) | `nil` +`controller.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the controller service | `false` +`controller.service.externalIPs` | controller service external IP addresses. Do not set this when `controller.hostNetwork` is set to `true` and `kube-proxy` is used as there will be a port-conflict for port `80` | `[]` +`controller.service.externalTrafficPolicy` | If `controller.service.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable [source IP preservation](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) | `"Cluster"` +`controller.service.sessionAffinity` | Enables client IP based session affinity. Must be `ClientIP` or `None` if set. | `""` +`controller.service.healthCheckNodePort` | If `controller.service.type` is `NodePort` or `LoadBalancer` and `controller.service.externalTrafficPolicy` is set to `Local`, set this to [the managed health-check port the kube-proxy will expose](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport). If blank, a random port in the `NodePort` range will be assigned | `""` +`controller.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.service.enableHttp` | if port 80 should be opened for service | `true` +`controller.service.enableHttps` | if port 443 should be opened for service | `true` +`controller.service.targetPorts.http` | Sets the targetPort that maps to the Ingress' port 80 | `80` +`controller.service.targetPorts.https` | Sets the targetPort that maps to the Ingress' port 443 | `443` +`controller.service.ports.http` | Sets service http port | `80` +`controller.service.ports.https` | Sets service https port | `443` +`controller.service.type` | type of controller service to create | `LoadBalancer` +`controller.service.nodePorts.http` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 80 | `""` +`controller.service.nodePorts.https` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` +`controller.service.nodePorts.tcp` | Sets the nodePort for an entry referenced by its key from `tcp` | `{}` +`controller.service.nodePorts.udp` | Sets the nodePort for an entry referenced by its key from `udp` | `{}` +`controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 +`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`controller.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.livenessProbe.port` | The port number that the liveness probe will listen on. 
| 10254 +`controller.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`controller.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`controller.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.readinessProbe.port` | The port number that the readiness probe will listen on. | 10254 +`controller.metrics.enabled` | if `true`, enable Prometheus metrics | `false` +`controller.metrics.service.annotations` | annotations for Prometheus metrics service | `{}` +`controller.metrics.service.clusterIP` | cluster IP address to assign to service (set to `"-"` to pass an empty value) | `nil` +`controller.metrics.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the metrics service | `false` +`controller.metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` +`controller.metrics.service.labels` | labels for metrics service | `{}` +`controller.metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.metrics.service.servicePort` | Prometheus metrics service port | `9913` +`controller.metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` +`controller.metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` +`controller.metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` +`controller.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` +`controller.metrics.serviceMonitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as nginx ingress` +`controller.metrics.serviceMonitor.namespaceSelector` | [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/v0.34.0/Documentation/api.md#namespaceselector) to configure what namespaces to scrape | `will scrape the helm release namespace only` +`controller.metrics.serviceMonitor.scrapeInterval` | interval between Prometheus scraping | `30s` +`controller.metrics.prometheusRule.enabled` | Set this to `true` to create prometheusRules for Prometheus operator | `false` +`controller.metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` +`controller.metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `the same namespace as nginx ingress` +`controller.metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be prometheus in YAML format, check values for an example. | `[]` +`controller.admissionWebhooks.enabled` | Create Ingress admission webhooks. Validating webhook will check the ingress syntax. 
| `false` +`controller.admissionWebhooks.failurePolicy` | Failure policy for admission webhooks | `Fail` +`controller.admissionWebhooks.port` | Admission webhook port | `8080` +`controller.admissionWebhooks.service.annotations` | Annotations for admission webhook service | `{}` +`controller.admissionWebhooks.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the admission webhook service | `false` +`controller.admissionWebhooks.service.clusterIP` | cluster IP address to assign to admission webhook service (set to `"-"` to pass an empty value) | `nil` +`controller.admissionWebhooks.service.externalIPs` | Admission webhook service external IP addresses | `[]` +`controller.admissionWebhooks.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.admissionWebhooks.service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.admissionWebhooks.service.servicePort` | Admission webhook service port | `443` +`controller.admissionWebhooks.service.type` | Type of admission webhook service to create | `ClusterIP` +`controller.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for validating webhook endpoint, and patch the created webhooks with the CA. | `true` +`controller.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` +`controller.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.0.0` +`controller.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` +`controller.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `""` +`controller.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `{}` +`controller.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `{}` +`controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` +`controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` +`controller.addHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers) added before sending response to the client | `{}` +`controller.proxySetHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#proxy-set-headers) added before sending request to the backends| `{}` +`controller.headers` | DEPRECATED, Use `controller.proxySetHeaders` instead. 
| `{}` +`controller.updateStrategy` | allows setting of RollingUpdate strategy | `{}` +`controller.configMapNamespace` | The nginx-configmap namespace name | `""` +`controller.tcp.configMapNamespace` | The tcp-services-configmap namespace name | `""` +`controller.udp.configMapNamespace` | The udp-services-configmap namespace name | `""` +`defaultBackend.enabled` | Use default backend component | `true` +`defaultBackend.name` | name of the default backend component | `default-backend` +`defaultBackend.image.repository` | default backend container image repository | `k8s.gcr.io/defaultbackend-amd64` +`defaultBackend.image.tag` | default backend container image tag | `1.5` +`defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` +`defaultBackend.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. By default uses nobody user. | `65534` +`defaultBackend.useComponentLabel` | Whether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the defaultBackend deployment* | `false` +`defaultBackend.extraArgs` | Additional default backend container arguments | `{}` +`defaultBackend.extraEnvs` | any additional environment variables to set in the defaultBackend pods | `[]` +`defaultBackend.port` | Http port number | `8080` +`defaultBackend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 +`defaultBackend.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`defaultBackend.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`defaultBackend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 0 +`defaultBackend.readinessProbe.periodSeconds` | How often to perform the probe | 5 +`defaultBackend.readinessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 +`defaultBackend.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`defaultBackend.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`defaultBackend.nodeSelector` | node labels for pod assignment | `{}` +`defaultBackend.podAnnotations` | annotations to be added to pods | `{}` +`defaultBackend.deploymentLabels` | labels to add to the deployment metadata | `{}` +`defaultBackend.podLabels` | labels to add to the pod container metadata | `{}` +`defaultBackend.replicaCount` | desired number of default backend pods | `1` +`defaultBackend.minAvailable` | minimum number of available default backend pods for PodDisruptionBudget | `1` +`defaultBackend.resources` | default backend pod resource requests & limits | `{}` +`defaultBackend.priorityClassName` | default backend priorityClassName | `nil` +`defaultBackend.podSecurityContext` | Security context policies to add to the default backend | `{}` +`defaultBackend.service.annotations` | annotations for default backend service | `{}` +`defaultBackend.service.clusterIP` | internal default backend cluster service IP (set to `"-"` to pass an empty value) | `nil` +`defaultBackend.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the default backend service | `false` +`defaultBackend.service.externalIPs` | default backend service external IP addresses | `[]` +`defaultBackend.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`defaultBackend.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`defaultBackend.service.type` | type of default backend service to create | `ClusterIP` +`defaultBackend.serviceAccount.create` | if `true`, create a backend service account. Only useful if you need a pod security policy to run the backend. | `true` +`defaultBackend.serviceAccount.name` | The name of the backend service account to use. If not set and `create` is `true`, a name is generated using the fullname template. Only useful if you need a pod security policy to run the backend. | `` +`imagePullSecrets` | name of Secret resource containing private registry credentials | `nil` +`rbac.create` | if `true`, create & use RBAC resources | `true` +`rbac.scope` | if `true`, do not create & use clusterrole and -binding. Set to `true` in combination with `controller.scope.enabled=true` to disable load-balancer status updates and scope the ingress entirely. | `false` +`podSecurityPolicy.enabled` | if `true`, create & use Pod Security Policy resources | `false` +`serviceAccount.create` | if `true`, create a service account for the controller | `true` +`serviceAccount.name` | The name of the controller service account to use. If not set and `create` is `true`, a name is generated using the fullname template. | `` +`revisionHistoryLimit` | The number of old history to retain to allow rollback. | `10` +`tcp` | TCP service key:value pairs. The value is evaluated as a template. | `{}` +`udp` | UDP service key:value pairs The value is evaluated as a template. | `{}` +`releaseLabelOverride` | If provided, the value will be used as the `release` label instead of .Release.Name | `""` + +These parameters can be passed via Helm's `--set` option +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example,
+
+```console
+$ helm install stable/nginx-ingress --name my-release -f values.yaml
+```
+
+A useful trick to debug issues with ingress is to increase the logLevel
+as described [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md#debug)
+
+```console
+$ helm install stable/nginx-ingress --set controller.extraArgs.v=2
+```
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
+otherwise it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
+
+## Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics.
+
+```console
+$ helm install stable/nginx-ingress --name my-release \
+    --set controller.metrics.enabled=true
+```
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`.
+
+## nginx-ingress nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
+* in [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+* in [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
+  You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server
+
+## ExternalDNS Service configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+## AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+  service:
+    targetPorts:
+      http: http
+      https: http
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+## AWS L4 NLB with SSL Redirection
+
+The `ssl-redirect` and `force-ssl-redirect` flags do not work with an AWS Network Load Balancer. You need to turn them off and add an additional port with a `server-snippet` in order to make it work.
+
+NLB port `80` will be mapped to nginx container port `80`, and NLB port `443` will be mapped to nginx container port `8000` (special).
Then we use `$server_port` to manage redirection on port `80`:
+```yaml
+controller:
+  config:
+    ssl-redirect: "false" # we use `special` port to control ssl redirection
+    server-snippet: |
+      listen 8000;
+      if ( $server_port = 80 ) {
+        return 308 https://$host$request_uri;
+      }
+  containerPort:
+    http: 80
+    https: 443
+    special: 8000
+  service:
+    targetPorts:
+      http: http
+      https: special
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "your-arn"
+      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+```
+
+## AWS route53-mapper
+
+To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label:
+
+```yaml
+controller:
+  service:
+    labels:
+      dns: "route53"
+    annotations:
+      domainName: "kubernetes-example.com"
+```
+
+## Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+
+nginx-ingress-controller 0.25.* only works with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521).
+
+## Helm error when upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
+
+```
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, simply not providing any clusterIP value means that `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur, since `clusterIP: ""` will not be rendered.
diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/NOTES.txt b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/NOTES.txt
new file mode 100644
index 0000000..e18a901
--- /dev/null
+++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/NOTES.txt
@@ -0,0 +1,71 @@
+The nginx-ingress controller has been installed.
+
+{{- if contains "NodePort" .Values.controller.service.type }}
+Get the application URL by running these commands:
+
+{{- if (not (empty .Values.controller.service.nodePorts.http)) }}
+  export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }}
+{{- else }}
+  export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "nginx-ingress.controller.fullname" . }})
+{{- end }}
+{{- if (not (empty .Values.controller.service.nodePorts.https)) }}
+  export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }}
+{{- else }}
+  export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ template "nginx-ingress.controller.fullname" . }})
+{{- end }}
+  export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+  echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ template "nginx-ingress.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "nginx-ingress.name" . }},component={{ .Values.controller.name }},release={{ template "nginx-ingress.releaseLabel" . }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/_helpers.tpl b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/_helpers.tpl new file mode 100644 index 0000000..f9220e7 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/_helpers.tpl @@ -0,0 +1,181 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nginx-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
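+For illustration only (hypothetical values): a release named "my-release" with the default
+controller.name of "controller" would typically render "my-release-nginx-ingress-controller".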
+*/}} +{{- define "nginx-ingress.controller.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Allow for the ability to override the release name used as a label in many places. +*/}} +{{- define "nginx-ingress.releaseLabel" -}} +{{- .Values.releaseLabelOverride | default .Release.Name | trunc 63 -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "nginx-ingress.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" .Release.Namespace (include "nginx-ingress.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "nginx-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nginx-ingress.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "nginx-ingress.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "nginx-ingress.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "deployment.apiVersion" -}} +{{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podSecurityPolicy. 
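+Renders "policy/v1beta1" on Kubernetes >= 1.10 and "extensions/v1beta1" on older clusters.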
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper nginx-ingress controller image name +*/}} +{{- define "nginx-ingress.image" -}} +{{- $registryName := default "docker.io" .Values.controller.image.registry -}} +{{- $repositoryName := .Values.controller.image.repository -}} +{{- $tag := (default "latest" .Values.controller.image.tag) | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper default backend image name +*/}} +{{- define "defaultBackend.image" -}} +{{- $registryName := default "docker.io" .Values.defaultBackend.image.registry -}} +{{- $repositoryName := .Values.defaultBackend.image.repository -}} +{{- $tag := .Values.defaultBackend.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{- define "nginxIngressClass" -}} + {{- $ingressClass := .Values.controller.ingressClass -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/addheaders-configmap.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/addheaders-configmap.yaml new file mode 100644 index 0000000..534b133 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/addheaders-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}}-custom-add-headers +data: +{{ toYaml .Values.controller.addHeaders | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..97d7a2a --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "nginx-ingress.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..57c2104 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..4e4b6b5 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . 
}}-admission-create + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: create + image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ template "nginx-ingress.controller.fullname" . }}-admission,{{ template "nginx-ingress.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc + - --namespace={{ .Release.Namespace }} + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..2182e53 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: patch + image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.pullPolicy }} + args: + - patch + - --webhook-name={{ template "nginx-ingress.fullname" . }}-admission + - --namespace={{ .Release.Namespace }} + - --patch-mutating=false + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..3b69e00 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..4557662 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..0e0907d --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..11d249c --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..53f37b2 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,31 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app: {{ template "nginx-ingress.name" . }}-admission + chart: {{ template "nginx-ingress.chart" . }} + component: "admission-webhook" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . 
}}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - extensions + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ template "nginx-ingress.controller.fullname" . }}-admission + path: /extensions/v1beta1/ingresses +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrole.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrole.yaml new file mode 100644 index 0000000..14667eb --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrole.yaml @@ -0,0 +1,71 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrolebinding.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..39decda --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-configmap.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-configmap.yaml new file mode 100644 index 0000000..25625b4 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-configmap.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.controller.config (or (or .Values.controller.proxySetHeaders .Values.controller.headers) .Values.controller.addHeaders) }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +data: +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-daemonset.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-daemonset.yaml new file mode 100644 index 0000000..2a95df9 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-daemonset.yaml @@ -0,0 +1,257 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: DaemonSet +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8}} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "nginx-ingress.image" . }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ template "nginxIngressClass" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork)}} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) 
}} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". }}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-deployment.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-deployment.yaml new file mode 100644 index 0000000..dbbf3b6 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-deployment.yaml @@ -0,0 +1,255 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + {{- if .Values.controller.deploymentLabels }} +{{ toYaml .Values.controller.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} +{{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "nginx-ingress.image" . }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ template "nginxIngressClass" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ default .Release.Namespace .Values.controller.tcp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . 
}}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ default .Release.Namespace .Values.controller.udp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.scope.enabled) (.Values.rbac.scope) }} + - --update-status=false + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork) }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName 
.Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". }}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-hpa.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-hpa.yaml new file mode 100644 index 0000000..77d3533 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-hpa.yaml @@ -0,0 +1,34 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +{{- if .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: {{ template "deployment.apiVersion" . }} + kind: Deployment + name: {{ template "nginx-ingress.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: +{{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . 
}} +{{- end }} +{{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-metrics-service.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-metrics-service.yaml new file mode 100644 index 0000000..ff9d9f1 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-metrics-service.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.metrics.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.metrics.service.labels }} +{{ toYaml .Values.controller.metrics.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }}-metrics +spec: +{{- if not .Values.controller.metrics.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.metrics.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.metrics.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.metrics.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.controller.metrics.service.servicePort }} + targetPort: metrics + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.metrics.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..d1dab8f --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: controller + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-prometheusrules.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..4a43957 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-prometheusrules.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} +{{ toYaml .Values.controller.metrics.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "nginx-ingress.name" $ }} + rules: {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-psp.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-psp.yaml new file mode 100644 index 0000000..ccbf636 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-psp.yaml @@ -0,0 +1,80 @@ +{{- if .Values.podSecurityPolicy.enabled}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
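For orientation, a minimal values-override sketch (using only keys this chart already defines; the 80/443 defaults come from controller.containerPort) for the host-port case that this policy whitelists a few lines below:

controller:
  kind: DaemonSet
  daemonset:
    useHostPort: true
    hostPorts:
      http: 80
      https: 443
podSecurityPolicy:
  enabled: true

With those values the DaemonSet template sets hostPort 80 and 443 on the controller container, and this PodSecurityPolicy whitelists the matching hostPorts ranges (min/max 80 and min/max 443).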
+ volumes: + - 'configMap' + #- 'emptyDir' + - 'projected' + - 'secret' + #- 'downwardAPI' + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- if or .Values.controller.hostNetwork .Values.controller.daemonset.useHostPort }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.daemonset.useHostPort }} +{{- range $key, $value := .Values.controller.daemonset.hostPorts }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-role.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-role.yaml new file mode 100644 index 0000000..5f64e78 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-role.yaml @@ -0,0 +1,91 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ template "nginxIngressClass" . }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}] +{{- end }} + +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-rolebinding.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..c1186c0 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-service.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-service.yaml new file mode 100644 index 0000000..a2cd600 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-service.yaml @@ -0,0 +1,92 @@ +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . 
}} +spec: +{{- if not .Values.controller.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.externalTrafficPolicy) }} + externalTrafficPolicy: "{{ .Values.controller.service.externalTrafficPolicy }}" +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: "{{ .Values.controller.service.sessionAffinity }}" +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.healthCheckNodePort) }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + port: {{ $key }} + protocol: TCP + targetPort: "{{ $key }}-tcp" + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + port: {{ $key }} + protocol: UDP + targetPort: "{{ $key }}-udp" + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-serviceaccount.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..7b688e6 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . 
}} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-servicemonitor.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..f3129ea --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | indent 4 -}} + {{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- end }} + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ template "nginx-ingress.releaseLabel" . }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-webhook-service.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-webhook-service.yaml new file mode 100644 index 0000000..499c3a6 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/controller-webhook-service.yaml @@ -0,0 +1,42 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.admissionWebhooks.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . 
}}-admission +spec: +{{- if not .Values.controller.admissionWebhooks.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.admissionWebhooks.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.admissionWebhooks.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-deployment.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..c2b41ed --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-deployment.yaml @@ -0,0 +1,110 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.deploymentLabels }} +{{ toYaml .Values.defaultBackend.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.defaultBackend.useComponentLabel }} + app.kubernetes.io/component: default-backend + {{- end }} + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: "{{ .Values.defaultBackend.priorityClassName }}" +{{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: +{{ toYaml .Values.defaultBackend.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . 
}}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: "{{ .Values.defaultBackend.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + securityContext: + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + {{- if .Values.defaultBackend.extraEnvs }} + env: +{{ toYaml .Values.defaultBackend.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + resources: +{{ toYaml .Values.defaultBackend.resources | indent 12 }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: +{{ toYaml .Values.defaultBackend.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: +{{ toYaml .Values.defaultBackend.tolerations | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: +{{ toYaml .Values.defaultBackend.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..5719fd9 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if gt (.Values.defaultBackend.replicaCount | int) 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + component: "{{ .Values.defaultBackend.name }}" + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-psp.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-psp.yaml new file mode 100644 index 0000000..38191d4 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-psp.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-backend + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-role.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-role.yaml new file mode 100644 index 0000000..11fbba9 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-role.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +rules: + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . }}-backend] +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-rolebinding.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..7d03ef4 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-service.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-service.yaml new file mode 100644 index 0000000..d3a3c8f --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-service.yaml @@ -0,0 +1,43 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: +{{- if not .Values.defaultBackend.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: +{{ toYaml .Values.defaultBackend.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.defaultBackend.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + type: "{{ .Values.defaultBackend.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-serviceaccount.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..94689a6 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/proxyheaders-configmap.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/proxyheaders-configmap.yaml new file mode 100644 index 0000000..ae918ae --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/proxyheaders-configmap.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + name: {{ template "nginx-ingress.fullname" . }}-custom-proxy-headers +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/tcp-configmap.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/tcp-configmap.yaml new file mode 100644 index 0000000..96de14f --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/tcp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.tcp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-tcp +data: +{{ tpl (toYaml .Values.tcp) . | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/udp-configmap.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/udp-configmap.yaml new file mode 100644 index 0000000..69ee361 --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/templates/udp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.udp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-udp +data: +{{ tpl (toYaml .Values.udp) . | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/charts/nginx-ingress/values.yaml b/qliksense/charts/elastic-infra/charts/nginx-ingress/values.yaml new file mode 100644 index 0000000..9019b0b --- /dev/null +++ b/qliksense/charts/elastic-infra/charts/nginx-ingress/values.yaml @@ -0,0 +1,576 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + registry: ghcr.io + repository: qlik-download/nginx-ingress-controller + tag: "0.30.0" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
+ useComponentLabel: false + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: false + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. 
to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + annotations: {} + labels: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. 
Kubernetes will default to "None". + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: qlik-download/jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the all 
requests did return 5XX, this require your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of the all requests did return 4XX, this require your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: qlik-download/k8s.gcr.io/defaultbackend-amd64 + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: true + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false 
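+
+# Illustrative sketch only (commented out, so effective values are unchanged):
+# the chart README notes that `rbac.scope: true` above is intended to be
+# combined with a namespace-scoped controller, so that no ClusterRole or
+# ClusterRoleBinding is created and load-balancer status updates are disabled.
+# The matching controller keys would look roughly like the following; the
+# namespace value is a placeholder and must be set for the target environment.
+#
+# controller:
+#   scope:
+#     enabled: true
+#     namespace: ""   # placeholder: the single namespace this controller watches
+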
+ +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: +- name: artifactory-docker-secret + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" diff --git a/qliksense/charts/elastic-infra/nginx-ingress/.helmignore b/qliksense/charts/elastic-infra/nginx-ingress/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/qliksense/charts/elastic-infra/nginx-ingress/Chart.yaml b/qliksense/charts/elastic-infra/nginx-ingress/Chart.yaml new file mode 100644 index 0000000..f17e37b --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +name: nginx-ingress +version: 1.36.2 +appVersion: 0.30.0 +home: https://github.com/kubernetes/ingress-nginx +description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: + - ingress + - nginx +sources: + - https://github.com/kubernetes/ingress-nginx +engine: gotpl +kubeVersion: ">=1.10.0-0" diff --git a/qliksense/charts/elastic-infra/nginx-ingress/README.md b/qliksense/charts/elastic-infra/nginx-ingress/README.md new file mode 100644 index 0000000..87dfdb4 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/README.md @@ -0,0 +1,361 @@ +# nginx-ingress + +[nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller that uses ConfigMap to store the nginx configuration. + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +## TL;DR; + +```console +$ helm install stable/nginx-ingress +``` + +## Introduction + +This chart bootstraps an nginx-ingress deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + + - Kubernetes 1.6+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/nginx-ingress +``` + +The command deploys nginx-ingress on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the nginx-ingress chart and their default values. 
+ +Parameter | Description | Default +--- | --- | --- +`controller.name` | name of the controller component | `controller` +`controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` +`controller.image.tag` | controller container image tag | `0.30.0` +`controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` +`controller.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. | `101` +`controller.useComponentLabel` | Wether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the controller deployment* | `false` +`controller.containerPort.http` | The port that the controller container listens on for http connections. | `80` +`controller.containerPort.https` | The port that the controller container listens on for https connections. | `443` +`controller.config` | nginx [ConfigMap](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md) entries | none +`controller.hostNetwork` | If the nginx deployment / daemonset should run on the host's network namespace. Do not set this when `controller.service.externalIPs` is set and `kube-proxy` is used as there will be a port-conflict for port `80` | false +`controller.defaultBackendService` | default 404 backend service; needed only if `defaultBackend.enabled = false` and version < 0.21.0| `""` +`controller.dnsPolicy` | If using `hostNetwork=true`, change to `ClusterFirstWithHostNet`. See [pod's dns policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) for details | `ClusterFirst` +`controller.dnsConfig` | custom pod dnsConfig. See [pod's dns config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config) for details | `{}` +`controller.reportNodeInternalIp` | If using `hostNetwork=true`, setting `reportNodeInternalIp=true`, will pass the flag `report-node-internal-ip-address` to nginx-ingress. This sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. +`controller.electionID` | election ID to use for the status update | `ingress-controller-leader` +`controller.extraEnvs` | any additional environment variables to set in the pods | `{}` +`controller.extraContainers` | Sidecar containers to add to the controller pod. See [LemonLDAP::NG controller](https://github.com/lemonldap-ng-controller/lemonldap-ng-controller) as example | `{}` +`controller.extraVolumeMounts` | Additional volumeMounts to the controller main container | `{}` +`controller.extraVolumes` | Additional volumes to the controller pod | `{}` +`controller.extraInitContainers` | Containers, which are run before the app containers are started | `[]` +`controller.ingressClass` | name of the ingress class to route through this controller | `nginx` +`controller.maxmindLicenseKey` | Maxmind license key to download GeoLite2 Databases. 
See [Accessing and using GeoLite2 database](https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/) | `""` +`controller.scope.enabled` | limit the scope of the ingress controller | `false` (watch all namespaces) +`controller.scope.namespace` | namespace to watch for ingress | `""` (use the release namespace) +`controller.extraArgs` | Additional controller container arguments | `{}` +`controller.kind` | install as Deployment, DaemonSet or Both | `Deployment` +`controller.deploymentAnnotations` | annotations to be added to deployment | `{}` +`controller.autoscaling.enabled` | If true, creates Horizontal Pod Autoscaler | false +`controller.autoscaling.minReplicas` | If autoscaling enabled, this field sets minimum replica count | `2` +`controller.autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `11` +`controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization percentage to scale | `"50"` +`controller.autoscaling.targetMemoryUtilizationPercentage` | Target memory utilization percentage to scale | `"50"` +`controller.daemonset.useHostPort` | If `controller.kind` is `DaemonSet`, this will enable `hostPort` for TCP/80 and TCP/443 | false +`controller.daemonset.hostPorts.http` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"80"` +`controller.daemonset.hostPorts.https` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"443"` +`controller.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`controller.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`controller.terminationGracePeriodSeconds` | how many seconds to wait before terminating a pod | `60` +`controller.minReadySeconds` | how many seconds a pod needs to be ready before killing the next, during update | `0` +`controller.nodeSelector` | node labels for pod assignment | `{}` +`controller.podAnnotations` | annotations to be added to pods | `{}` +`controller.deploymentLabels` | labels to add to the deployment metadata | `{}` +`controller.podLabels` | labels to add to the pod container metadata | `{}` +`controller.podSecurityContext` | Security context policies to add to the controller pod | `{}` +`controller.replicaCount` | desired number of controller pods | `1` +`controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` +`controller.resources` | controller pod resource requests & limits | `{}` +`controller.priorityClassName` | controller priorityClassName | `nil` +`controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.service.annotations` | annotations for controller service | `{}` +`controller.service.labels` | labels for controller service | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` +`controller.service.enabled` | if disabled no service will be created. 
This is especially useful when `controller.kind` is set to `DaemonSet` and `controller.daemonset.useHostPorts` is `true` | true +`controller.service.clusterIP` | internal controller cluster service IP (set to `"-"` to pass an empty value) | `nil` +`controller.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the controller service | `false` +`controller.service.externalIPs` | controller service external IP addresses. Do not set this when `controller.hostNetwork` is set to `true` and `kube-proxy` is used as there will be a port-conflict for port `80` | `[]` +`controller.service.externalTrafficPolicy` | If `controller.service.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable [source IP preservation](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) | `"Cluster"` +`controller.service.sessionAffinity` | Enables client IP based session affinity. Must be `ClientIP` or `None` if set. | `""` +`controller.service.healthCheckNodePort` | If `controller.service.type` is `NodePort` or `LoadBalancer` and `controller.service.externalTrafficPolicy` is set to `Local`, set this to [the managed health-check port the kube-proxy will expose](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport). If blank, a random port in the `NodePort` range will be assigned | `""` +`controller.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.service.enableHttp` | if port 80 should be opened for service | `true` +`controller.service.enableHttps` | if port 443 should be opened for service | `true` +`controller.service.targetPorts.http` | Sets the targetPort that maps to the Ingress' port 80 | `80` +`controller.service.targetPorts.https` | Sets the targetPort that maps to the Ingress' port 443 | `443` +`controller.service.ports.http` | Sets service http port | `80` +`controller.service.ports.https` | Sets service https port | `443` +`controller.service.type` | type of controller service to create | `LoadBalancer` +`controller.service.nodePorts.http` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 80 | `""` +`controller.service.nodePorts.https` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` +`controller.service.nodePorts.tcp` | Sets the nodePort for an entry referenced by its key from `tcp` | `{}` +`controller.service.nodePorts.udp` | Sets the nodePort for an entry referenced by its key from `udp` | `{}` +`controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 +`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`controller.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.livenessProbe.port` | The port number that the liveness probe will listen on. 
| 10254 +`controller.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`controller.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`controller.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.readinessProbe.port` | The port number that the readiness probe will listen on. | 10254 +`controller.metrics.enabled` | if `true`, enable Prometheus metrics | `false` +`controller.metrics.service.annotations` | annotations for Prometheus metrics service | `{}` +`controller.metrics.service.clusterIP` | cluster IP address to assign to service (set to `"-"` to pass an empty value) | `nil` +`controller.metrics.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the metrics service | `false` +`controller.metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` +`controller.metrics.service.labels` | labels for metrics service | `{}` +`controller.metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.metrics.service.servicePort` | Prometheus metrics service port | `9913` +`controller.metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` +`controller.metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` +`controller.metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` +`controller.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` +`controller.metrics.serviceMonitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as nginx ingress` +`controller.metrics.serviceMonitor.namespaceSelector` | [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/v0.34.0/Documentation/api.md#namespaceselector) to configure what namespaces to scrape | `will scrape the helm release namespace only` +`controller.metrics.serviceMonitor.scrapeInterval` | interval between Prometheus scraping | `30s` +`controller.metrics.prometheusRule.enabled` | Set this to `true` to create prometheusRules for Prometheus operator | `false` +`controller.metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` +`controller.metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `the same namespace as nginx ingress` +`controller.metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be prometheus in YAML format, check values for an example. | `[]` +`controller.admissionWebhooks.enabled` | Create Ingress admission webhooks. Validating webhook will check the ingress syntax. 
| `false` +`controller.admissionWebhooks.failurePolicy` | Failure policy for admission webhooks | `Fail` +`controller.admissionWebhooks.port` | Admission webhook port | `8080` +`controller.admissionWebhooks.service.annotations` | Annotations for admission webhook service | `{}` +`controller.admissionWebhooks.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the admission webhook service | `false` +`controller.admissionWebhooks.service.clusterIP` | cluster IP address to assign to admission webhook service (set to `"-"` to pass an empty value) | `nil` +`controller.admissionWebhooks.service.externalIPs` | Admission webhook service external IP addresses | `[]` +`controller.admissionWebhooks.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.admissionWebhooks.service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.admissionWebhooks.service.servicePort` | Admission webhook service port | `443` +`controller.admissionWebhooks.service.type` | Type of admission webhook service to create | `ClusterIP` +`controller.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for validating webhook endpoint, and patch the created webhooks with the CA. | `true` +`controller.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` +`controller.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.0.0` +`controller.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` +`controller.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `""` +`controller.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `{}` +`controller.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `{}` +`controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` +`controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` +`controller.addHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers) added before sending response to the client | `{}` +`controller.proxySetHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#proxy-set-headers) added before sending request to the backends| `{}` +`controller.headers` | DEPRECATED, Use `controller.proxySetHeaders` instead. 
| `{}` +`controller.updateStrategy` | allows setting of RollingUpdate strategy | `{}` +`controller.configMapNamespace` | The nginx-configmap namespace name | `""` +`controller.tcp.configMapNamespace` | The tcp-services-configmap namespace name | `""` +`controller.udp.configMapNamespace` | The udp-services-configmap namespace name | `""` +`defaultBackend.enabled` | Use default backend component | `true` +`defaultBackend.name` | name of the default backend component | `default-backend` +`defaultBackend.image.repository` | default backend container image repository | `k8s.gcr.io/defaultbackend-amd64` +`defaultBackend.image.tag` | default backend container image tag | `1.5` +`defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` +`defaultBackend.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. By default uses nobody user. | `65534` +`defaultBackend.useComponentLabel` | Whether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the defaultBackend deployment* | `false` +`defaultBackend.extraArgs` | Additional default backend container arguments | `{}` +`defaultBackend.extraEnvs` | any additional environment variables to set in the defaultBackend pods | `[]` +`defaultBackend.port` | Http port number | `8080` +`defaultBackend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 +`defaultBackend.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`defaultBackend.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`defaultBackend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 0 +`defaultBackend.readinessProbe.periodSeconds` | How often to perform the probe | 5 +`defaultBackend.readinessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 +`defaultBackend.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`defaultBackend.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`defaultBackend.nodeSelector` | node labels for pod assignment | `{}` +`defaultBackend.podAnnotations` | annotations to be added to pods | `{}` +`defaultBackend.deploymentLabels` | labels to add to the deployment metadata | `{}` +`defaultBackend.podLabels` | labels to add to the pod container metadata | `{}` +`defaultBackend.replicaCount` | desired number of default backend pods | `1` +`defaultBackend.minAvailable` | minimum number of available default backend pods for PodDisruptionBudget | `1` +`defaultBackend.resources` | default backend pod resource requests & limits | `{}` +`defaultBackend.priorityClassName` | default backend priorityClassName | `nil` +`defaultBackend.podSecurityContext` | Security context policies to add to the default backend | `{}` +`defaultBackend.service.annotations` | annotations for default backend service | `{}` +`defaultBackend.service.clusterIP` | internal default backend cluster service IP (set to `"-"` to pass an empty value) | `nil` +`defaultBackend.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the default backend service | `false` +`defaultBackend.service.externalIPs` | default backend service external IP addresses | `[]` +`defaultBackend.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`defaultBackend.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`defaultBackend.service.type` | type of default backend service to create | `ClusterIP` +`defaultBackend.serviceAccount.create` | if `true`, create a backend service account. Only useful if you need a pod security policy to run the backend. | `true` +`defaultBackend.serviceAccount.name` | The name of the backend service account to use. If not set and `create` is `true`, a name is generated using the fullname template. Only useful if you need a pod security policy to run the backend. | `` +`imagePullSecrets` | name of Secret resource containing private registry credentials | `nil` +`rbac.create` | if `true`, create & use RBAC resources | `true` +`rbac.scope` | if `true`, do not create & use clusterrole and -binding. Set to `true` in combination with `controller.scope.enabled=true` to disable load-balancer status updates and scope the ingress entirely. | `false` +`podSecurityPolicy.enabled` | if `true`, create & use Pod Security Policy resources | `false` +`serviceAccount.create` | if `true`, create a service account for the controller | `true` +`serviceAccount.name` | The name of the controller service account to use. If not set and `create` is `true`, a name is generated using the fullname template. | `` +`revisionHistoryLimit` | The number of old history to retain to allow rollback. | `10` +`tcp` | TCP service key:value pairs. The value is evaluated as a template. | `{}` +`udp` | UDP service key:value pairs The value is evaluated as a template. | `{}` +`releaseLabelOverride` | If provided, the value will be used as the `release` label instead of .Release.Name | `""` + +These parameters can be passed via Helm's `--set` option +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +$ helm install stable/nginx-ingress --name my-release -f values.yaml +``` + +A useful trick to debug issues with ingress is to increase the logLevel +as described [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md#debug) + +```console +$ helm install stable/nginx-ingress --set controller.extraArgs.v=2 +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +## Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics. + +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. + +## nginx-ingress nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: +* in [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +* in [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server + +## ExternalDNS Service configuration + +Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +## AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +## AWS L4 NLB with SSL Redirection + +`ssl-redirect` and `force-ssl-redirect` flag are not working with AWS Network Load Balancer. You need to turn if off and add additional port with `server-snippet` in order to make it work. + +The port NLB `80` will be mapped to nginx container port `80` and NLB port `443` will be mapped to nginx container port `8000` (special). 
Then we use `$server_port` to manage redirection on port `80` +``` +controller: + config: + ssl-redirect: "false" # we use `special` port to control ssl redirection + server-snippet: | + listen 8000; + if ( $server_port = 80 ) { + return 308 https://$host$request_uri; + } + containerPort: + http: 80 + https: 443 + special: 8000 + service: + targetPorts: + http: http + https: special + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "your-arn" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +## AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +## Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +## Helm error when upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +``` +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/NOTES.txt b/qliksense/charts/elastic-infra/nginx-ingress/templates/NOTES.txt new file mode 100644 index 0000000..e18a901 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/NOTES.txt @@ -0,0 +1,71 @@ +The nginx-ingress controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." 
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ template "nginx-ingress.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "nginx-ingress.name" . }},component={{ .Values.controller.name }},release={{ template "nginx-ingress.releaseLabel" . }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/_helpers.tpl b/qliksense/charts/elastic-infra/nginx-ingress/templates/_helpers.tpl new file mode 100644 index 0000000..f9220e7 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/_helpers.tpl @@ -0,0 +1,181 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nginx-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "nginx-ingress.controller.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Allow for the ability to override the release name used as a label in many places. +*/}} +{{- define "nginx-ingress.releaseLabel" -}} +{{- .Values.releaseLabelOverride | default .Release.Name | trunc 63 -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "nginx-ingress.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" .Release.Namespace (include "nginx-ingress.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "nginx-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nginx-ingress.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "nginx-ingress.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "nginx-ingress.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "deployment.apiVersion" -}} +{{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper nginx-ingress controller image name +*/}} +{{- define "nginx-ingress.image" -}} +{{- $registryName := default "docker.io" .Values.controller.image.registry -}} +{{- $repositoryName := .Values.controller.image.repository -}} +{{- $tag := (default "latest" .Values.controller.image.tag) | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper default backend image name +*/}} +{{- define "defaultBackend.image" -}} +{{- $registryName := default "docker.io" .Values.defaultBackend.image.registry -}} +{{- $repositoryName := .Values.defaultBackend.image.repository -}} +{{- $tag := .Values.defaultBackend.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{- define "nginxIngressClass" -}} + {{- $ingressClass := .Values.controller.ingressClass -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/addheaders-configmap.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/addheaders-configmap.yaml new file mode 100644 index 0000000..534b133 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/addheaders-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}}-custom-add-headers +data: +{{ toYaml .Values.controller.addHeaders | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..97d7a2a --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "nginx-ingress.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..57c2104 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..4e4b6b5 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . 
}}-admission-create + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: create + image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ template "nginx-ingress.controller.fullname" . }}-admission,{{ template "nginx-ingress.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc + - --namespace={{ .Release.Namespace }} + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..2182e53 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: patch + image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.pullPolicy }} + args: + - patch + - --webhook-name={{ template "nginx-ingress.fullname" . }}-admission + - --namespace={{ .Release.Namespace }} + - --patch-mutating=false + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..3b69e00 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..4557662 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..0e0907d --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..11d249c --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..53f37b2 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,31 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app: {{ template "nginx-ingress.name" . }}-admission + chart: {{ template "nginx-ingress.chart" . }} + component: "admission-webhook" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "nginx-ingress.fullname" . 
}}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - extensions + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ template "nginx-ingress.controller.fullname" . }}-admission + path: /extensions/v1beta1/ingresses +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrole.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrole.yaml new file mode 100644 index 0000000..14667eb --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrole.yaml @@ -0,0 +1,71 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrolebinding.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..39decda --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-configmap.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-configmap.yaml new file mode 100644 index 0000000..25625b4 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-configmap.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.controller.config (or (or .Values.controller.proxySetHeaders .Values.controller.headers) .Values.controller.addHeaders) }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . 
}} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +data: +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-daemonset.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-daemonset.yaml new file mode 100644 index 0000000..2a95df9 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-daemonset.yaml @@ -0,0 +1,257 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: DaemonSet +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8}} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "nginx-ingress.image" . 
}} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ template "nginxIngressClass" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork)}} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) 
}} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". }}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-deployment.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-deployment.yaml new file mode 100644 index 0000000..dbbf3b6 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-deployment.yaml @@ -0,0 +1,255 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + {{- if .Values.controller.deploymentLabels }} +{{ toYaml .Values.controller.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} +{{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "nginx-ingress.image" . }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --ingress-class={{ template "nginxIngressClass" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" .Values.controller.image.tag) }} + - --configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ default .Release.Namespace .Values.controller.tcp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . 
}}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ default .Release.Namespace .Values.controller.udp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.scope.enabled) (.Values.rbac.scope) }} + - --update-status=false + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork) }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName 
.Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". }}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-hpa.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-hpa.yaml new file mode 100644 index 0000000..77d3533 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-hpa.yaml @@ -0,0 +1,34 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +{{- if .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: {{ template "deployment.apiVersion" . }} + kind: Deployment + name: {{ template "nginx-ingress.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: +{{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . 
}} +{{- end }} +{{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-metrics-service.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-metrics-service.yaml new file mode 100644 index 0000000..ff9d9f1 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-metrics-service.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.metrics.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.metrics.service.labels }} +{{ toYaml .Values.controller.metrics.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }}-metrics +spec: +{{- if not .Values.controller.metrics.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.metrics.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.metrics.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.controller.metrics.service.servicePort }} + targetPort: metrics + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.metrics.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-poddisruptionbudget.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..d1dab8f --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: controller + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-prometheusrules.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..4a43957 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-prometheusrules.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} +{{ toYaml .Values.controller.metrics.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "nginx-ingress.name" $ }} + rules: {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-psp.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-psp.yaml new file mode 100644 index 0000000..ccbf636 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-psp.yaml @@ -0,0 +1,80 @@ +{{- if .Values.podSecurityPolicy.enabled}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + #- 'emptyDir' + - 'projected' + - 'secret' + #- 'downwardAPI' + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- if or .Values.controller.hostNetwork .Values.controller.daemonset.useHostPort }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.daemonset.useHostPort }} +{{- range $key, $value := .Values.controller.daemonset.hostPorts }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-role.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-role.yaml new file mode 100644 index 0000000..5f64e78 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-role.yaml @@ -0,0 +1,91 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ template "nginxIngressClass" . }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}] +{{- end }} + +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-rolebinding.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..c1186c0 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-service.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-service.yaml new file mode 100644 index 0000000..a2cd600 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-service.yaml @@ -0,0 +1,92 @@ +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . 
}} +spec: +{{- if not .Values.controller.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.externalTrafficPolicy) }} + externalTrafficPolicy: "{{ .Values.controller.service.externalTrafficPolicy }}" +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: "{{ .Values.controller.service.sessionAffinity }}" +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.healthCheckNodePort) }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + port: {{ $key }} + protocol: TCP + targetPort: "{{ $key }}-tcp" + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + port: {{ $key }} + protocol: UDP + targetPort: "{{ $key }}-udp" + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-serviceaccount.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..7b688e6 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.serviceAccountName" . }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-servicemonitor.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..f3129ea --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | indent 4 -}} + {{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- end }} + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ template "nginx-ingress.releaseLabel" . }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-webhook-service.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-webhook-service.yaml new file mode 100644 index 0000000..499c3a6 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/controller-webhook-service.yaml @@ -0,0 +1,42 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.admissionWebhooks.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . 
}}-admission +spec: +{{- if not .Values.controller.admissionWebhooks.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.admissionWebhooks.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.admissionWebhooks.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-deployment.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..c2b41ed --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-deployment.yaml @@ -0,0 +1,110 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.deploymentLabels }} +{{ toYaml .Values.defaultBackend.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.defaultBackend.useComponentLabel }} + app.kubernetes.io/component: default-backend + {{- end }} + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: "{{ .Values.defaultBackend.priorityClassName }}" +{{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: +{{ toYaml .Values.defaultBackend.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . 
}}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: "{{ .Values.defaultBackend.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + securityContext: + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + {{- if .Values.defaultBackend.extraEnvs }} + env: +{{ toYaml .Values.defaultBackend.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + resources: +{{ toYaml .Values.defaultBackend.resources | indent 12 }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: +{{ toYaml .Values.defaultBackend.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: +{{ toYaml .Values.defaultBackend.tolerations | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: +{{ toYaml .Values.defaultBackend.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..5719fd9 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if gt (.Values.defaultBackend.replicaCount | int) 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + component: "{{ .Values.defaultBackend.name }}" + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-psp.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-psp.yaml new file mode 100644 index 0000000..38191d4 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-psp.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-backend + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-role.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-role.yaml new file mode 100644 index 0000000..11fbba9 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-role.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +rules: + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . }}-backend] +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-rolebinding.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..7d03ef4 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-service.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-service.yaml new file mode 100644 index 0000000..d3a3c8f --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-service.yaml @@ -0,0 +1,43 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: +{{- if not .Values.defaultBackend.service.omitClusterIP }} + clusterIP: "{{ .Values.controller.service.clusterIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: +{{ toYaml .Values.defaultBackend.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.defaultBackend.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + type: "{{ .Values.defaultBackend.service.type }}" +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-serviceaccount.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..94689a6 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/proxyheaders-configmap.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/proxyheaders-configmap.yaml new file mode 100644 index 0000000..ae918ae --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/proxyheaders-configmap.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}}-custom-proxy-headers +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/tcp-configmap.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/tcp-configmap.yaml new file mode 100644 index 0000000..96de14f --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/tcp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.tcp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-tcp +data: +{{ tpl (toYaml .Values.tcp) . | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/templates/udp-configmap.yaml b/qliksense/charts/elastic-infra/nginx-ingress/templates/udp-configmap.yaml new file mode 100644 index 0000000..69ee361 --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/templates/udp-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.udp }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-udp +data: +{{ tpl (toYaml .Values.udp) . | indent 2 }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/nginx-ingress/values.yaml b/qliksense/charts/elastic-infra/nginx-ingress/values.yaml new file mode 100644 index 0000000..9019b0b --- /dev/null +++ b/qliksense/charts/elastic-infra/nginx-ingress/values.yaml @@ -0,0 +1,576 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + registry: ghcr.io + repository: qlik-download/nginx-ingress-controller + tag: "0.30.0" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
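+  # For example (illustrative override, not a chart default), a fresh install could set:
+  #   controller:
+  #     useComponentLabel: true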
+ useComponentLabel: false + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: false + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. 
to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + annotations: {} + labels: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. 
Kubernetes will default to "None". + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: qlik-download/jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the all 
requests did return 5XX, this require your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of the all requests did return 4XX, this require your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: qlik-download/k8s.gcr.io/defaultbackend-amd64 + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: true + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false 
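+# Illustrative override (not a chart default): with rbac.create and defaultBackend.enabled
+# left at their defaults of true, enabling the flag above also renders the default-backend
+# PodSecurityPolicy, Role and RoleBinding templates shown earlier in this patch, e.g.
+#   podSecurityPolicy:
+#     enabled: true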
+ +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: +- name: artifactory-docker-secret + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" diff --git a/qliksense/charts/elastic-infra/requirements.yaml b/qliksense/charts/elastic-infra/requirements.yaml new file mode 100644 index 0000000..a2bfe1c --- /dev/null +++ b/qliksense/charts/elastic-infra/requirements.yaml @@ -0,0 +1,9 @@ +dependencies: + - name: nginx-ingress + version: 1.36.2 + repository: "file://./nginx-ingress" + condition: nginx-ingress.enabled + - name: mongodb + version: 4.5.0 + repository: "@stable" + condition: mongodb.enabled diff --git a/qliksense/charts/elastic-infra/templates/_helper.tpl b/qliksense/charts/elastic-infra/templates/_helper.tpl new file mode 100644 index 0000000..af4f10f --- /dev/null +++ b/qliksense/charts/elastic-infra/templates/_helper.tpl @@ -0,0 +1,31 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "elastic-infra.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elastic-infra.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nginxIngress.fullname" -}} +{{- $name := "nginx-ingress-controller" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "ingressClass" -}} + {{- $ingressClass := .Values.ingress.class -}} + {{- if .Values.global -}} + {{- if .Values.global.ingressClass -}} + {{- $ingressClass = .Values.global.ingressClass -}} + {{- end -}} + {{- end -}} + {{- printf "%s" $ingressClass -}} +{{- end -}} diff --git a/qliksense/charts/elastic-infra/templates/ingress.yaml b/qliksense/charts/elastic-infra/templates/ingress.yaml new file mode 100644 index 0000000..ebcf4a1 --- /dev/null +++ b/qliksense/charts/elastic-infra/templates/ingress.yaml @@ -0,0 +1,560 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "elastic-infra.fullname" . }}-api-404 + labels: + app: {{ template "elastic-infra.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: {{ template "ingressClass" . 
}} + nginx.ingress.kubernetes.io/auth-url: {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL | quote }} + nginx.ingress.kubernetes.io/auth-response-headers: Authorization + nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers 'Access-Control-Allow-Origin: $http_origin'; + more_set_headers 'Access-Control-Allow-Methods: GET, PUT, POST, DELETE, PATCH, OPTIONS'; + if ($request_method = 'OPTIONS') { + more_set_headers 'Access-Control-Max-Age: 1728000'; + more_set_headers 'Content-Type: text/plain charset=UTF-8'; + more_set_headers 'Content-Length: 0'; + return 204; + } + return 404; + nginx.ingress.kubernetes.io/server-snippet: | + opentracing on; + gzip on; + gzip_min_length 10240; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/json application/xml; + gzip_disable msie6; + server_tokens off; + + # Do not store anything in cache unless specific location overrides + more_set_headers "Cache-Control: no-store" "Pragma: no-cache"; + + # Enforce HSTS + more_set_headers "Strict-Transport-Security: max-age=15724800; includeSubDomains"; + + {{if .Values.ingress.forceHttps }} + # Redirect all HTTP requests to HTTPS + if ($scheme = http) { + return 308 https://$host$request_uri; + } + {{- end }} + + # The following custom auth block is required so we can have a reliable auth path for engine load balancing (below) + location = /_external-auth-engine { + internal; + + proxy_pass_request_body off; + proxy_set_header X-Forwarded-Proto ""; + + proxy_set_header Host edge-auth.default.svc.cluster.local; + proxy_set_header X-Original-URL $scheme://$http_host$request_uri; + proxy_set_header X-Original-Method $request_method; + proxy_set_header X-Sent-From "nginx-ingress-controller"; + proxy_set_header X-Real-IP $remote_addr; + + proxy_set_header X-Forwarded-For $remote_addr; + + proxy_set_header X-Auth-Request-Redirect $request_uri; + + proxy_buffering off; + proxy_buffer_size 4k; + proxy_buffers 4 4k; + proxy_request_buffering on; + + proxy_http_version 1.1; + proxy_ssl_server_name on; + proxy_pass_request_headers on; + + client_max_body_size "1m"; + + # Pass the extracted client certificate to the auth provider + + set $target {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL}}; + opentracing_propagate_context; + proxy_pass $target; + } + + # The following is for dataprepservice as a load balancer + location = /_dataprepservice_special { + internal; + + proxy_set_header Content-Type "application/json"; + + set $target {{ default (printf "http://%s-data-prep.%s.svc.cluster.local:9072/session/route" .Release.Name .Release.Namespace ) .Values.config.dataprepURI }}; + opentracing_propagate_context; + proxy_pass $target; + } + + # This block is for app open websocket upgrade requests. It calls qix-sessions to get an engine + # session and then proxies the websocket upgrade to the returned engine. + # + # Here are some example paths that would match this location. 
+ # /app/%3Ftransient%3D + # /app/a774322d-8230-4688-b459-3e037d53a560 + # /app/a774322d-8230-4688-b459-3e037d53a560/identity/36fec6a7-61c5-41ba-b9f5-76997a53a82c + # /app/SessionApp_a774322d-8230-4688-b459-3e037d53a560 + # /app/SessionApp_a774322d-8230-4688-b459-3e037d53a560/identity/36fec6a7-61c5-41ba-b9f5-76997a53a82c + # /qvapp/a774322d-8230-4688-b459-3e037d53a560 + # /qvapp/a774322d-8230-4688-b459-3e037d53a560/identity/36fec6a7-61c5-41ba-b9f5-76997a53a82c + # + # Note that location does not contain the query string (e.g. ?example1=true&example2=yes). + # This golang regex tester is good for testing changes to the localtion regex: https://regex101.com/. + # + # ^/app/(SessionApp_)?([a-zA-Z0-9\-]+|%3Ftransient%3D)(/identity/)?([a-zA-Z0-9\-]+)?.*|^/(qv)app/([a-zA-Z0-9\-])(/identity/)?([a-zA-Z0-9\-]+)?.* + # + location ~ ^/(qv)?app/(SessionApp_)?([a-zA-Z0-9\-]+|%3Ftransient%3D)(/identity/)?([a-zA-Z0-9\-]+)?.* { + set $engine_path ''; + set $app_path $1; + set $session_app $2; + set $app_id $3; + set $session_id $5; + set $auth_path {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL}}; + set $qix_session_path {{ default (printf "http://%s-qix-sessions.%s.svc.cluster.local:8080/v1/engine-sessions" .Release.Name .Release.Namespace ) .Values.config.qixSessionsURI }}; + + access_by_lua_block { + local bridge_tracer = require("opentracing_bridge_tracer") + local tracer = bridge_tracer.new_from_global() + local parent_context = tracer:binary_extract(ngx.var.opentracing_binary_context) + local span = tracer:start_span("qix_sessions_lua_ws", {["references"] = { {"child_of", parent_context} } }) + + local http = require "resty.http" + local httpc = http.new() + + local headers = {} + -- Copy incoming headers + for h,v in pairs(ngx.req.get_headers()) do + headers[h] = v + end + -- Set specific headers + headers["X-Forwarded-Proto"] = "" + headers["X-Original-URL"] = string.format("%s://%s%s", ngx.var.scheme, ngx.var.http_host, ngx.var.request_uri) + headers["X-Original-Method"] = ngx.var.request_method + headers["X-Original-Origin"] = ngx.var.http_origin + headers["X-Sent-From"] = "nginx-ingress-controller" + headers["X-Real-IP"] = ngx.var.the_real_ip + headers["X-Forwarded-For"] = ngx.var.the_real_ip + headers["X-Auth-Request-Redirect"] = ngx.var.request_uri + headers["Host"] = "edge-auth.default.svc.cluster.local" + headers["qlik-web-integration-id"] = ngx.req.get_uri_args()["qlik-web-integration-id"] + tracer:http_headers_inject(span:context(), headers) + + -- Authenticate + local authres, err = httpc:request_uri(ngx.var.auth_path, { headers = headers }) + + if err then + ngx.log(ngx.ERR, "error calling edge_auth: " .. tostring(err)) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + span:set_tag("auth.status", authres.status) + if authres.status ~= ngx.HTTP_OK then + ngx.log(ngx.ERR, authres.status .. " received from edge_auth") + span:set_tag("error", true) + span:finish() + if authres.status == ngx.HTTP_UNAUTHORIZED or authres.status == ngx.HTTP_FORBIDDEN then + ngx.exit(authres.status) + return + else + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + end + + -- Set upstream Authorization header + ngx.req.set_header("Authorization", authres.headers["Authorization"]) + + -- Find upstream engine + + local engReqBody = "{\"appId\":\"" .. ngx.var.app_id .. "\"," + + if string.len(ngx.var.session_id) > 0 then + engReqBody = engReqBody .. 
"\"id\":\"" .. ngx.var.session_id .. "\"," + end + + if "SessionApp_" == ngx.var.session_app then + engReqBody = engReqBody .. "\"workloadType\":\"sessionapp\"}" + else + engReqBody = engReqBody .. "\"workloadType\":\"analyse\"}" + end + + local reqHeaders = {} + reqHeaders["Content-Type"] = "application/json" + reqHeaders["Authorization"] = authres.headers["Authorization"] + tracer:http_headers_inject(span:context(), reqHeaders) + + local qix_session_res, qix_session_err = httpc:request_uri(ngx.var.qix_session_path, { + method = "POST", + headers = reqHeaders, + body = engReqBody + }) + + if qix_session_err then + ngx.log(ngx.ERR, "error calling qix-sessions: " .. tostring(qix_session_err)) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + span:set_tag("qix.sessions.status", qix_session_res.status) + if qix_session_res.status ~= ngx.HTTP_CREATED then + ngx.log(ngx.ERR, qix_session_res.status .. " received from qix-sessions") + span:set_tag("error", true) + span:finish() + ngx.exit(qix_session_res.status) + return + end + + local cjson = require "cjson"; + local qix_sessions_route_data = cjson.decode(qix_session_res.body); + + local engine_path = "" + if qix_sessions_route_data.links.engineUrl.href then + engine_path = qix_sessions_route_data.links.engineUrl.href .. "/TTL/120" + else + ngx.log(ngx.ERR, "invalid or empty engine path received from qix-sessions") + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + local app_placement_type = qix_sessions_route_data.placementDetails.appPlacementType + local selectors = qix_sessions_route_data.placementDetails.selectors + local selector_used = qix_sessions_route_data.placementDetails.selectorUsed + local from_cache = tostring(qix_sessions_route_data.fromCache) + local resp_value = "" + + local parsed_selectors = "["; + if (type(selectors)) == "table" then + for i,selector in ipairs(selectors) do + parsed_selectors = parsed_selectors .. "\"" .. selector .. "\"," + end + parsed_selectors = parsed_selectors:sub(1, -2) + end + parsed_selectors = parsed_selectors .. "]" + + if app_placement_type and parsed_selectors and selector_used then + resp_value = "{\"appPlacementType\":\"" .. app_placement_type .. "\"," .. "\"selectors\":" .. parsed_selectors .. "," .. "\"selectorUsed\":\"" .. selector_used .. "\"," .. "\"fromCache\":\"" .. from_cache .. "\"}" + ngx.header["Placement-Details"] = resp_value + end + + ngx.var.engine_path = engine_path + ngx.log(ngx.NOTICE, "forwarding request to url " .. engine_path) + + -- Some of these span tags should be logs but as soon as span:log_kv is called the qix_sessions_lua_ws span + -- does not show up in jeager. + -- Some of these span tags shoulg be set much earlier but if we do that, unsuccessful spans to edge auth or + -- qix-sessions do not show up in jaeger. 
+ span:set_tag("engine.path", engine_path) + span:set_tag("response.sessionId", qix_sessions_route_data.id) + span:set_tag("selector", selector_used) + span:set_tag("from.cache", from_cache) + + if "qv" == ngx.var.app_path then + span:set_tag("resource.type", "qvapp") + else + span:set_tag("resource.type", "app") + end + span:set_tag("appId", ngx.var.app_id) + span:set_tag("request.sessionId", ngx.var.session_id) + if "SessionApp_" == ngx.var.session_app then + span:set_tag("session.app", true) + else + span:set_tag("session.app", false) + end + span:finish() + } + + proxy_set_header X-Real-IP $proxy_protocol_addr; + proxy_set_header X-Forwarded-For $proxy_protocol_addr; + proxy_set_header X-Forwarded-Port 80; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + proxy_set_header X-NginX-Proxy true; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Upgrade $http_upgrade; + proxy_connect_timeout 7d; + proxy_read_timeout 7d; + proxy_send_timeout 7d; + proxy_http_version 1.1; + opentracing_propagate_context; + proxy_pass $engine_path; + } + + # This block controls the data-prep websocket LB and proxying logic + location ~ ^/dataprepservice/app/(.+) { + set $dps_path ''; + + set $app_id $1; + + access_by_lua_block { + local bridge_tracer = require("opentracing_bridge_tracer") + local tracer = bridge_tracer.new_from_global() + local parent_context = tracer:binary_extract(ngx.var.opentracing_binary_context) + local span = tracer:start_span("dataprep_lua_ws", {["references"] = { {"child_of", parent_context} } }) + + local headers = {} + for h,v in pairs(ngx.req.get_headers()) do + headers[h] = v + end + tracer:http_headers_inject(span:context(), headers) + + local authres = ngx.location.capture("/_external-auth-engine", { headers = headers }) + + span:set_tag("auth.status", authres.status) + if authres.status ~= ngx.HTTP_OK then + ngx.log(ngx.ERR, authres.status .. " received from edge_auth") + span:set_tag("error", true) + span:finish() + if authres.status == ngx.HTTP_UNAUTHORIZED or authres.status == ngx.HTTP_FORBIDDEN then + ngx.exit(authres.status) + return + else + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + end + + -- Set upstream Authorization header + ngx.req.set_header("Authorization", authres.header["Authorization"]) + + local dpsHeaders = {} + for h,v in pairs(ngx.req.get_headers()) do + dpsHeaders[h] = v + end + dpsHeaders["Authorization"] = authres.header["Authorization"] + tracer:http_headers_inject(span:context(), dpsHeaders) + + local reqBody = "{\"appId\":\"" .. ngx.var.app_id .. "\"}" + local dps_res = ngx.location.capture("/_dataprepservice_special", { + method = ngx.HTTP_POST, + headers = dpsHeaders, + body = reqBody + }) + + span:set_tag("data.prep.status", dps_res.status) + if dps_res.status ~= ngx.HTTP_OK then + ngx.log(ngx.ERR, "error from dataprepservice route request = " .. dps_res.status) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + local cjson = require "cjson"; + local dps_route_data = cjson.decode(dps_res.body); + local dps_path = dps_route_data.url + if not dps_path then + ngx.log(ngx.ERR, "invalid or empty path received from dataprepservice") + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + ngx.var.dps_path = string.format("%s/app/%s", dps_path, ngx.var.app_id) + ngx.log(ngx.NOTICE, "forwarding dataprepservice WS request to url " .. 
ngx.var.dps_path) + span:set_tag("data.prep.path", ngx.var.dps_path) + span:finish() + } + + proxy_set_header X-Real-IP $proxy_protocol_addr; + proxy_set_header X-Forwarded-For $proxy_protocol_addr; + proxy_set_header X-Forwarded-Port 80; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + proxy_set_header X-NginX-Proxy true; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Upgrade $http_upgrade; + proxy_connect_timeout 7d; + proxy_read_timeout 7d; + proxy_send_timeout 7d; + proxy_http_version 1.1; + opentracing_propagate_context; + proxy_pass $dps_path; + } + + # This block controls the data-prep REST API LB and proxying logic + location ~ ^/api/dataprepservice/v1/apps/([^/]*)/(.*) { + set $dps_path ''; + set $auth_path {{ default (printf "http://%s-edge-auth.%s.svc.cluster.local:8080/v1/auth" .Release.Name .Release.Namespace ) .Values.ingress.authURL}}; + set $dps_route_path {{ default (printf "http://%s-data-prep.%s.svc.cluster.local:9072/session/route" .Release.Name .Release.Namespace ) .Values.config.dataprepURI }}; + + set $app_id $1; + set $end_point $2; + + set $redir https://$best_http_host$request_uri; + # enforce ssl on server side + access_by_lua_block { + local bridge_tracer = require("opentracing_bridge_tracer") + local tracer = bridge_tracer.new_from_global() + local parent_context = tracer:binary_extract(ngx.var.opentracing_binary_context) + local span = tracer:start_span("dataprep_lua_rest", {["references"] = { {"child_of", parent_context} } }) + + local function redirect_to_https() + return ngx.var.pass_access_scheme == "http" and (ngx.var.scheme == "http" or ngx.var.scheme == "https") + end + + if redirect_to_https() then + span:finish() + ngx.redirect(ngx.var.redir, ngx.HTTP_MOVED_PERMANETLY) + return + end + + local http = require "resty.http" + local httpc = http.new() + + local random = math.random + + local function guid() + local template ='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' + return string.gsub(template, '[xy]', function (found) + local gen = (found == 'x') and random(0, 0xf) or random(8, 0xb) + return string.format('%x', gen) + end) + end + + local headers = {} + for h,v in pairs(ngx.req.get_headers()) do + headers[h] = v + end + headers["Host"] = "edge-auth.default.svc.cluster.local" + headers["X-Original-URL"] = string.format("%s://%s%s", ngx.var.scheme, ngx.var.http_host, ngx.var.request_uri) + headers["X-Original-Method"] = ngx.var.request_method + headers["X-Sent-From"] = "nginx-ingress-controller" + headers["X-Real-IP"] = ngx.var.the_real_ip + headers["X-Forwarded-For"] = ngx.var.the_real_ip + headers["X-Auth-Request-Redirect"] = ngx.var.request_uri + headers["X-DataPrep-Lua-Request-ID"] = guid() + tracer:http_headers_inject(span:context(), headers) + + local res, err = httpc:request_uri(ngx.var.auth_path, { + headers = headers, + method = "GET", + version = 1.1, + keepalive_timeout = 60, + keepalive_pool = 10 }) + + if err then + local hdrStr = "" + for key,value in pairs(headers) do + if string.lower(key) ~= "cookie" then + hdrStr = hdrStr .. key .. ":" .. value .. " " + end + end + ngx.log(ngx.ERR, "error calling edge_auth " .. tostring(err) .. " auth headers: " .. hdrStr .. " edge_auth URI: " .. ngx.var.auth_path) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + span:set_tag("auth.status", res.status) + if res.status ~= ngx.HTTP_OK then + ngx.log(ngx.ERR, res.status .. 
" received from edge_auth") + span:set_tag("error", true) + span:finish() + if res.status == ngx.HTTP_UNAUTHORIZED or res.status == ngx.HTTP_FORBIDDEN then + ngx.exit(res.status) + return + else + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + end + + -- Set upstream Authorization header + ngx.req.set_header("Authorization", res.headers["Authorization"]) + + local reqHeaders = {} + reqHeaders["Content-Type"] = "application/json" + reqHeaders["Authorization"] = res.headers["Authorization"] + tracer:http_headers_inject(span:context(), reqHeaders) + + local reqBody = "{\"appId\":\"" .. ngx.var.app_id .. "\"}" + + local dps_res, dps_err = httpc:request_uri(ngx.var.dps_route_path, { + method = "POST", + headers = reqHeaders, + body = reqBody + }) + + if dps_err then + ngx.log(ngx.ERR, "error calling dataprepservice: " .. tostring(dps_err)) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + span:set_tag("data.prep.status", dps_res.status) + if dps_res.status ~= ngx.HTTP_OK then + ngx.log(ngx.ERR, "error from dataprepservice route request: " .. dps_res.status) + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + local cjson = require "cjson"; + local dps_route_data = cjson.decode(dps_res.body); + local dps_path = dps_route_data.url + if not dps_path then + ngx.log(ngx.ERR, "invalid or empty path received from dataprepservice") + span:set_tag("error", true) + span:finish() + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + return + end + + local query_params = ngx.encode_args(ngx.req.get_uri_args()) + if query_params then + query_params = "?" .. query_params + end + + ngx.var.dps_path = string.format("%s/v1/apps/%s/%s%s", dps_path, ngx.var.app_id, ngx.var.end_point, query_params) + ngx.log(ngx.NOTICE, "forwarding dataprepservice request to url " .. ngx.var.dps_path) + span:set_tag("data.prep.path", ngx.var.dps_path) + span:finish() + } + + proxy_set_header X-Real-IP $proxy_protocol_addr; + proxy_set_header X-Forwarded-For $proxy_protocol_addr; + proxy_set_header X-Forwarded-Port 80; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + proxy_set_header X-NginX-Proxy true; + proxy_connect_timeout 7d; + proxy_read_timeout 7d; + proxy_send_timeout 7d; + opentracing_propagate_context; + proxy_pass $dps_path; + } + +{{- with .Values.ingress.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + rules: + - http: + paths: # paths are required but we override above with a return 404 so as to not hit the default backend + - path: /api + backend: + serviceName: {{ template "nginxIngress.fullname" . }} + servicePort: 80 + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} diff --git a/qliksense/charts/elastic-infra/templates/tls-secret.yaml b/qliksense/charts/elastic-infra/templates/tls-secret.yaml new file mode 100644 index 0000000..ebb584e --- /dev/null +++ b/qliksense/charts/elastic-infra/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if eq .Values.tlsSecret.enabled true }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "elastic-infra.fullname" . }}-tls-secret + labels: + app: {{ template "elastic-infra.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: kubernetes.io/tls +data: + tls.crt: {{ .Values.tlsSecret.crt | b64enc }} + tls.key: {{ .Values.tlsSecret.key | b64enc }} +{{- if .Values.tlsSecret.ca }} + ca.crt: {{ .Values.tlsSecret.ca | b64enc }} +{{- end }} +{{- end }} diff --git a/qliksense/charts/elastic-infra/templates/tlscert.yaml b/qliksense/charts/elastic-infra/templates/tlscert.yaml new file mode 100644 index 0000000..34794a5 --- /dev/null +++ b/qliksense/charts/elastic-infra/templates/tlscert.yaml @@ -0,0 +1,23 @@ +{{- if or .Values.tlsCert.fqdnList .Values.tlsCert.fqdn }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "elastic-infra.fullname" . }}-tls-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "elastic-infra.fullname" . }}-tls-secret + issuerRef: + kind: ClusterIssuer + name: letsencrypt-prod +{{- if .Values.tlsCert.fqdnList }} + commonName: "{{ index .Values.tlsCert.fqdnList 0 }}" +{{- else }} + commonName: "{{ .Values.tlsCert.fqdn }}" +{{- end }} + dnsNames: +{{- if .Values.tlsCert.fqdnList }} +{{ toYaml .Values.tlsCert.fqdnList | indent 2 }} +{{- else }} + - "{{ .Values.tlsCert.fqdn }}" +{{- end -}} +{{- end }} diff --git a/qliksense/charts/elastic-infra/values.yaml b/qliksense/charts/elastic-infra/values.yaml new file mode 100644 index 0000000..7e8ecfd --- /dev/null +++ b/qliksense/charts/elastic-infra/values.yaml @@ -0,0 +1,177 @@ +## Default values for elastic-infra Helm Chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +## config sets additional chart configurations +config: + ## qixSessionsURI overrides the generated qix-sessions URI for engine load balancing rules + qixSessionsURI: null + ## dataprepURI overrides the generated data-prep URI for data-prep session stickiness rules + dataprepURI: null + +## Ingress configuration. +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Default hostname. + # host: elastic.example + + ## TLS configuration. + ## + # tls: + # - secretName: elastic-infra-elastic-infra-tls-secret + # hosts: + # - elastic.example + + ## Annotations to be added to the ingress. 
+ ## + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 500m + nginx.org/client-max-body-size: 500m + + ## authURL override of default http://{.Release.Name}.{.Release.Namespace}.svc.cluster.local:8080/v1/auth + # authURL: + + ## kubernetes.io/ingress.class override of default nginx + class: "nginx" + + ## forceHttps forces all HTTP requests to HTTPS + forceHttps: false + +## nginx-ingress dependency configuration +nginx-ingress: + + ## whether to include nginx-ingress + enabled: true + defaultBackend: + enabled: false + imagePullSecrets: + - name: artifactory-docker-secret + controller: + image: + tag: "2.1.1" + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + terminationGracePeriodSeconds: 300 + config: + worker-shutdown-timeout: "300s" + proxy-add-original-uri-header: "true" + enable-opentracing: "true" + jaeger-collector-host: $JAEGER_AGENT_HOST + jaeger-service-name: elastic-infra-nginx-ingress + http-snippet: | + # set some custom opentracing tags + opentracing_tag http.user_agent $http_user_agent; + opentracing_tag http.proto $server_protocol; + opentracing_tag nginx.request_id $request_id; + extraEnvs: + - name: JAEGER_AGENT_HOST # NOTE: point to the jaeger-agent daemon on the node + valueFrom: + fieldRef: + fieldPath: status.hostIP + extraArgs: + default-ssl-certificate: "default/elastic-infra-elastic-infra-tls-secret" + metrics-per-host: "false" + stats: + enabled: true + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "10254" + resources: {} + scope: + enabled: true + namespace: "" + +tlsCert: {} +tlsSecret: + enabled: true + # openssl req -x509 -nodes -days 3600 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=elastic.example/O=elastic-elastic-local-cert" \ + # -config <(cat /etc/ssl/openssl.cnf <(printf "\n[ req ]\nx509_extensions = v3_req\n[ v3_req ]\nkeyUsage = keyEncipherment, dataEncipherment, digitalSignature\nextendedKeyUsage = serverAuth\nsubjectAltName=DNS:elastic.example,DNS:*.elastic.example\n")) + crt: | + -----BEGIN CERTIFICATE----- + MIIDVDCCAjygAwIBAgIJAJ0tqMT5DWpsMA0GCSqGSIb3DQEBCwUAMD8xGDAWBgNV + BAMMD2VsYXN0aWMuZXhhbXBsZTEjMCEGA1UECgwaZWxhc3RpYy1lbGFzdGljLWxv + Y2FsLWNlcnQwHhcNMTgwNjExMTUxMTAzWhcNMjgwNDE5MTUxMTAzWjA/MRgwFgYD + VQQDDA9lbGFzdGljLmV4YW1wbGUxIzAhBgNVBAoMGmVsYXN0aWMtZWxhc3RpYy1s + b2NhbC1jZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQLIgVxC + dF27PXs/s8TXvTdUy5pdQgxocZ/Lzu1nRWmmFEG52Z92dcKWI09cuX9eIYg14smY + G3Jkcoo/5KtYKi6hyuPm6VktdrSWBjmUtldlx7QTK6LEVXEyE7T6zAmFWyLe5I0B + l7QBY6vuh+x5vZdIgwK5ew0EfcIQMQkkb35doM1bn5LBDVIqS3f5u150+135F+lY + NsiEqhYiQ1fmO2i3K0K9nS0Iw6no5jv2FZ6tymOscl/if3ud3S5196MN2mhZBDUZ + ipm8eEY15RNkuPIpDO8LbA0fQNqG2aqFkrrlW+lGSh4Xf6K6gmfAQumy+lkGtjVU + eNNtaz6yL1cdSwIDAQABo1MwUTALBgNVHQ8EBAMCBLAwEwYDVR0lBAwwCgYIKwYB + BQUHAwEwLQYDVR0RBCYwJIIPZWxhc3RpYy5leGFtcGxlghEqLmVsYXN0aWMuZXhh + bXBsZTANBgkqhkiG9w0BAQsFAAOCAQEAT1g0XjzSCD0ANAyp1NYDKSvYUGGpjRhY + uBQbtpp5+P3swlou3o05pt3rvVwBlp+kcjQpLBi7p2DSn57EXs9xqDA0GF9LHwiS + spfUbXQk2HkQ7HjGoMEHBKUjNUBhf2dYTn+PmlxhnYif+hSgdx7HgRn18t+HjL/I + UeQI10v/tLbOWbfGAfiFb42Pq/V85hbe5ofSUNlul4VM8eW6MR6PvoWXabXqPvj3 + IzU9Y6QQh7gjbcP7dfYFBwqEFu18yi1GEo71+CmR1E/ei+i3d7YLyLMhT8vg1SCR + 3y0uMVY8y1TzHu/NLIt5Z3bmk4RcqJ505lbkkLwsIQXS2i1Lnwnpyw== + -----END CERTIFICATE----- + key: | + -----BEGIN PRIVATE KEY----- + MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQClAsiBXEJ0Xbs9 + ez+zxNe9N1TLml1CDGhxn8vO7WdFaaYUQbnZn3Z1wpYjT1y5f14hiDXiyZgbcmRy + ij/kq1gqLqHK4+bpWS12tJYGOZS2V2XHtBMrosRVcTITtPrMCYVbIt7kjQGXtAFj + 
q+6H7Hm9l0iDArl7DQR9whAxCSRvfl2gzVufksENUipLd/m7XnT7XfkX6Vg2yISq + FiJDV+Y7aLcrQr2dLQjDqejmO/YVnq3KY6xyX+J/e53dLnX3ow3aaFkENRmKmbx4 + RjXlE2S48ikM7wtsDR9A2obZqoWSuuVb6UZKHhd/orqCZ8BC6bL6WQa2NVR4021r + PrIvVx1LAgMBAAECggEAeEOYCRhJ4u/xqLSsUARWTLUIG0CNWbcbZDzX2SAUFXYT + sk3Y+3CKcPd6X4/W/+eBnqTcyUauksqDxStXt/zdzBiimPITeN0jEpI5iZ3r8h2u + s6deFX6S48cVfWF5LL5/sFWw6BiRIZTzMka8GdrTO8gH9FxG/RUWweVv0Z1dLdhb + F71pxJ2bQF/ihPNj9Mnw4kv6I+qo560Ce7tunF84g9/u0S3LipTTWsKlMccKIbGU + YJ/4tdENBwq++56qxulbDqp8BrP4rUnqE5H80Fp51PbB4Wa8k+2m+ATufCtigABH + 7gIcNsLDdnFxWF3D6X99IjvwEDfHPaqK0ChQJA2/qQKBgQDQ+3uIMx+Ptd2HOMRf + f8TUD7PnvHWiJ5a16c0pq3kc7Ez/fdypmMfmts0DgQrl50S3F+b3NIAmQXPdwwwp + hXARKkgAyAu+D7eTUDj0hkFsgXTylr/Ag0iC9Om1HETUDljsQ/xMaXq+tGkATHi3 + 0W/f8v3IdA5oEnX5UGZsXm6E9wKBgQDKIrm+ygPwfwpqi+im6v9/770EVCSCI63m + /mykfeuMUQppv6D0YKnakmQqpODR9pTM+MrKMCN/SvFUx7CGApn3T+4JN8LoNdU4 + hm2bkfcNLlkO0kfDuJ8p63VMxK59KamSachASBKVZhdC1+Ii6E0CckTP0XZnSCID + Z8aDzYsZTQKBgFfKPWPC7jBF2xDyFPR1TS80hYQFFHmRHeu/kvM9WHyA+/ucWPVZ + /Cf8hPVC4VwhIim0Pc99W5Q2jMENLe/HYYOkemjxoJUwLxMQcUyvlY5Kd+Fs2JK7 + 09tHnWH6xSM7/7kI+lTzPqAcU7vPZCr8LX4rqtbpgh/QGYUpdE10AgOTAoGAInkj + poc7fsOb2in8RqNjacVQPjG/fDZrjS6tBqy3BFo55WkaITNvJGi9DozKDuT34bE4 + nJzzIN1+JRBdaa195rDKDuZkpkewEpDSlqqhMK1L4Pw54wZUlsiiW7Jbc9ssIV1L + GrNv/+zGYM8CsSNfJuCoHa1CfUDPFDeZopbmdy0CgYBZYNM54YOrMvV0ISqbna/C + OAc+r/2YNABd/4TGpX3TsiLyShnXKN3mW/xD1odTNFMEW0HP3kg748f1SKJ3UZBs + 0Gpo4hz8vVXaBC6lCVGwvspYbdP2A8GPnANDJnwjrPJuClSd/GromIPnIPmA1+Ui + YTDDXYUe6MXgqE1siYSltg== + -----END PRIVATE KEY----- + +## mongodb dependency configuration +mongodb: + image: + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## This value overrides the mongo image tag in chart v.4.5.0 (tag: 4.0.3-debian-9) + tag: 3.6.12 + + ## whether to include mongodb + enabled: true + ## disable password for local dev mode + usePassword: false + + persistence: + enabled: false + + securityContext: + enabled: false + + ## Enable prometheus metrics being exposed + metrics: + enabled: true + ## annotations when metrics deployed as a separate service (in master/slave mode) + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" diff --git a/qliksense/charts/encryption/.helmignore b/qliksense/charts/encryption/.helmignore new file mode 100644 index 0000000..dd3e638 --- /dev/null +++ b/qliksense/charts/encryption/.helmignore @@ -0,0 +1 @@ +dependencies.yaml diff --git a/qliksense/charts/encryption/Chart.yaml b/qliksense/charts/encryption/Chart.yaml new file mode 100644 index 0000000..b9ef071 --- /dev/null +++ b/qliksense/charts/encryption/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: The encryption service provides simple cryptographic functions to other + elastic services. +name: encryption +sources: +- https://github.com/qlik-trial/encryption +version: 2.5.5 diff --git a/qliksense/charts/encryption/README.md b/qliksense/charts/encryption/README.md new file mode 100644 index 0000000..ceba64e --- /dev/null +++ b/qliksense/charts/encryption/README.md @@ -0,0 +1,77 @@ +# encryption + +[encryption](https://github.com/qlik-trial/elastic-encryption) is a service that provides simple cryptographic functions (at `/v1/encryption`). + +## Introduction + +This chart bootstraps an encryption deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
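+
+For orientation, a minimal and purely illustrative values override selecting the Vault backend could look like the following; the parameter names are described in the [Configuration](#configuration) section below and the values shown are placeholders:
+
+```yaml
+backend:
+  type: vault
+  uri: https://vault.example.com:8200   # placeholder Vault address
+  auth:
+    type: token
+    token: "<vault-token>"              # placeholder; the chart stores this in a Secret
+    tokenRenew: true
+```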
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release qlik/encryption
+```
+
+The command deploys encryption on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the encryption chart.
+Refer to the [values.yaml](https://github.com/qlik-trial/elastic-charts/blob/master/charts/encryption/values.yaml) for the default values.
+
+| Parameter | Description |
+| ---------------------------- | ---------------------------------- |
+| `image.pullPolicy` | encryption image pull policy |
+| `global.imageRegistry` | The global image registry (overrides default `image.registry`) |
+| `image.repository` | encryption image with no registry |
+| `image.tag` | encryption image version |
+| `imagePullSecrets` | A list of secret names for accessing private image registries |
+| `replicaCount` | Number of encryption replicas |
+| `service.type` | Service type |
+| `service.port` | encryption listen port |
+| `metrics.prometheus.enabled` | whether prometheus metrics are enabled |
+| `config.logLevel` | Encryption log level - debug, info, error |
+| `config.auth.enabled` | Authentication of the JWT in incoming requests (true/false). If true, the edge-auth and keys services are used to evaluate the JWT. |
+| `config.auth.jwtAud` | "Audience" in JWT, default is "qlik.api.internal/encryption" |
+| `jwks.uri` | URI where the JWKS to validate JWTs is located |
+
+### Backends
+
+#### Loopback
+
+| Parameter | Description |
+| ---------------------------- | ---------------------------------- |
+| `backend.type` | Backend to use. Set this to `loopback` |
+
+#### Vault
+
+| Parameter | Description |
+| ---------------------------- | ---------------------------------- |
+| `backend.type` | Backend to use. Set this to `vault` |
+| `backend.uri` | URI where Vault is located |
+| `backend.auth.type` | Type of auth to use when communicating with Vault. Set this to `token` |
+| `backend.auth.token` | Token to use when communicating with Vault |
+| `backend.auth.tokenRenew` | Should encryption auto-renew the token |
+| `backend.auth.tokenRenewFrequency` | How often should the token be auto-renewed |
+| `backend.auth.tokenRenewTTL` | How long should the renewed token be good for |
+
+Any of these parameters can be overridden using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install --name my-release -f my-custom-values.yaml qlik/encryption
+```
diff --git a/qliksense/charts/encryption/templates/_helpers.tpl b/qliksense/charts/encryption/templates/_helpers.tpl
new file mode 100644
index 0000000..d798500
--- /dev/null
+++ b/qliksense/charts/encryption/templates/_helpers.tpl
@@ -0,0 +1,50 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}} +{{- define "encryption.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "encryption.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "encryption.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* Return encryption image name */}} +{{- define "encryption.image" -}} + {{/* docker.io is the default registry - e.g. "qlik/myimage" resolves to "docker.io/qlik/myimage" */}} + {{- $registry := default "docker.io" .Values.image.registry -}} + {{- $repository := required "A valid image.repository entry required!" .Values.image.repository -}} + {{/* omitting the tag assumes "latest" */}} + {{- $tag := (default "latest" .Values.image.tag) | toString -}} + {{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repository $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} + {{- else -}} + {{- printf "%s/%s:%s" $registry $repository $tag -}} + {{- end -}} +{{- end -}} diff --git a/qliksense/charts/encryption/templates/deployment.yaml b/qliksense/charts/encryption/templates/deployment.yaml new file mode 100644 index 0000000..548cb36 --- /dev/null +++ b/qliksense/charts/encryption/templates/deployment.yaml @@ -0,0 +1,117 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: {{ template "encryption.fullname" . }} + labels: + app: {{ template "encryption.fullname" . }} + chart: {{ template "encryption.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "encryption.fullname" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "encryption.fullname" . }} + release: {{ .Release.Name }} + spec: + {{- if .Values.serviceAccount.name }} + serviceAccountName: {{ .Values.serviceAccount.name }} + {{- else }} + serviceAccountName: {{ template "encryption.fullname" . }} + {{- end }} + containers: + - name: {{ template "encryption.fullname" . }} + image: {{ template "encryption.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AUTH_JWKS_URI + value: {{ default (printf "http://%s-keys:8080/v1/keys/qlik.api.internal" .Release.Name ) .Values.jwks.uri | quote }} + - name: AUTH_JWT_AUD + value: {{ .Values.config.auth.jwtAud | quote }} + - name: ENABLE_ADMIN_ENDPOINT + value: "false" + - name: AUTH_ENABLED + value: {{ .Values.config.auth.enabled | quote }} + - name: LOG_LEVEL + value: {{ default "info" .Values.config.logLevel | quote }} + + {{- if .Values.enabled }} + - name: BACKEND_TYPE + value: {{ .Values.backend.type}} + + {{- if .Values.backend.uri }} + - name: BACKEND_URI + value: {{ .Values.backend.uri}} + {{- end }} + + {{- if .Values.backend.auth }} + {{- if .Values.backend.auth.type }} + - name: BACKEND_OPTIONS_AUTH_TYPE + value: {{ .Values.backend.auth.type}} + {{- end }} + {{- if .Values.backend.auth.token }} + - name: BACKEND_OPTIONS_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-auth-token + key: token + {{- end }} + {{- if .Values.backend.auth.tokenRenew }} + - name: BACKEND_OPTIONS_AUTH_TOKEN_RENEW + value: {{ .Values.backend.auth.tokenRenew | quote }} + {{- end }} + {{- if .Values.backend.auth.tokenRenewFrequency }} + - name: BACKEND_OPTIONS_AUTH_TOKEN_RENEW_FREQUENCY + value: {{ .Values.backend.auth.tokenRenewFrequency | quote }} + {{- end }} + {{- if .Values.backend.auth.tokenRenewTTL }} + - name: BACKEND_OPTIONS_AUTH_TOKEN_RENEW_TTL + value: {{ .Values.backend.auth.tokenRenewTTL | quote }} + {{- end }} + {{- end }} + + {{- else }} + - name: BACKEND_TYPE + value: loopback + {{- end }} +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumeMounts: +{{- include "qlik.ca-certificates.volumeMount" . | nindent 10 }} +{{- end }}{{- end }}{{- end }} + ports: + - containerPort: {{ .Values.service.port }} + livenessProbe: + httpGet: + path: /health + port: 8080 + readinessProbe: + httpGet: + path: /health + port: 8080 + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- if .Values.global }}{{- if .Values.global.certs }}{{- if .Values.global.certs.enabled }} + volumes: +{{- include "qlik.ca-certificates.volume" . | nindent 6 }} +{{- end }}{{- end }}{{- end }} diff --git a/qliksense/charts/encryption/templates/service.yaml b/qliksense/charts/encryption/templates/service.yaml new file mode 100644 index 0000000..35f20de --- /dev/null +++ b/qliksense/charts/encryption/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "encryption.fullname" . }} + labels: + app: {{ template "encryption.fullname" . }} + chart: {{ template "encryption.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- if .Values.metrics.prometheus.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ default .Values.service.port .Values.metrics.prometheus.port | quote }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ template "encryption.fullname" . 
}} + selector: + app: {{ template "encryption.fullname" . }} + release: {{ .Release.Name }} diff --git a/qliksense/charts/encryption/templates/serviceaccount.yaml b/qliksense/charts/encryption/templates/serviceaccount.yaml new file mode 100644 index 0000000..c28294d --- /dev/null +++ b/qliksense/charts/encryption/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- if .Values.serviceAccount.name }} + name: {{ .Values.serviceAccount.name }} + {{- else }} + name: {{ template "encryption.fullname" . }} + {{- end }} +{{- end }} diff --git a/qliksense/charts/encryption/templates/token-secret.yaml b/qliksense/charts/encryption/templates/token-secret.yaml new file mode 100644 index 0000000..ed65cde --- /dev/null +++ b/qliksense/charts/encryption/templates/token-secret.yaml @@ -0,0 +1,9 @@ +{{- if .Values.backend }}{{- if .Values.backend.auth }}{{- if .Values.backend.auth.token }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-auth-token +type: Opaque +data: + token: {{ .Values.backend.auth.token | b64enc }} +{{- end}}{{- end}}{{- end}} diff --git a/qliksense/charts/encryption/values.yaml b/qliksense/charts/encryption/values.yaml new file mode 100644 index 0000000..c4aa01d --- /dev/null +++ b/qliksense/charts/encryption/values.yaml @@ -0,0 +1,121 @@ +# Default values for encryption. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Override the name of the Chart. +## +# nameOverride: + +image: + ## Default registry where the repository is pulled from. + ## `global.imageRegistry` if set override this value. + ## + registry: ghcr.io + + ## encryption image + ## + repository: qlik-download/encryption + + ## encryption image version + ## ref: https://qliktech.jfrog.io/qliktech/webapp/#/packages/docker/encryption/ + ## + tag: 3.2.1 + + ## Specify an imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + # pullPolicy: + +## Secrets for pulling images from a private docker registry. +## +imagePullSecrets: + - name: artifactory-docker-secret + +## Number of replicas. +## +replicaCount: 1 + +## Service configuration +## ref: https://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 8080 + +## Allows encryption to be turned off easily. +## false in this cause means ignoreing the below `backend` settings and hardcoding the loopback client +enabled: true + +## Encryption service configuration +config: + ## Log level configuration. 
+ ## + logLevel: "info" + + ## Authentication configuration + ## + auth: + # toggle JWT validation using retrieved keys from the configured JWKS endpoint + enabled: true + # expected `audience` value within the JWT claims + jwtAud: qlik.api.internal/encryption + +## Encryption backend specifc configurations +## +backend: + ## Which backend to enable [loopback,vault] + ## + type: loopback + ## URI used to reach the backend (Only used by vault) + ## + # uri: + + ## Auth settings used with the backend (Only used by vault) + # auth: + ## Type of authentication to use [token,kubernetes] + # type: + ## Token to use + # token: + ## Should encryption auto-renew the token + # tokenRenew: + ## How often should the token be auto-renewed + # tokenRenewFrequency: + ## How long should the renewed token be good for + # tokenRenewTTL: + + +## Service account configuration +## +serviceAccount: + # Specifies whether a service account should be created. Set to false if you're using one that already exists in the cluster + create: true + # Name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +## Metrics configuration +## +metrics: + + ## Prometheus configuration + ## + prometheus: + ## prometheus.enabled determines whether the annotations for prometheus scraping are included. + enabled: true + +## JWKS configuration +jwks: {} + ## URI where the JWKS to validate JWTs is located. This overrides the default of http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## If left blank the service will return 401 on all authenticated endpoints + # uri: + +## Resources configuration +## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +## +# resources: +# limits: +# cpu: 200m +# memory: 50Mi +# requests: +# cpu: 50m +# memory: 20Mi diff --git a/qliksense/charts/engine/.helmignore b/qliksense/charts/engine/.helmignore new file mode 100644 index 0000000..0ac6282 --- /dev/null +++ b/qliksense/charts/engine/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +dependencies.yaml diff --git a/qliksense/charts/engine/Chart.yaml b/qliksense/charts/engine/Chart.yaml new file mode 100644 index 0000000..67c5cc6 --- /dev/null +++ b/qliksense/charts/engine/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +appVersion: 12.687.0 +description: A Helm chart for the qix engine +home: https://www.qlik.com +name: engine +sources: +- https://github.com/qlik-trial/engine +version: 1.68.13 diff --git a/qliksense/charts/engine/README.md b/qliksense/charts/engine/README.md new file mode 100644 index 0000000..bdee1dc --- /dev/null +++ b/qliksense/charts/engine/README.md @@ -0,0 +1,277 @@ +# engine + +[engine](https://github.com/qlik-trial/engine) is the service that handles all application calculations and logic. + +## Introduction + +This chart bootstraps an engine service on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
+ +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release qlik/engine +``` + +The command deploys engine on the Kubernetes cluster in the default configuration. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `environment` | Environment name | `example` | +| `region` | Deployed region | `example`| +| `global.imageRegistry` | The global image registry (overrides default `image.registry`) | `nil` | +| `image.registry` | The default registry where the repository is pulled from. | `qliktech-docker.jfrog.io` | +| `image.repository` | Image name with no registry | `engine`| +| `image.tag` | Image version | If defined, `` else `latest` | +| `image.pullPolicy` | Image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `qlikview.image.registry` | The default registry where the qlikview repository is pulled from. | `qliktech-docker.jfrog.io` | +| `qlikview.image.repository` | Qlikview image name with no registry | `engine-qv`| +| `qlikview.image.tag` | Qlikview image version | If not defined, `image.tag` is used else this value if defined else `latest` | +| `qlikview.image.pullPolicy` | image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | +| `imagePullSecrets` | A list of secret names for accessing private image registries | `[{name: "artifactory-docker-secret"}]` | +| `replicaCount` | Number of engine replicas | `1` | +| `acceptEULA` | Flag for accepting the Engine EULA | `no` | +| `enableCrashDumpUpload` | Flag for enabling the upload of crashes to Backtrace | `false` | +| `resources.limits.cpu` | Engine cpu resource limits, e.g. 500m | `nil` | +| `resources.limits.memory` | Engine memory resource limits, e.g. 8Gi | `nil` | +| `resources.requests.cpu` | Engine cpu resource request, e.g. 500m | `nil` | +| `resources.requests.memory` | Engine memory resource request, e.g. 8Gi | `nil` | +| `reloadResources.limits.cpu` | Reload Engine cpu resource limits, e.g. 500m | `nil` | +| `reloadResources.limits.memory` | Reload Engine memory resource limits, e.g. 8Gi | `nil` | +| `reloadResources.requests.cpu` | Reload Engine cpu resource request, e.g. 500m | `nil` | +| `reloadResources.requests.memory` | Reload Engine memory resource request, e.g. 
8Gi | `nil` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Engine external port | `9076` | +| `liveness.timeoutSeconds` | Engine Liveness timeout in seconds | `10` | +| `liveness.failureThreshold` | Engine Liveness failure threshold | `3` | +| `optionalArgs` | Engine optional arguments | `` | +| `metrics.prometheus.enabled` | Prometheus metrics enablement | `true` | +| `metrics.prometheus.port` | Port for Prometheus metrics | `9090` | +| `persistence.enabled` | Use persistent volume claim for persistence | `true` | +| `persistence.autoSave.enabled` | Enables auto save mode | `true` | +| `persistence.autoSave.interval` | Autosave interval in seconds | `5` | +| `persistence.accessMode` | Persistence access mode | `ReadWriteMany` | +| `persistence.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.size` | Persistence volume size | `5Gi` | +| `persistence.existingClaim` | If defined, PersistentVolumeClaim is not created and Engine chart uses this claim | `nil` | +| `persistence.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled. | `{}` | +| `ingress.enabled` | Enables Ingress | `true` | +| `ingress.class` | the `kubernetes.io/ingress.class` to use | `nginx` | +| `ingress.authURL` | The URL to use for nginx's `auth-url` configuration to authenticate `/api` requests | `http://{.Release.Name}-edge-auth.{.Release.Namespace}.svc.cluster.local:8080/v1/auth` | +| `ingress.annotations` | Ingress annotations | `{kubernetes.io/ingress.class: traefik, traefik.frontend.rule.type: PathPrefix}` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `jwt.enabled` | Enables JWT validation | `true` | +| `jwt.uri` | URI for JWKS endpoint - used to fetch public keys to validate JWTs | `http://{.Release.Name}-keys:8080/v1/keys/qlik.api.internal` | +| `jwt.secret` | Name of secret containing JWT public key for signature verification. The name of the secret data must be `jwt-public-key` | `nil` | +| `serviceJwt.enabled` | Enables Service 2 Service JWT | `true` | +| `serviceJwt.jwtPrivateKey` | Private key for self signed JWT, corresponding public key can be found in keys/values.yaml | `see values.yaml` | +| `serviceJwt.keyId` | Key id for self signed JWT | `see values.yaml` | +| `serviceJwt.enabledRenewToken` | Enables Renew Token | `true` | +| `serviceJwt.internalTokens` | URI to internal token API | `http://{.Release.Name}-edge-auth:8080/v1` | +| `accessControl.enabled` | Enables settings for Attribute based access control (ABAC). | `true` | +| `accessControlRules` | Section of yaml containing static ABAC rules used to secure access to an engine. | `see values.yaml` | +| `sandBox.enabled` | Enables sandbox | `false` | +| `sandBox.calcMemoryLimitMB` | Limits max memory consumption per calculation. | `nil` | +| `sandBox.exportTimeLimitSec` | Limits max time in seconds per export. | `nil` | +| `sandBox.exportMemoryLimitMB` | Limits max memory consumption per export. | `nil` | +| `sandBox.calcTimeLimitSec` | Limits max time in seconds per calculation. | `nil` | +| `sandBox.reloadMemoryLimitMB` | Limits max memory consumption per reload. | `nil` | +| `sandBox.reloadTimeLimitSec` | Limits max time in seconds per reload. 
| `nil` | +| `externalRestServices.RestServiceRequestNbrOfRetries` | Limits number of retries engine does to external rest services. | `5` | +| `license.enabled` | Enables license validation | `true` | +| `license.uri` | URI for license endpoint | `http://{.Release.Name}-licenses:9200` | +| `licence.cacheTimeout` | The time in seconds that we cache the license access type (professional/analyzer) information. A value <=0 disables the cache. | 3600 | +| `spaces.enabled` | Enables spaces access control integration | `true` | +| `spaces.uri` | URI for spaces endpoint | `http://{.Release.Name}-spaces:6080` | +| `logging.HttpTrafficLogVerbosity` | Traffic log verbosity | `4` | +| `logging.TrafficLogVerbosity` | QIX traffic log verbosity | `0` | +| `logging.SystemLogVerbosity` | System log verbosity | `4` | +| `logging.AuditLogVerbosity` | Audit log verbosity | `0` | +| `logging.PerformanceLogVerbosity` | Performance log verbosity | `0` | +| `logging.QixPerformanceLogVerbosity` | QIX Performance log verbosity | `0` | +| `logging.SessionLogVerbosity` | Session Performance log verbosity | `4` | +| `logging.SmartSearchQueryLogVerbosity` | Smart search query log verbosity | `3` | +| `logging.SmartSearchIndexLogVerbosity` | Smart search index log verbosity | `3` | +| `logging.ScriptLogVerbosity` | Script log verbosity | `4` | +| `logging.SSEVerbosity` | SSE (Server Side Extension) log verbosity | `4` | +| `logging.ExternalServicesLogVerbosity` | External Services log verbosity | `4` | +| `logging.EventBusVerbosity` | Eventbus (NATS) log verbosity | `4` | +| `logging.EnableDebugTracing` | Enables extra debug traces. Should be false in production. | `false` | +| `liveness.timeoutSeconds` | Number of seconds after which the probe times out. Defaults to 1 second if not set. Minimum value is 1. | `nil` | +| `liveness.failureThreshold` | When a Pod starts and the probe fails, Kubernetes will try failureThreshold times before giving up. Giving up in case of liveness probe means restarting the Pod. In case of readiness probe the Pod will be marked Unready. Defaults to 3 if not set. Minimum value is 1. | `nil` | +| `data.remoteConfigEnabled` | Enables engine to fetch connector and connection information remotely. If set to true data.connectionUrl and data.connectorUrl must be set. If set to false data.connectors will be used for connector information and connection information will be stored in the qvf files | `false` | +| `data.connectorUrl` | URL to the connector service. *Used when data.remoteConfigEnabled is set to true* | `nil` | +| `data.retryCount` | When the connector returns RESOURCE_EXHAUSTED, engine will re-fetch a new endpoint and retry. Can be used for client side load-balancing. Engine will try `retryCount` number of times before giving up. *Used when data.remoteConfigEnabled is set to true* | `3` | +| `data.retryInterval` | When the connector returns RESOURCE_EXHAUSTED, engine will re-fetch a new endpoint and retry. Can be used for client side load-balancing. Engine will wait `retryInterval` seconds before retries. *Used when data.remoteConfigEnabled is set to true* | `10` | +| `data.connectionUrl` | URL to the connection service *Used when data.remoteConfigEnabled is set to true* | `nil` | +| `data.connectors` | List of data connectors that engine can connect to during reload. *Used when data.remotConfigEnabled is false*.
Example:<br>connector:<br>-name: "my data"<br>  url: "http://mydata.com" | `nil` |
+| `analytics.connectors` | List of advanced analytics connectors that engine can connect. Example:<br>connector:<br>-name: "P"<br>  url: "http://pythoncon.com" | `nil` |
+| `bdi.enabled` | Enables engine bdi integration | `false` |
+| `stan.enabled` | Enables engine nats streaming integration | `true` |
+| `stan.uri` | nats streaming url | `nil` |
+| `stan.cluster` | nats streaming cluster name | `nil` |
+| `stan.maxreconnect` | max number of reconnect attempts to nats cluster | `60` |
+| `stan.reconnectwait` | number of seconds between reconnect attempts | `2` |
+| `stan.timeout` | number of seconds before connection try times out | `10` |
+| `stan.trace` | nats extensive logging | `false` |
+| `stan.transactions` | nats event persistence and recovery | `false` |
+| `stan.statistics.enabled` | Enables statistics event | `false` |
+| `stan.statistics.intervalMs` | Interval that the statistics event is sent on | `1000` |
+| `temporarycontents.enabled` | Enable temporary contents upload for exported apps functionality | `true` |
+| `temporarycontents.uri` | URI for temp-contents endpoint | `http://{.Release.Name}-temporary-contents:6080` |
+| `temporarycontents.import` | Enable temporary contents download for resumable upload functionality | `true` |
+| `featureflags.enabled` | Enable featureflag service | `true` |
+| `featureflags.uri` | URI for featureflag endpoint | `http://{.Release.Name}-feature-flags:8080` |
+| `groups.enabled` | Enable groups service | `false` |
+| `groups.uri` | URI for groups endpoint | `http://{.Release.Name}-groups:8080` |
+| `quota.enabled` | Enable quota service | `false` |
+| `quota.uri` | URI for quota endpoint | `http://{.Release.Name}-quotas:6080` |
+| `encryption.enabled` | Enable encryption of QVF files | `true` |
+| `encryption.uri` | URI for encryption service | `http://{.Release.Name}-encryption:8080` |
+| `prestop.enabled` | If set to true engine will wait for all sessions to be disconnected before terminating | `false` |
+| `deployments` | list of engine deployments including QlikView. | `see section below or values.yaml` |
+| `terminationGracePeriodSeconds` | Number of seconds to wait during pod termination until SIGKILL | `30` |
+| `progressDeadlineSeconds` | Number of seconds you want to wait for the deployment to progress before the system reports back that the deployment has failed progressing | `600` |
+| `strategy.rollingUpdate.maxUnavailable` | Specifies the maximum number of Pods that can be unavailable during the update process. | `25%` |
+| `strategy.rollingUpdate.maxSurge` | Specifies the maximum number of Pods that can be created over the desired number of Pods. | `25%` |
+| `dnsConfig` | DNS configurations. | `see values.yaml` |
+| `grpc.dnsResolver` | Declares which DNS resolver to use. | `native` |
+| `session.timeout` | Specifies the timeout in seconds before closing an idle websocket.
| `1200` | +| `redis-sub-chart' | Specifies which redis sub-chart to use | `redis` | +| `redis-user-state.client.connectTimeoutMs` | Timeout in ms while connecting to Redis server | `100` | +| `redis-user-state.client.socketTimeoutMs` | Timeout in ms for request to to Redis server | `100` | +| `redis-user-state.client.userStateTTLSeconds` | Expiry time for user state stored in Redis | `1800` | +| `redis-user-state.client.sentinel.connectTimeoutMs` | Timeout in ms while connecting to Redis sentinel | `100` | +| `redis-user-state.client.sentinel.socketTimeoutMs` | Timeout in ms for request to to Redis sentinel | `100` | +| `redis-user-state.client.sentinel.retryIntervalMs` | Interval in ms between retries while connecting to Redis sentinel | `100` | +| `redis-ha-user-state.client.connectTimeoutMs` | Timeout in ms while connecting to Redis HA server | `100` | +| `redis-ha-user-state.client.socketTimeoutMs` | Timeout in ms for request to to Redis HA server | `100` | +| `redis-ha-user-state.client.userStateTTLSeconds` | Expiry time for user state stored in Redis HA | `1800` | +| `redis-ha-user-state.client.sentinel.connectTimeoutMs` | Timeout in ms while connecting to Redis HA sentinel | `100` | +| `redis-ha-user-state.client.sentinel.socketTimeoutMs` | Timeout in ms for request to to Redis HA sentinel | `100` | +| `redis-ha-user-state.client.sentinel.retryIntervalMs` | Interval in ms between retries while connecting to Redis HA sentinel | `100` | +| `networkPolicy.enabled` | Enables an egress networkpolicy for engine. | false | +| `networkPolicy.externalcidr` | Describes a particular CIDR e.g. "0.0.0.0/0" allows all addresses. | `nil` | +| `networkPolicy.blockedcidrs` | List of ranges in the externalcidr that should be blocked. | `nil` | + + +### Deployments + +This chart supports multiple engine deployments with different settings such as labels, tolerances, resource limits, replicas, hpa. +Here is an example of deployments in values.yaml: + +```yaml +deployments: + - name: engines-default + labels: + qlik.com/default: true + - name: engine-qv-default + labels: + qlik.com/qv-default: true + qlikview: true + - name: engines-tenant1 + labels: + qlik.com/tenant: tenant1 + tolerations: + - effect: NoExecute + key: qlik.com/tenant + operator: "Equal" + value: tenant1 + replicas: 2 + resources: + limits: + memory: 100M + cpu: 1 + hpa: + enabled: true + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 75 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml engine +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Persistence + +Engine stores qlik database data files in the /qlik/apps directory in the container. +The chart mounts a Persistent Volume Claim at this location. The volume is created using dynamic volume provisioning. In order to disable this functionality you can change the values.yaml to disable persistence and use an emptyDir instead. 
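+
+As a rough sketch (using only the `persistence.*` parameters from the table above; the size and class name below are placeholders), persistence can be tuned, or switched off in favour of an emptyDir, with values such as:
+
+```yaml
+# Keep persistence, but size the dynamically provisioned claim explicitly
+persistence:
+  enabled: true
+  accessMode: ReadWriteMany
+  size: 10Gi
+  # storageClass: my-storage-class   # placeholder name of a pre-created StorageClass
+
+# ...or disable it entirely, falling back to an emptyDir (apps are lost when the pod is rescheduled)
+# persistence:
+#   enabled: false
+```
+
+Which of these is appropriate depends on the access mode required by the deployment, as described below.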
+
+### Access mode
+
+The access mode can be changed depending on the deployed usage scenario and the underlying storage:
+
+- A single node deployment can use `ReadWriteOnce`
+- A multi node read-only deployment where static apps are used can use `ReadOnlyMany`
+- A multi node deployment supporting multi user edit capabilities needs to use `ReadWriteMany`
+
+### StorageClass
+
+Dynamic provisioning is configured by:
+
+1. Create a StorageClass
+2. Install the helm chart
+
+```console
+$ helm install --name my-release --set persistence.storageClass=STORAGE_CLASS_NAME engine
+```
+
+#### Using the internal StorageClass
+
+Normally a StorageClass is created by an administrator prior to installing the engine helm chart. An option to create the StorageClass as part of the helm chart is provided to simplify certain deployment scenarios.
+Example configuration:
+
+```yaml
+persistence:
+  enabled: true
+  storageClass: "qlik-docs"
+  internalStorageClass:
+    enabled: true
+    definition:
+      provisioner: kubernetes.io/no-provisioner
+      parameters: {}
+      reclaimPolicy: Retain
+      mountOptions: {}
+```
+
+### Existing PersistentVolumeClaims
+
+You can also configure an external PersistentVolumeClaim by setting the claim name in the existingClaim parameter.
+
+1. Create a Persistent Volume
+2. Create a Persistent Volume Claim
+3. Install the chart.
+
+```console
+$ helm install --name my-release --set persistence.existingClaim=PVC_QLIKAPPS engine
+```
+
+## Redis
+If any Redis sub-chart is configured, then the state of a user session will be persisted during a configurable lifetime. This enables seamless movement of sessions between Engine pods when the cluster is scaled or upgraded. The Redis server may be configured at will by overriding the default values defined in the [Redis subchart](https://github.com/helm/charts/tree/master/stable/redis).
diff --git a/qliksense/charts/engine/charts/messaging/Chart.yaml b/qliksense/charts/engine/charts/messaging/Chart.yaml
new file mode 100644
index 0000000..38cea1f
--- /dev/null
+++ b/qliksense/charts/engine/charts/messaging/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+description: |
+  Messaging system services. NATS and NATS Streaming are supported. Other services can communicate with each other and orchestrate their work using the services provided by this chart.
+home: https://www.qlik.com
+keywords:
+- messaging
+- queue
+- nats
+- nats-streaming
+name: messaging
+sources:
+- https://github.com/nats-io/gnatsd
+- https://github.com/nats-io/nats-streaming-server
+- https://github.com/helm/charts/tree/master/stable/nats
+- https://github.com/nats-io/prometheus-nats-exporter
+- https://github.com/qlik-trial/nats-prom-exporter
+version: 1.7.4
diff --git a/qliksense/charts/engine/charts/messaging/README.md b/qliksense/charts/engine/charts/messaging/README.md
new file mode 100644
index 0000000..77654ec
--- /dev/null
+++ b/qliksense/charts/engine/charts/messaging/README.md
@@ -0,0 +1,284 @@
+# messaging
+
+This chart provides **messaging system** (a.k.a. message queue, message bus, etc.) capabilities for services.
+Currently, [NATS](https://www.nats.io) and [NATS Streaming](https://nats.io/documentation/streaming/nats-streaming-intro/)
+are included in this chart, but in the future, other message systems like RabbitMQ can also be added.
+ +## Installing the Chart + +To install the chart with the release name `messaging`: + +```console +helm install --name messaging qlik/messaging +``` + +## Uninstalling the Chart + +To uninstall/delete the `messaging` deployment: + +```console +helm delete messaging +``` + +## Configuration + +### NATS + +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------------- | ------------------------------------- | +| `nats.enabled` | enable NATS messaging system | `true` | +| `nats.image.registry` | NATS image registry | `qliktech-docker.jfrog.io` | +| `nats.image.repository` | NATS Image name | `qnatsd` | +| `nats.image.tag` | NATS Image tag | `0.2.4` | +| `nats.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats.image.pullSecrets` | specify image pull secrets | `artifactory-docker-secret` | +| `nats.replicaCount` | number of nats replicas | `1` | +| `nats.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats.auth.enabled` | enable authentication for nats clients | `true` | +| `nats.auth.user` | username for nats client authentication | `nats_client` | +| `nats.auth.password` | password for nats client authentication | `T0pS3cr3t` | +| `auth.users` | Client authentication users | `[]` See [Rotation](#how-to-rotate) | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `nats.auth.jwtUsers` | array of jwt authenticated users | See [Authentication](#authentication) | +| `nats.clusterAuth.enabled` | enable authentication for nats clustering | `false` | +| `nats.clusterAuth.user` | username for nats clustering authentication | `nats_cluster` | +| `nats.clusterAuth.password` | password for nats clustering authentication | random string | +| `nats.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats.client.service.type` | nats-client service type | `ClusterIP` | +| `nats.client.service.port` | nats-client service port | `4222` | +| `nats.cluster.service.type` | nats-cluster service type | `ClusterIP` | +| `nats.cluster.service.port` | nats-cluster service port | `6222` | +| `nats.monitoring.service.type` | nats-monitoring service type | `ClusterIP` | +| `nats.monitoring.service.port` | nats-monitoring service port | `8222` | +| `nats.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats.resources` | CPU and memory requests and limits for nats | `{}` | +| `extraArgs` | Optional flags for NATS | See [values.yaml](./values.yaml) | + +### NATS Streaming + +| Parameter | Description | Default | +| ------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | +| `nats-streaming.enabled` | enable NATS messaging system | `false` | +| `nats-streaming.image.registry` | NATS streaming image registry | `qliktech-docker.jfrog.io` | +| `nats-streaming.image.repository` | NATS streaming image name | `nats-streaming` | +| `nats-streaming.image.tag` | NATS Streaming image tag | `0.12.2` | +| `nats-streaming.image.pullPolicy` | pull policy for nats docker image | `IfNotPresent` | +| `nats-streaming.image.pullSecrets` | specify image pull secrets | `artifactory-registry-secret` | +| `nats-streaming.replicaCount` | 
number of nats replicas | `3` | +| `nats-streaming.antiAffinity` | anti-affinity for nats pod assignment | `soft` | +| `nats-streaming.auth.enabled` | enable authentication for nats clients | `true` | +| `nats-streaming.auth.user` | username for nats client authentication | `nats_client` | +| `nats-streaming.auth.password` | password for nats client authentication | `nil` (Uses Secret below for password) | +| `nats-streaming.auth.secretName` | secretName for nats client authentication | `{{ .Release.Name }}-nats-secret` | +| `nats-streaming.auth.secretKey` | secretKey for nats client authentication | `client-password` | +| `nats-streaming.statefulset.updateStrategy` | update strategy for nats statefulsets | `onDelete` | +| `nats-streaming.monitoring.service.type` | nats-streaming-monitoring service type | `ClusterIP` | +| `nats-streaming.monitoring.service.port` | nats-streaming-monitoring service port | `8222` | +| `nats-streaming.livenessProbe.enabled` | enable liveness probe | `true` | +| `nats-streaming.readinessProbe.enabled` | enable readiness probe | `true` | +| `nats-streaming.resources` | CPU and memory requests and limits for nats | `{}` | +| `nats-streaming.clusterID` | nats streaming cluster name id | `{{ .Release.Name }}-nats-streaming-cluster` | +| `nats-streaming.natsSvc` | external nats server url | `nats://{{ .Release.Name }}-nats-client:4222` | +| `nats-streaming.hbInterval` | Interval at which server sends heartbeat to a client | `10s` | +| `nats-streaming.hbTimeout` | How long server waits for a heartbeat response | `10s` | +| `nats-streaming.hbFailCount` | Number of failed heartbeats before server closes the client connection | `5` | +| `nats-streaming.persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `nats-streaming.persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `nats-streaming.persistence.volume.size` | Persistence volume size | `nil` | +| `nats-streaming.persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `nats-streaming.persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +### Network Policy for NATS and NATS Streaming + +| Parameter | Description | Default | +| -------------------------------------- | ---------------------------------------------------------------- | --------------------- | +| `networkPolicy.nats.enabled` | enable custom network policy for NATS messaging system | `false` | +| `networkPolicy.nats-streaming.enabled` | enable custom network policy for NATS Streaming messaging system | `false` | +| `networkPolicy.keys.release` | keys service release name for egress rules | `{{ .Release.Name }}` | + +## Requirements + +### Network Plugin to enable Network Policies in Kubernetes cluster + +This chart include options to enable [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the created +`nats` and `nats-streaming` clusters. + +Network policies are implemented by the network plugin, so the Kubernetes cluster must be configured with a networking solution which supports NetworkPolicy - +simply creating the resource without a controller to implement it will have no effect. 
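+
+As an illustrative sketch (the parameter names come from the table above; `my-release` is a placeholder for the keys service release), the chart-level policies are switched on through values such as:
+
+```yaml
+networkPolicy:
+  nats:
+    enabled: true
+  nats-streaming:
+    enabled: true
+  # keys:
+  #   release: my-release   # placeholder; defaults to the release name
+```
+
+These policies only take effect on clusters whose network plugin actually enforces NetworkPolicy, as noted above.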
+ +For local development, please refer to [Setting Up a Minikube Cluster - Configuring Network Plugin to support Network Policies](https://github.com/qlik-trial/elastic-charts/blob/master/docs/prerequisites/minikube-cluster.md#configuring-network-plugin-to-support-network-policies) +for detailed instructions. + +### Secrets + +For deploying this chart to **stage**/**prod**, you need the following secrets written to **vault**. + +*The passwords should not start with a number!* + +| Secret | Key | Purpose | +| -------------------------------------------------------------- | ------- | ----------------------------------- | +| `/secret/{environment}/messaging/{region}/natsClientPassword` | `value` | password for client authentication | +| `/secret/{environment}/messaging/{region}/natsClusterPassword` | `value` | password for cluster authentication | + +## Connecting to NATS / NATS Streaming + +### From the command line: +#### Port-forward NATS Client Service: +```sh + > kubectl port-forward messaging-nats-0 4222 +``` +#### Connect via `telnet`: +```sh + > telnet localhost 4222 +``` +#### Connect with no auth: +```sh + CONNECT {} +``` +#### Connect with auth: +```sh + CONNECT {"user":"my-user","pass":"T0pS3cr3t"} +``` +#### Subscribing to channel, publishing to a channel, and receiving the published message: +```sh + SUB foo 1 + +OK + PUB foo 11 + Hello World + +OK + MSG foo 1 11 + Hello World +``` + +### Using [go-nats](https://github.com/nats-io/go-nats/) and [go-nats-streaming](https://github.com/nats-io/go-nats-streaming) clients: +```golang +package main + +import ( + "log" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming" +) + +func main() { + nc, err := nats.Connect("nats://nats_client:asdf@localhost:4222") + if err != nil { + log.Fatal(err) + } + sc, err := stan.Connect("messaging-nats-streaming-cluster", "client-123", stan.NatsConn(nc)) + if err != nil { + log.Fatal(err) + } + sc.Publish("hello", []byte("msg1")) + + sc.Subscribe("hello", func(m *stan.Msg) { + log.Printf("[Received] %+v", m) + }, stan.StartWithLastReceived()) + + sc.Publish("hello", []byte("msg2")) + + select{} +} +``` + +### With Network Policies enabled + +To connect to `NATS` as a client with Network Policies enabled , the pod in which the service client is in must have the label +`{{ .Release.Name }}-nats-client=true`. + +Otherwise, if enabled, the `ingress` `Network Policy` for `NATS` will block incoming traffic from any pod without the appropriate label. + +`Network Policy` is enabled in `stage` and `production` environments. + +## Authentication + +It's important to know that when using NATS Streaming, a NATS connection is also required and that it is the NATS connection that handles authentication and authorization not the NATS Streaming connnection. + +### NATS to NATS-Streaming password rotation + +The nats chart supports an array of users (`nats.auth.users`) used for authenticating NATS-Streaming to NATS. NATS-Streaming will use the first entry in the array to authenticate to NATS. Any additional entries can still be used to authenticate against. + +#### How to rotate + +In this example we have a deployed cluster with a NATS-Streaming that is authenticated using user `user1` with password `password1` from the following config. We want to update this to use `password2`. +```yaml +auth: + users: + - user: user1 + password: password1 +``` + +1) Add new user/password to the first entry in the array, but leave the old entry as *second* in the list. Then `helm update` your release. 
+```yaml
+auth:
+  users:
+    - user: user2
+      password: password2
+    - user: user1
+      password: password1
+```
+2) NATS will now have both user/password pairs configured, but NATS-Streaming will still be using the original entry to authenticate. NATS-Streaming servers will need to be restarted to pick up the new password from the first entry in `nats.auth.users`.
+```sh
+kubectl delete pod {Release.Name}-nats-streaming-2 #wait for new pod to become ready
+kubectl delete pod {Release.Name}-nats-streaming-1 #wait for new pod to become ready
+kubectl delete pod {Release.Name}-nats-streaming-0 #wait for new pod to become ready
+```
+3) Finally, remove the old user from the `nats.auth.users` array and `helm upgrade` to remove authentication for the old user.
+```yaml
+auth:
+  users:
+    - user: user2
+      password: password2
+```
+
+### JWT Authentication
+
+NATS has been configured to allow authentication using service-to-service (S2S) JWTs, but in order to be authenticated, a service must be whitelisted.
+The `nats.auth.jwtUsers` value can be used to provide a whitelist of users that should be authenticated using a S2S JWT.
+**Note**: when using a S2S JWT, both the NATS username and the JWT `subject` must match.
+
+Adding a new service to the whitelist is as simple as updating the `nats.auth.jwtUsers` value:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+      - user: "my-other-service"
+      # ...etc
+```
+
+### Authorization
+
+The above method of adding a JWT authentication whitelist also allows for setting authorization rules.
+NATS [authorization rules](https://nats.io/documentation/managing_the_server/authorization/) can be configured on a per subject basis.
+
+The following is an example of adding publish/subscribe authorization rules:
+```yaml
+nats:
+  auth:
+    jwtUsers:
+      - user: "my-service"
+        stanPermissions:
+          publish:
+            - "events.mysubject.>" # service can publish to any subject that starts with `events.mysubject.`
+            - "system-events.mysubject" # service can publish to the `system-events.mysubject` subject
+          subscribe:
+            - "events.somesubject" # service can subscribe to the `events.somesubject` subject
+        natsPermissions:
+          publish:
+            - "events.mysubject1" # service can publish to the `events.mysubject1` subject
+          subscribe:
+            - "events.somesubject1" # service can subscribe to the `events.somesubject1` subject
+```
+Wildcard support works as follows:
+
+The dot character `.` is the token separator.
+
+The asterisk character `*` is a token wildcard match.
+`e.g. foo.* matches foo.bar, foo.baz, but not foo.bar.baz.`
+
+The greater-than symbol `>` is a full wildcard match.
+`e.g. foo.> matches foo.bar, foo.baz, foo.bar.baz, foo.bar.1, etc.`
diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/Chart.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/Chart.yaml
new file mode 100644
index 0000000..34633a0
--- /dev/null
+++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+appVersion: 0.6.0
+description: A NATS Streaming cluster setup
+home: https://nats.io/
+keywords:
+- NATS
+- Messaging
+- publish
+- subscribe
+- streaming
+- cluster
+- persistence
+name: nats-streaming
+version: 0.3.0
diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/README.md b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/README.md
new file mode 100644
index 0000000..c88074b
--- /dev/null
+++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/README.md
@@ -0,0 +1,133 @@
+# NATS Streaming Clustering Helm Chart
+
+Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft based replication.
+
+## Getting started
+
+This chart relies on an already available NATS Service to which the
+NATS Streaming nodes that will form a cluster can connect.
+You can install the NATS Operator and then use it to create a NATS cluster
+via the following:
+
+```console
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml
+$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml
+```
+
+This will create a NATS cluster in the `nats-io` namespace. Then, to
+install a NATS Streaming cluster, the URL to the NATS cluster can be
+specified as follows (using `my-release` as the name label for the
+cluster):
+
+```console
+$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster
+$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222
+```
+
+This will create 3 follower nodes plus an extra Pod which is
+configured to be in bootstrapping mode, which will start as the leader
+of the Raft group as soon as it joins.
+
+```console
+$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release"
+NAME                                          READY     STATUS    RESTARTS   AGE
+my-release-nats-streaming-cluster-0           1/1       Running   0          30s
+my-release-nats-streaming-cluster-1           1/1       Running   0          23s
+my-release-nats-streaming-cluster-2           1/1       Running   0          17s
+my-release-nats-streaming-cluster-bootstrap   1/1       Running   0          30s
+```
+
+Note that if the bootstrapping Pod fails, it will not be
+recreated; instead, one of the extra follower Pods will take over
+the leadership. The follower Pods are part of a Deployment, so
+in case of failure they will be recreated.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
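+
+The parameters documented in the configuration section below can be supplied individually with `--set`, or collected in a values file passed via `-f`. As a non-authoritative sketch (the parameter names are taken from the table that follows; the cluster ID, URL, credentials and storage class are placeholders), such a file might look like:
+
+```yaml
+replicaCount: 3
+clusterID: my-stan-cluster        # placeholder cluster name ID
+natsSvc: nats://nats_client:changeme@my-release-nats-client:4222   # placeholder NATS URL and credentials
+auth:
+  enabled: true
+  user: nats_client
+  password: changeme              # placeholder; auth.secretName/auth.secretKey can be used instead
+persistence:
+  volume:
+    enabled: true
+    storageClass: standard        # placeholder StorageClass
+    size: 1Gi
+```
+
+Values supplied this way simply override the defaults listed in the table.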
+ +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.volume.size` | Persistence volume size | `nil` | +| `persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/NOTES.txt b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/_helpers.tpl b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . -}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/sc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..75b0519 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.volume.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.volume.storageClass }} +{{ toYaml .Values.persistence.volume.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/statefulset.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..03ddc08 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,254 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ 
.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats-streaming/values.yaml b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/values.yaml new file mode 100644 index 0000000..f7920f3 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats-streaming/values.yaml @@ -0,0 +1,290 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + enabled: false + definition: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/Chart.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/Chart.yaml new file mode 100644 index 0000000..ba57b0c --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +home: https://nats.io/ +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png +keywords: +- nats +- messaging +- addressing +- discovery +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: nats +sources: +- https://github.com/bitnami/bitnami-docker-nats +version: 2.3.1 diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/README.md b/qliksense/charts/engine/charts/messaging/charts/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as user and `T0pS3cr3t` as password credentials. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you have a need for additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file consists a configuration to deploy a scalable and high-available NATS deployment for production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/NOTES.txt b/qliksense/charts/engine/charts/messaging/charts/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As + alternative, you can also switch to "auth.enabled=true" providing a valid + password on "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . 
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
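+
+   (Optional) As a quick sanity check that the exporter side-car is serving data, keep the
+   port-forward from step 3 running and fetch the metrics endpoint directly. This assumes
+   `curl` is available on your machine; any HTTP client will do:
+
+       curl -s http://127.0.0.1:{{ .Values.metrics.port }}/metrics | head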
+{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/_helpers.tpl b/qliksense/charts/engine/charts/messaging/charts/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/client-svc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/cluster-svc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/configmap.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/configmap.yaml new file mode 100644 index 0000000..f77e6c6 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/configmap.yaml @@ -0,0 +1,99 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + {user: {{ .user | quote }}, password: {{ .password | quote }}} + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/headless-svc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/ingress.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/monitoring-svc.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/networkpolicy.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/statefulset.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/statefulset.yaml new file mode 100644 index 0000000..e4ce77a --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/statefulset.yaml @@ -0,0 +1,161 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/templates/tls-secret.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/charts/nats/values.yaml b/qliksense/charts/engine/charts/messaging/charts/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/charts/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/Chart.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/Chart.yaml new file mode 100644 index 0000000..2a44467 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +description: A NATS Streaming cluster setup +name: nats-streaming +version: 0.3.0 +appVersion: 0.6.0 +keywords: +- NATS +- Messaging +- publish +- subscribe +- streaming +- cluster +- persistence +home: https://nats.io/ diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/README.md b/qliksense/charts/engine/charts/messaging/nats-streaming/README.md new file mode 100644 index 0000000..c88074b --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/README.md @@ -0,0 +1,133 @@ +# NATS Streaming Clustering Helm Chart + +Sets up a [NATS](http://nats.io/) Streaming server cluster with Raft-based replication. + +## Getting started + +This chart relies on an already available NATS Service to which the +NATS Streaming nodes that will form a cluster can connect. +You can install the NATS Operator and then use it to create a NATS cluster +via the following: + +```console +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml +$ kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/example-nats-cluster.yaml +``` + +This will create a NATS cluster in the `nats-io` namespace. Then, to +install a NATS Streaming cluster, specify the URL of the NATS cluster +as follows (using `my-release` as the release name for the cluster): + +```console +$ git clone https://github.com/wallyqs/nats-streaming-cluster-chart nats-streaming-cluster +$ helm install nats-streaming-cluster -n my-release --set natsUrl=nats://nats.nats-io.svc.cluster.local:4222 +``` + +This will create 3 follower nodes plus an extra Pod configured in +bootstrapping mode, which starts as the leader of the Raft group as +soon as it joins. + +```console +$ kubectl get pods --namespace default -l "app=nats-streaming-cluster,release=my-release" +NAME READY STATUS RESTARTS AGE +my-release-nats-streaming-cluster-0 1/1 Running 0 30s +my-release-nats-streaming-cluster-1 1/1 Running 0 23s +my-release-nats-streaming-cluster-2 1/1 Running 0 17s +my-release-nats-streaming-cluster-bootstrap 1/1 Running 0 30s +``` + +Note that if the bootstrapping Pod fails, it will not be recreated; +instead, one of the extra follower Pods will take over the leadership. +The follower Pods are part of a Deployment, so they will be recreated +on failure.
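+
+As an alternative to the `--set` flags shown above, the same settings can be
+collected in a values file. The snippet below is only a minimal sketch based on
+the parameters documented in this chart's [values.yaml](values.yaml)
+(`natsSvc`, `clusterID`, `replicaCount`, `auth.*`, `persistence.volume.*`);
+the file name `my-values.yaml` and the concrete values are just examples.
+
+```yaml
+# my-values.yaml -- example overrides for the nats-streaming chart
+natsSvc: "nats://nats.nats-io.svc.cluster.local:4222"  # external NATS Server URL
+clusterID: "my-streaming-cluster"                      # NATS Streaming cluster ID
+replicaCount: 3                                        # number of streaming nodes
+
+auth:
+  enabled: true
+  user: nats_client
+  password: change-me
+
+persistence:
+  volume:
+    enabled: true        # use a volume claim instead of emptyDir
+    size: 10Gi
+    # storageClass: ""   # optionally pin a specific StorageClass
+```
+
+Such a file can then be passed in place of the `--set` flags, e.g.
+`helm install nats-streaming-cluster -n my-release -f my-values.yaml`.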
+ +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the +chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS Streaming image registry | `docker.io` | +| `image.repository` | NATS Streaming Image name | `nats-streaming` | +| `image.tag` | NATS Streaming Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.secretName` | Client authentication secret name | `nil` | +| `auth.secretKey` | Client authentication secret key | `nil` | +| `clusterID` | NATS Streaming Cluster Name ID | `"test-cluster"` | +| `natsSvc` | External NATS Server URL | `"nats://username:password@nats:4222"` | +| `maxChannels` | Max # of channels | `100` | +| `maxSubs` | Max # of subscriptions per channel | `1000` | +| `maxMsgs` | Max # of messages per channel | `"1000000"` | +| `maxBytes` | Max messages total size per channel | `900mb` | +| `maxAge` | Max duration a message can be stored | `"0s" (unlimited)` | +| `maxMsgs` | Max # of messages per channel | `1000000` | +| `debug` | Enable debugging | `false` | +| `trace` | Enable detailed tracing | `false` | +| `replicaCount` | Number of NATS Streaming nodes | `3` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `RollingUpdate` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `monitoring.service.type` | Kubernetes Service type (NATS Streaming monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS Streaming monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS Streaming monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS Streaming monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS Streaming monitoring service type is `LoadBalancer` | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `sidecars` | Attach additional containers to the pod. | `nil` | + +### File Specific Persistence Configuration + +| Parameter | Description | Default | +| --------------------------------- | ---------------------------------- | ---------------- | +| `persistence.file.compactEnabled` | Enable compaction | true | +| `persistence.file.bufferSize` | File buffer size (in bytes) | "2097152" (2MB) | +| `persistence.file.crc` | Enable file CRC-32 checksum | true | +| `persistence.file.sync` | Enable File.Sync on Flush | true | +| `persistence.file.fdsLimit` | Max File Descriptor limit (approx) | 0 (unlimited) | + +### Storage Specific Persistence Configuration + +| Parameter | Description | Default | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `persistence.volume.enabled` | Use specified storage class for volume claim (emptyDir is used if disabled) | `false` | +| `persistence.volume.storageClass` | Storage class of backing persistent volume claim | `nil` | +| `persistence.volume.size` | Persistence volume size | `nil` | +| `persistence.volume.internalStorageClass.enabled` | Use an internal StorageClass | `false` | +| `persistence.volume.internalStorageClass.definition` | Definition of the internal StorageClass. Configuration includes provider and parameters. Only needed if the internal StorageClass is enabled | `{}` | + +*Additional configuration parameters not typically used may be found in [values.yaml](values.yaml).* diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/NOTES.txt b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/NOTES.txt new file mode 100644 index 0000000..cc0eaa0 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/NOTES.txt @@ -0,0 +1,25 @@ +NATS Streaming server has been installed. + +Externally monitor the NATS streaming server by running these commands. + +{{- if contains "NodePort" .Values.monitoring.service.type }} + +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats-streaming-cluster.fullname" . 
}}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + +echo http://$NODE_IP:$NODE_PORT/streaming + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "nats-streaming-cluster.fullname" . }}' +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats-streaming-cluster.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +echo http://$SERVICE_IP:{{ .Values.monitoring.service.port }}/streaming + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + +export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "nats-streaming-cluster.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 8222:{{ .Values.monitoring.service.port }} +echo "Visit http://127.0.0.1:8222/streaming to monitor the NATS streaming server" + +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/_helpers.tpl b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/_helpers.tpl new file mode 100644 index 0000000..81001e5 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/_helpers.tpl @@ -0,0 +1,77 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nats-streaming-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats-streaming-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming-cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper NATS Streaming image name +*/}} +{{- define "nats-streaming-cluster.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a NATS Streaming cluster peers list string based on fullname, namespace, # of servers +in a format like "host-1,host-2,host-3" +*/}} +{{- define "nats-streaming-cluster.peers" -}} +{{- $name := include "nats-streaming-cluster.fullname" . 
-}} +{{- $nats_streaming_cluster := dict "peers" (list) -}} +{{- range $idx, $v := until (int .Values.replicaCount) }} +{{- $noop := printf "%s-%d" $name $idx | append $nats_streaming_cluster.peers | set $nats_streaming_cluster "peers" -}} +{{- end }} +{{- printf "%s" (join "," $nats_streaming_cluster.peers) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/monitoring-svc.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/monitoring-svc.yaml new file mode 100644 index 0000000..2950f19 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }}-monitoring + labels: + app: {{ template "nats-streaming-cluster.name" . }} + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - name: monitoring + port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/networkpolicy.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/networkpolicy.yaml new file mode 100644 index 0000000..b642078 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/networkpolicy.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/sc.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/sc.yaml new file mode 100644 index 0000000..75b0519 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/sc.yaml @@ -0,0 +1,7 @@ +{{- if .Values.persistence.volume.internalStorageClass.enabled -}} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.persistence.volume.storageClass }} +{{ toYaml .Values.persistence.volume.internalStorageClass.definition }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/templates/statefulset.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/statefulset.yaml new file mode 100644 index 0000000..03ddc08 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/templates/statefulset.yaml @@ -0,0 +1,254 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "nats-streaming-cluster.fullname" . }} + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "nats-streaming-cluster.name" . }} + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats-streaming-cluster.name" . }}" + chart: "{{ template "nats-streaming-cluster.chart" . }}" + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats-streaming-cluster.name" . 
}}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats-streaming-cluster.name" . }} + image: {{ template "nats-streaming-cluster.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.auth.enabled }} + - name: USER + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientUser }} + {{- else }} + value: {{ .Values.auth.user }} + {{- end }} + {{- if .Values.auth.token }} + - name: AUTH + value: {{ .Values.auth.token }} + {{- else }} + - name: PASSWORD + {{- if .Values.auth.secretName }} + valueFrom: + secretKeyRef: + name: {{ tpl (.Values.auth.secretName) . }} + key: {{ .Values.auth.secretClientPassword }} + {{- else }} + value: {{ .Values.auth.password }} + {{- end }} + {{- end }} + {{- end }} + args: [ + "-clustered", + "-cid", "{{ tpl (.Values.clusterID) . }}", + "-m", "{{ .Values.monitoring.service.port }}", + "-ns", "{{ tpl (.Values.natsSvc) . }}", + "-mc", "{{ .Values.maxChannels }}", + "-msu", "{{ .Values.maxSubs }}", + "-mm", "{{ .Values.maxMsgs }}", + "-mb", "{{ .Values.maxBytes }}", + "-ma", "{{ .Values.maxAge }}", + "-hbi", "{{ .Values.hbInterval }}", + "-hbt", "{{ .Values.hbTimeout }}", + "-hbf", "{{ .Values.hbFailCount }}", + + "--cluster_node_id", "$(POD_NAME)", + "--cluster_peers", "{{ template "nats-streaming-cluster.peers" . }}", + "--store", "file", + "--dir", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/data", + "--cluster_log_path", "/nats/{{ tpl (.Values.clusterID) . }}/$(POD_NAME)/raft", + {{- if .Values.cluster_raft_logging }} + "--cluster_raft_logging", + {{- end }} + {{- if .Values.persistence.file.compactEnabled }} + "--file_compact_enabled", + "--file_compact_frag", "{{ .Values.persistence.file.compactFrag }}", + "--file_compact_interval", "{{ .Values.persistence.file.compactInterval }}", + "--file_compact_min_size", "{{ .Values.persistence.file.compactMinSize }}", + {{- end }} + "--file_buffer_size", "{{ .Values.persistence.file.bufferSize }}", + {{- if .Values.persistence.file.crc }} + "--file_crc", + "--file_crc_poly", "{{ .Values.persistence.file.crcPoly }}", + {{- end }} + {{- if .Values.persistence.file.sync }} + "--file_sync", + {{- end }} + "--file_slice_max_msgs", "{{ .Values.persistence.file.sliceMaxMsgs }}", + "--file_slice_max_bytes", "{{ .Values.persistence.file.sliceMaxBytes }}", + "--file_slice_max_age", "{{ .Values.persistence.file.sliceMaxAge }}", + {{- if ne .Values.persistence.file.sliceArchiveScript "" }} + "--file_slice_archive_script", "{{ .Values.persistence.file.sliceArchiveScript }}", + {{- end }} + "--file_fds_limit", "{{ .Values.persistence.file.fdsLimit }}", + "--file_parallel_recovery", "{{ .Values.persistence.file.parallelRecovery }}", + + {{- if .Values.auth.enabled }} + "--user", "$(USER)", + {{- if .Values.auth.token }} + "--auth", "$(AUTH)", + {{- else }} + "--pass", "$(PASSWORD)", + {{- end }} + {{- end }} + + {{- if .Values.debug }} + "-SD", + {{- end }} + {{- if .Values.trace }} + "-SV", + {{- end }} + ] + ports: + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + volumeMounts: + - name: datadir + mountPath: /nats + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ 
.Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + {{ if not .Values.persistence.volume.enabled }} + - name: datadir + emptyDir: {} + {{- end }} + {{- if .Values.persistence.volume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: "{{ .Values.persistence.volume.size }}" + {{- if .Values.persistence.volume.storageClass }} + {{- if (eq "-" .Values.persistence.volume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.volume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats-streaming/values.yaml b/qliksense/charts/engine/charts/messaging/nats-streaming/values.yaml new file mode 100644 index 0000000..f7920f3 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats-streaming/values.yaml @@ -0,0 +1,290 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## NATS Streaming image +## +image: + registry: docker.io + repository: nats-streaming + tag: 0.11.2 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS Streaming replicas +replicaCount: 3 + +# +# Streaming server configuration +# + +# Cluster ID (default: test-cluster) +clusterID: "test-cluster" + +# Connect to this external NATS Server URL +natsSvc: "nats://nats:4222" + +# Max number of channels (0 for unlimited) +maxChannels: 100 + +# Max number of subscriptions per channel (0 for unlimited) +maxSubs: 1000 + +# Max number of messages per channel (0 for unlimited) +maxMsgs: "1000000" + +# Max messages total size per channel (0 for unlimited) +maxBytes: "900mb" + +# Max duration a message can be stored ("0s" for unlimited) +maxAge: 0 + +# +# ADVANCED CONFIGURATION: Change these with caution. +# + +# Interval at which server sends heartbeat to a client +hbInterval: 30s + +# How long server waits for a heartbeat response +hbTimeout: 10s + +# Number of failed heartbeats before server closes the client connection +hbFailCount: 330 + +# Use for general debugging. Enabling this will negatively affect performance. +debug: false + +# Do not normally set this as it will drastically decrease performance and generate +# volumous logs. +trace: false + +# Use for raft related debugging +cluster_raft_logging: false + +persistence: + file: + + # Enable compaction + compactEnabled: true + + # Enable file CRC-32 checksum + crc: true + + # Enable File.Sync on Flush + sync: true + + # Store will try to use no more file descriptors than this given limit + fdsLimit: 0 + + ## + ## ADVANCED CONFIGURATION: Change these with caution. + ## + # File buffer size (in bytes) + bufferSize: "2097152" + + # File fragmentation % threshold for compaction + compactFrag: 50 + + # Minimum interval (in seconds) between file compactions, 5min + compactInterval: 300 + + # Minimum file size for compaction + compactMinSize: "1048576" + + # Polynomial used to make the table used for CRC-32 checksum (default is crc32.IEEE) + crcPoly : "3988292384" + + # Maximum number of messages per file slice (subject to channel limits) + sliceMaxMsgs: 0 + + # Maximum file slice size - including index file (subject to channel limits) 64MB + sliceMaxBytes: "67108931" + + # Maximum file slice duration starting when the first message is stored (subject to channel limits) + sliceMaxAge: 0 + + # Path to script to use if you want to archive a file slice being removed + sliceArchiveScript: "" + + # On startup, number of channels that can be recovered in parallel + parallelRecovery: 1 + + volume: + # If false, emptyDir will be used as a volume. + enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + enabled: false + definition: {} + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: false + fsGroup: 1001 + runAsUser: 1001 + +## NATS Streaming Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + ## Annotations for Promethues metrics + # prometheus.io/scrape: "true" + # prometheus.io/port: "7777" + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. +## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/nats-streaming-server#authorization +## +auth: + enabled: false + # user: nats_client + # password: + # token: + # secretName: "{{ .Release.Name }}-nats-secret" + # secretKey: "client-password" + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + +## NATS Streaming svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + # TODO: Uncomment these args once synadia/prometheus-nats-exporter adds NATS Streaming support + # - -serverz + # - -storez + # - -clientz + # - -channelz + +## Sidecars +sidecars: + ## e.g. + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/engine/charts/messaging/nats/Chart.yaml b/qliksense/charts/engine/charts/messaging/nats/Chart.yaml new file mode 100644 index 0000000..63250ef --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/Chart.yaml @@ -0,0 +1,17 @@ +name: nats +version: 2.3.1 +appVersion: 1.3.0 +description: An open-source, cloud-native messaging system +keywords: +- nats +- messaging +- addressing +- discovery +home: https://nats.io/ +sources: +- https://github.com/bitnami/bitnami-docker-nats +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl +icon: https://bitnami.com/assets/stacks/nats/img/nats-stack-110x117.png diff --git a/qliksense/charts/engine/charts/messaging/nats/README.md b/qliksense/charts/engine/charts/messaging/nats/README.md new file mode 100644 index 0000000..368d388 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/README.md @@ -0,0 +1,194 @@ +# NATS + +[NATS](https://nats.io/) is an open-source, cloud-native messaging system. It provides a lightweight server that is written in the Go programming language. + +## TL;DR + +```bash +$ helm install stable/nats +``` + +## Introduction + +This chart bootstraps a [NATS](https://github.com/bitnami/bitnami-docker-nats) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/nats +``` + +The command deploys NATS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the NATS chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `image.registry` | NATS image registry | `docker.io` | +| `image.repository` | NATS Image name | `bitnami/nats` | +| `image.tag` | NATS Image tag | `{VERSION}` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `auth.enabled` | Switch to enable/disable client authentication | `true` | +| `auth.user` | Client authentication user | `nats_cluster` | +| `auth.password` | Client authentication password | `random alhpanumeric string (10)` | +| `auth.token` | Client authentication token | `nil` | +| `auth.users` | Client authentication users | `nil` | +| `auth.users[].user` | Client authentication user | `nil` | +| `auth.users[].password` | Client authentication password | `nil` | +| `clusterAuth.enabled` | Switch to enable/disable cluster authentication | `true` | +| `clusterAuth.user` | Cluster authentication user | `nats_cluster` | +| `clusterAuth.password` | Cluster authentication password | `random alhpanumeric string (10)` | +| `clusterAuth.token` | Cluster authentication token | `nil` | +| `debug.enabled` | Switch to enable/disable debug on logging | `false` | +| `debug.trace` | Switch to enable/disable trace debug level on logging | `false` | +| `debug.logtime` | Switch to enable/disable logtime on logging | `false` | +| `maxConnections` | Max. number of client connections | `nil` | +| `maxControlLine` | Max. protocol control line | `nil` | +| `maxPayload` | Max. payload | `nil` | +| `writeDeadline` | Duration the server can block on a socket write to a client | `nil` | +| `replicaCount` | Number of NATS nodes | `1` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `statefulset.updateStrategy` | Statefulsets Update strategy | `OnDelete` | +| `statefulset.rollingUpdatePartition` | Partition for Rolling Update strategy | `nil` | +| `podLabels` | Additional labels to be added to pods | {} | +| `podAnnotations` | Annotations to be added to pods | {} | +| `nodeSelector` | Node labels for pod assignment | `nil` | +| `schedulerName` | Name of an alternate | `nil` | +| `antiAffinity` | Anti-affinity for pod assignment | `soft` | +| `tolerations` | Toleration labels for pod assignment | `nil` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `extraArgs` | Optional flags for NATS | `[]` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `6` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `client.service.type` | Kubernetes Service type (NATS client) | `ClusterIP` | +| `client.service.port` | NATS client port | `4222` | +| `client.service.nodePort` | Port to bind to for NodePort service type (NATS client) | `nil` | +| `client.service.annotations` | Annotations for NATS client service | {} | +| `client.service.loadBalancerIP` | loadBalancerIP if NATS client service type is `LoadBalancer` | `nil` | +| `cluster.service.type` | Kubernetes Service type (NATS cluster) | `ClusterIP` | +| `cluster.service.port` | NATS cluster port | `6222` | +| `cluster.service.nodePort` | Port to bind to for NodePort service type (NATS cluster) | `nil` | +| `cluster.service.annotations` | Annotations for NATS cluster service | {} | +| `cluster.service.loadBalancerIP` | loadBalancerIP if NATS cluster service type is `LoadBalancer` | `nil` | +| `cluster.noAdvertise` | Do not advertise known cluster IPs to clients | `false` | +| `monitoring.service.type` | Kubernetes Service type (NATS monitoring) | `ClusterIP` | +| `monitoring.service.port` | NATS monitoring port | `8222` | +| `monitoring.service.nodePort` | Port to bind to for NodePort service type (NATS monitoring) | `nil` | +| `monitoring.service.annotations` | Annotations for NATS monitoring service | {} | +| `monitoring.service.loadBalancerIP` | loadBalancerIP if NATS monitoring service type is `LoadBalancer` | `nil` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hosts[0].name` | Hostname for NATS monitoring | `nats.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `nats.local-tls-secret` | +| `ingress.hosts[0].annotations` | Annotations for this host's ingress record | `[]` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Allow external connections | `true` | +| `metrics.enabled` | Enable Prometheus metrics via exporter side-car | `false` | +| `metrics.image.registry` | Prometheus metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Prometheus metrics exporter image name | `synadia/prometheus-nats-exporter` | +| `metrics.image.tag` | Prometheus metrics exporter image tag | `0.1.0` | +| `metrics.image.pullPolicy` | Prometheus metrics image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Prometheus metrics image pull secrets | `nil` | +| `metrics.port` | Prometheus metrics exporter port | `7777` | +| `metrics.podAnnotations` | Prometheus metrics exporter annotations | `prometheus.io/scrape: "true"`, `prometheus.io/port: "7777"` | +| `metrics.resources` | Prometheus metrics exporter resource requests/limit | {} | +| `sidecars` | Attach additional containers to the pod 
| `nil` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + + +```bash +$ helm install --name my-release \ + --set auth.enabled=true,auth.user=my-user,auth.password=T0pS3cr3t \ + stable/nats +``` + +The above command enables NATS client authentication with `my-user` as the user and `T0pS3cr3t` as the password. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/nats +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Sidecars + +If you need additional containers to run within the same pod as NATS (e.g. an additional metrics or logging exporter), you can add them via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Production settings and horizontal scaling + +The [values-production.yaml](values-production.yaml) file contains a configuration for deploying a scalable, highly available NATS setup in production environments. We recommend that you base your production configuration on this template and adjust the parameters appropriately. + +```console +$ curl -O https://raw.githubusercontent.com/kubernetes/charts/master/stable/nats/values-production.yaml +$ helm install --name my-release -f ./values-production.yaml stable/nats +``` + +To horizontally scale this chart, run the following command to scale the number of nodes in your NATS replica set. + +```console +$ kubectl scale statefulset my-release-nats --replicas=3 +``` + +## Upgrading + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is nats: + +```console +$ kubectl delete statefulset nats-nats --cascade=false +``` diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/NOTES.txt b/qliksense/charts/engine/charts/messaging/nats/templates/NOTES.txt new file mode 100644 index 0000000..224df8f --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/NOTES.txt @@ -0,0 +1,88 @@ +** Please be patient while the chart is being deployed ** + +{{- if or (contains .Values.client.service.type "LoadBalancer") (contains .Values.client.service.type "nodePort") }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "client.service.type=NodePort/LoadBalancer" and "auth.enabled=false" + you have most likely exposed the NATS service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP". As an + alternative, you can also switch to "auth.enabled=true" and provide a valid + password via the "auth.password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +NATS can be accessed via port {{ .Values.client.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" .
}}-client.{{ .Release.Namespace }}.svc.cluster.local + +{{- if .Values.auth.enabled }} +To get the authentication credentials, run: + + export NATS_USER=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 user | awk '{print $2}') + export NATS_PASS=$(kubectl get cm --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }} -o jsonpath='{.data.*}' | grep -m 1 password | awk '{print $2}') + echo -e "Client credentials:\n\tUser: $NATS_USER\n\tPassword: $NATS_PASS" + +{{- end }} + +NATS monitoring service can be accessed via port {{ .Values.monitoring.service.port }} on the following DNS name from within your cluster: + + {{ template "nats.fullname" . }}-monitoring.{{ .Release.Namespace }}.svc.cluster.local + +To access the Monitoring svc from outside the cluster, follow the steps below: + +{{- if .Values.ingress.enabled }} + +1. Get the hostname indicated on the Ingress Rule and associate it to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring -o jsonpath='{.spec.rules[0].host}') + echo "Monitoring URL: http://$HOSTNAME/" + echo "$CLUSTER_IP $HOSTNAME" | sudo tee -a /etc/hosts + +2. Open a browser and access the NATS monitoring browsing to the Monitoring URL + +{{- else }} + +1. Get the NATS monitoring URL by running: + +{{- if contains "NodePort" .Values.monitoring.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nats.fullname" . }}-monitoring) + echo "Monitoring URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.monitoring.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "nats.fullname" . }}-monitoring' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-monitoring --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "Monitoring URL: http://$SERVICE_IP/" + +{{- else if contains "ClusterIP" .Values.monitoring.service.type }} + + echo "Monitoring URL: http://127.0.0.1:{{ .Values.monitoring.service.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "nats.fullname" . }}-monitoring {{ .Values.monitoring.service.port }}:{{ .Values.monitoring.service.port }} +{{- end }} + +2. Access NATS monitoring by opening the URL obtained in a browser. +{{- end }} + +{{- if .Values.metrics.enabled }} + +3. Get the NATS Prometheus Metrics URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "nats.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +4. Access NATS Prometheus metrics by opening the URL obtained in a browser. 
+{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/_helpers.tpl b/qliksense/charts/engine/charts/messaging/nats/templates/_helpers.tpl new file mode 100644 index 0000000..ef93757 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nats.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nats.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Nats image name +*/}} +{{- define "nats.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random alphanumeric password string. +We prepend a random letter to the string to avoid password validation errors +*/}} +{{- define "nats.randomPassword" -}} +{{- randAlpha 1 -}}{{- randAlphaNum 9 -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/client-svc.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/client-svc.yaml new file mode 100644 index 0000000..aacab44 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/client-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-client + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . 
}}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.client.service.annotations }} + annotations: +{{ toYaml .Values.client.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.client.service.type }} + {{- if and (eq .Values.client.service.type "LoadBalancer") .Values.client.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.client.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.client.service.port }} + targetPort: client + name: client + {{- if and (eq .Values.client.service.type "NodePort") (not (empty .Values.client.service.nodePort)) }} + nodePort: {{ .Values.client.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/cluster-svc.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/cluster-svc.yaml new file mode 100644 index 0000000..b48c791 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/cluster-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-cluster + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.cluster.service.annotations }} + annotations: +{{ toYaml .Values.cluster.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.cluster.service.type }} + {{- if and (eq .Values.cluster.service.type "LoadBalancer") .Values.cluster.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.cluster.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.cluster.service.port }} + targetPort: cluster + name: cluster + {{- if and (eq .Values.cluster.service.type "NodePort") (not (empty .Values.cluster.service.nodePort)) }} + nodePort: {{ .Values.cluster.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/configmap.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/configmap.yaml new file mode 100644 index 0000000..f77e6c6 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/configmap.yaml @@ -0,0 +1,99 @@ +{{- $authPwd := default (include "nats.randomPassword" .) .Values.auth.password -}} +{{- $clusterAuthPwd := default (include "nats.randomPassword" .) .Values.clusterAuth.password -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + name: {{ template "nats.fullname" . }} +data: + gnatsd.conf: |- + listen: 0.0.0.0:{{ .Values.client.service.port }} + http: 0.0.0.0:{{ .Values.monitoring.service.port }} + + {{- if .Values.clientAdvertise }} + client_advertise: {{ tpl (.Values.clientAdvertise) . 
}} + {{- end }} + + # Authorization for client connections + {{- if .Values.auth.enabled }} + authorization { + {{- if .Values.auth.user }} + user: {{ .Values.auth.user }} + password: {{ $authPwd }} + {{- else if .Values.auth.token }} + token: {{ .Values.auth.token }} + {{- end }} + timeout: 1 + + {{- if .Values.auth.users }} + users: [ + {{- range .Values.auth.users }} + {user: {{ .user | quote }}, password: {{ .password | quote }}} + {{- end }} + ] + {{- end }} + } + {{- end }} + + # Logging options + debug: {{ .Values.debug.enabled }} + trace: {{ .Values.debug.trace }} + logtime: {{ .Values.debug.logtime }} + + # Pid file + pid_file: "/tmp/gnatsd.pid" + + # Some system overides + {{- if .Values.maxConnections }} + max_connections: {{ .Values.maxConnections }} + {{- end }} + {{- if .Values.maxControlLine }} + max_control_line: {{ .Values.maxControlLine }} + {{- end }} + {{- if .Values.maxPayload }} + max_payload: {{ .Values.maxPayload }} + {{- end }} + {{- if .Values.writeDeadline }} + write_deadline: {{ .Values.writeDeadline | quote }} + {{- end }} + + + # Clustering definition + cluster { + listen: 0.0.0.0:{{ .Values.cluster.service.port }} + + no_advertise: {{ .Values.cluster.noAdvertise }} + + # Authorization for cluster connections + {{- if .Values.clusterAuth.enabled }} + authorization { + {{- if .Values.clusterAuth.user }} + user: {{ .Values.clusterAuth.user }} + password: {{ $clusterAuthPwd }} + {{- else if .Values.clusterAuth.token }} + token: {{ .Values.clusterAuth.token }} + {{- end }} + timeout: 1 + } + {{- end }} + + # Routes are actively solicited and connected to from this server. + # Other servers can connect to us if they supply the correct credentials + # in their routes definitions from above + routes = [ + {{- if .Values.clusterAuth.enabled }} + {{- if .Values.clusterAuth.user }} + nats://{{ .Values.clusterAuth.user }}:{{ $clusterAuthPwd }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- else if .Values.clusterAuth.token }} + nats://{{ .Values.clusterAuth.token }}@{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + {{- else }} + nats://{{ template "nats.fullname" . }}-cluster:{{ .Values.cluster.service.port }} + {{- end }} + ] + } + users.json: {{ if .Values.auth.enabled }}{{ toJson .Values.auth.jwtUsers | quote }}{{else}}"[]"{{ end }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/headless-svc.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/headless-svc.yaml new file mode 100644 index 0000000..d340551 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/headless-svc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-headless + labels: + app: {{ template "nats.name" . }} + chart: {{ template "nats.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: client + port: 4222 + targetPort: client + - name: cluster + port: 6222 + targetPort: cluster + selector: + app: {{ template "nats.name" . 
}} + release: {{ .Release.Name | quote }} \ No newline at end of file diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/ingress.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/ingress.yaml new file mode 100644 index 0000000..eec9749 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled -}} +{{- range .Values.ingress.hosts }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "nats.fullname" $ }}-monitoring + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + annotations: + {{- if .tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ template "nats.fullname" $ }}-monitoring + servicePort: monitoring +{{- if .tls }} + tls: + - hosts: + - {{ .name }} + secretName: {{ .tlsSecret }} +{{- end }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/monitoring-svc.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/monitoring-svc.yaml new file mode 100644 index 0000000..f8f4081 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/monitoring-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nats.fullname" . }}-monitoring + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.monitoring.service.annotations }} + annotations: +{{ toYaml .Values.monitoring.service.annotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.monitoring.service.type }} + {{- if and (eq .Values.monitoring.service.type "LoadBalancer") .Values.monitoring.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.monitoring.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.monitoring.service.port }} + targetPort: monitoring + name: monitoring + {{- if and (eq .Values.monitoring.service.type "NodePort") (not (empty .Values.monitoring.service.nodePort)) }} + nodePort: {{ .Values.monitoring.service.nodePort }} + {{- end }} + selector: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/networkpolicy.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/networkpolicy.yaml new file mode 100644 index 0000000..12032f8 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/networkpolicy.yaml @@ -0,0 +1,30 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . 
}}" + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.client.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . }}-client: "true" + {{- end }} + - ports: + - port: {{ .Values.cluster.service.port }} + - ports: + - port: {{ .Values.monitoring.service.port }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/statefulset.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/statefulset.yaml new file mode 100644 index 0000000..e4ce77a --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/statefulset.yaml @@ -0,0 +1,161 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: {{ template "nats.fullname" . }} + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + serviceName: {{ template "nats.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if .Values.statefulset.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.statefulset.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: "{{ template "nats.name" . }}" + chart: "{{ template "nats.chart" . }}" + release: {{ .Release.Name | quote }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} +{{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: + checksum/secrets: {{ toYaml .Values.auth.users | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- else if eq .Values.antiAffinity "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + {{- end }} + containers: + - name: {{ template "nats.name" . }} + image: {{ template "nats.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - qnatsd + args: + - -c + - /opt/bitnami/nats/gnatsd.conf + {{- range .Values.extraArgs }} + - {{ tpl (.) 
$ }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.client.service.port }} + - name: cluster + containerPort: {{ .Values.cluster.service.port }} + - name: monitoring + containerPort: {{ .Values.monitoring.service.port }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: monitoring + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: config + mountPath: /opt/bitnami/nats + {{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 6 }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + args: +{{ toYaml .Values.metrics.args | indent 10 -}} + - "http://localhost:{{ .Values.monitoring.service.port }}" + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 15 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "nats.fullname" . 
}} diff --git a/qliksense/charts/engine/charts/messaging/nats/templates/tls-secret.yaml b/qliksense/charts/engine/charts/messaging/nats/templates/tls-secret.yaml new file mode 100644 index 0000000..5acf441 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/templates/tls-secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "nats.name" $ }}" + chart: "{{ template "nats.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/nats/values.yaml b/qliksense/charts/engine/charts/messaging/nats/values.yaml new file mode 100644 index 0000000..9200bff --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/nats/values.yaml @@ -0,0 +1,306 @@ +## Global Docker image registry +## Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value +## +# global: +# imageRegistry: + +## Bitnami NATS image version +## ref: https://hub.docker.com/r/bitnami/nats/tags/ +## +image: + registry: docker.io + repository: bitnami/nats + tag: 1.3.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - name: myRegistrKeySecretName + +## NATS replicas +replicaCount: 1 + +## NATS Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## NATS Node selector and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +## +# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} +# tolerations: [] + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pods anti-affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +antiAffinity: soft + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Update strategy, can be set to RollingUpdate or OnDelete by default. 
+## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +statefulset: + updateStrategy: OnDelete + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Client Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +auth: + enabled: false + +## Cluster Authentication +## ref: https://github.com/nats-io/gnatsd#authentication +## +clusterAuth: + enabled: true + user: nats_cluster + # password: + # token: + +## Logging parameters +## ref: https://github.com/nats-io/gnatsd#command-line-arguments +## +debug: + enabled: false + trace: false + logtime: false + +## System overrides parameters +## ref: https://github.com/nats-io/gnatsd#configuration-file +## +# maxConnections: 100 +# maxControlLine: 512 +# maxPayload: 65536 +# writeDeadline: "2s" + +## Client URL to advertise to other servers +## +# clientAdvertise: + +## Network pullPolicy +## https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Enable creation of NetworkPolicy resources. + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client labels will have network access to the port NATS is listening + ## on. When true, NATS will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## NATS svc used for client connections +## ref: https://github.com/nats-io/gnatsd#running +## +client: + service: + ## Kubernetes service type + type: ClusterIP + port: 4222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: +## Kubernetes svc used for clustering +## ref: https://github.com/nats-io/gnatsd#clustering +## +cluster: + service: + ## Kubernetes service type + type: ClusterIP + port: 6222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + # loadBalancerIP: + + ## Do not advertise known cluster IPs to clients + ## + noAdvertise: false + +## NATS svc used for monitoring +## ref: https://github.com/nats-io/gnatsd#monitoring +## +monitoring: + service: + ## Kubernetes service type + type: ClusterIP + port: 8222 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## Use loadBalancerIP to request a specific static IP, + ## otherwise leave blank + ## + loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## NATS Monitoring. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + enabled: false + # The list of hostnames to be covered with this ingress record. + # Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: nats.local + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: nats.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: nats.local-tls + # key: + # certificate: + +# Optional additional arguments +extraArgs: [] + +## Metrics / Prometheus NATS Exporter +## +## ref: https://github.com/nats-io/prometheus-nats-exporter +metrics: + enabled: false + image: + registry: docker.io + repository: synadia/prometheus-nats-exporter + tag: 0.1.0 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +sidecars: +## Add sidecars to the pod. +## e.g. +# - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # ports: + # - name: portname + # containerPort: 1234 diff --git a/qliksense/charts/engine/charts/messaging/requirements.yaml b/qliksense/charts/engine/charts/messaging/requirements.yaml new file mode 100644 index 0000000..61c3461 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/requirements.yaml @@ -0,0 +1,13 @@ +dependencies: + - name: nats + version: 2.3.1 + repository: "file://./nats" + # messaging.nats.enabled is used by services that depend on the messaging chart to enable or disable nats + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats.enabled,nats.enabled + - name: nats-streaming + version: 0.3.0 + repository: "file://./nats-streaming" + # messaging.nats-streaming.enabled is used by services that depend on the messaging chart to enable or disable nats streaming + # The first valid condition will be used https://docs.helm.sh/developing_charts/#tags-and-condition-fields-in-requirements-yaml + condition: messaging.nats-streaming.enabled,nats-streaming.enabled diff --git a/qliksense/charts/engine/charts/messaging/templates/_helper.tpl b/qliksense/charts/engine/charts/messaging/templates/_helper.tpl new file mode 100644 index 0000000..d03e4d7 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/templates/_helper.tpl @@ -0,0 +1,38 @@ +{{- define "messaging.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "messaging.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "messaging.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats.name" -}} +{{- "nats" -}} +{{- end -}} + +{{- define "nats.fullname" -}} +{{- $name := "nats" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nats-streaming.name" -}} +{{- "nats-streaming" -}} +{{- end -}} + +{{- define "nats-streaming.fullname" -}} +{{- $name := "nats-streaming" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/templates/nats-secret.yaml b/qliksense/charts/engine/charts/messaging/templates/nats-secret.yaml new file mode 100644 index 0000000..92ffbe4 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/templates/nats-secret.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ 
.Release.Name }}-nats-secret +data: + {{ if .Values.nats.auth.enabled }} + {{ if .Values.nats.auth.user }} + client-user: {{ print .Values.nats.auth.user | b64enc }} + client-password: {{ print .Values.nats.auth.password | b64enc }} + {{ else if .Values.nats.auth.users }} + client-user: {{ print (index .Values.nats.auth.users 0).user | b64enc }} + client-password: {{ print (index .Values.nats.auth.users 0).password | b64enc }} + {{- end -}} + {{- end -}} diff --git a/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats-streaming.yaml b/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats-streaming.yaml new file mode 100644 index 0000000..cd855c0 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats-streaming.yaml @@ -0,0 +1,51 @@ +{{- if and (index .Values "nats-streaming" "enabled") (index .Values "networkPolicy" "nats-streaming" "enabled") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats-streaming.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ index .Values "nats-streaming" "monitoring" "service" "port" }} + from: + - podSelector: + matchLabels: + {{ template "nats-streaming.fullname" . }}-admin: "true" + - ports: + - port: {{ index .Values "nats-streaming" "metrics" "port" }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats.yaml b/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats.yaml new file mode 100644 index 0000000..df645c6 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/templates/networkpolicy-nats.yaml @@ -0,0 +1,51 @@ +{{- if and (.Values.nats.enabled) (.Values.networkPolicy.nats.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "{{ template "nats.fullname" . }}" + labels: + chart: "{{ template "messaging.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "{{ template "nats-streaming.name" . }}" + release: {{ .Release.Name | quote }} + - ports: + - port: {{ .Values.nats.client.service.port }} + from: + - podSelector: + matchLabels: + {{ template "nats.fullname" . 
}}-client: "true" + - ports: + - port: {{ .Values.nats.metrics.port }} + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - podSelector: + matchLabels: + app: "{{ template "nats.name" . }}" + release: {{ .Release.Name | quote }} + - podSelector: + matchLabels: + app: "keys" + release: {{ tpl ( .Values.networkPolicy.keys.release ) . | quote }} +{{- end }} diff --git a/qliksense/charts/engine/charts/messaging/values.yaml b/qliksense/charts/engine/charts/messaging/values.yaml new file mode 100644 index 0000000..35482e5 --- /dev/null +++ b/qliksense/charts/engine/charts/messaging/values.yaml @@ -0,0 +1,343 @@ +## Default values for the messaging Helm chart. +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. + +## NATS configuration +## +nats: + ## Enables NATS chart by default + enabled: true + + securityContext: + enabled: false + + ## Image pull policy for NATS chart + image: + registry: ghcr.io + repository: qlik-download/qnatsd + tag: 0.2.4 + pullPolicy: IfNotPresent + pullSecrets: + - name: artifactory-docker-secret + + ## Number of NATS nodes + replicaCount: 1 + + ## NATS statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS svc used for client connections + ## ref: https://github.com/nats-io/gnatsd#running + ## + client: + service: + type: ClusterIP + port: 4222 + + clientAdvertise: "{{.Release.Name}}-nats-client:4222" + + ## Kubernetes svc used for clustering + ## ref: https://github.com/nats-io/gnatsd#clustering + ## + cluster: + service: + type: ClusterIP + port: 6222 + # noAdvertise: false + + ## NATS svc used for monitoring + ## ref: https://github.com/nats-io/gnatsd#monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + ## Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## Client Authentication + auth: + enabled: true + + users: + - user: "nats_client" + password: T0pS3cr3t + + ## Configuration of users that are authenticated used JWTs + ## Users can be configured with permissions to allow or deny publish/subscribe access to subjects + ## ref: https://nats.io/documentation/managing_the_server/authorization/ + ## + jwtUsers: + - user: "audit" + stanPermissions: + subscribe: + - "system-events.engine.app" + - "system-events.user-session" + - "system-events.spaces" + - user: "chronos-worker" + stanPermissions: + publish: + - "chronos-worker.>" + - user: "data-engineering-exporter" + stanPermissions: + subscribe: + - "system-events.>" + - user: "edge-auth" + stanPermissions: + publish: + - "system-events.user-session" + subscribe: + - "system-events.users" + - "system-events.user-session" + - "private.idp-sync" + - user: "engine" + stanPermissions: + publish: + - "com.qlik.app" + - "com.qlik.engine.session" + - "system-events.engine.app" + - "system-events.engine.session" + - user: "identity-providers" + stanPermissions: + publish: + - "private.idp-sync" + - user: "odag" + stanPermissions: + publish: + - "odag.>" + subscribe: + - "odag.>" + - "system-events.engine.app" + - "system-events.reloadResults" + - user: "qix-data-reload" + stanPermissions: + publish: + - "reload" + - "system-events.reloadResults" + subscribe: + - "reload" + - user: "resource-library" + stanPermissions: + publish: + - "system-events.resource-library" + - user: "tenants" + stanPermissions: + publish: + - "system-events.tenants" + - user: "users" + stanPermissions: + publish: + - "system-events.users" + - user: "collections" + 
stanPermissions: + subscribe: + - "system-events.engine.app" + - user: "licenses" + stanPermissions: + publish: + - "system-events.licenses" + subscribe: + - "system-events.licenses" + - user: "spaces" + stanPermissions: + publish: + - "system-events.spaces" + - user: "precedents" + stanPermissions: + subscribe: + - "system-events.engine.app" + + extraArgs: + - --jwt_users_file=/opt/bitnami/nats/users.json + - --jwt_auth_url=http://{{ .Release.Name }}-keys:8080/v1/keys/qlik.api.internal + ## for localdev use this configuration instead + # - --jwt_auth_url=http://keys:8080/v1/keys/qlik.api.internal + + ## Cluster Authentication + clusterAuth: + enabled: false + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-34 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -connz + - -routez + - -subz + - -varz + +## NATS Streaming configuration +## +nats-streaming: + enabled: true + + securityContext: + enabled: false + + ## NATS Streaming image + image: + registry: ghcr.io + repository: qlik-download/nats-streaming + tag: 0.12.2 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + + ## NATS Streaming replicas + replicaCount: 3 + + ## NATS Streaming statefulset configurations + statefulset: + updateStrategy: RollingUpdate + + ## NATS Streaming extra options for liveness and readiness probes + readinessProbe: + enabled: true + initialDelaySeconds: 30 + + ## NATS Streaming svc used for monitoring + ## + monitoring: + service: + type: ClusterIP + port: 8222 + # Annotations for Promethues + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "7777" + + ## NATS Streaming cluster id + clusterID: "{{ .Release.Name }}-nats-streaming-cluster" + + ## NATS server + natsSvc: "nats://{{ .Release.Name }}-nats-client:4222" + + ## NATS server client Authentication + auth: + enabled: true + secretName: "{{ .Release.Name }}-nats-secret" + secretClientUser: "client-user" + secretClientPassword: "client-password" + + ## Use for general debugging. Enabling this will negatively affect performance. + debug: true + + # Interval at which server sends heartbeat to a client + hbInterval: 10s + + # How long server waits for a heartbeat response + hbTimeout: 10s + + # Number of failed heartbeats before server closes the client connection + hbFailCount: 5 + + + persistence: + volume: + ## If false, emptyDir will be used as a volume. 
+ enabled: false + # size: 10Gi + # storageClass: "" + internalStorageClass: + ## Normally the storage class should be created outside this helm chart + ## If we want to deploy a storage class as part of the helm chart + ## - Provide a storageClassName above. + ## - set enabled true + ## - provide a storage class definition. + + ## If enabled storage class will be configured as part of the chart. + ## If not enabled an external storageclass can be used by providing storageClassName above. + enabled: false + + ## Storageclass definition + definition: {} + ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs. + ## This field must be specified. + ## See https://kubernetes.io/docs/concepts/storage/storage-classes/ + # provisioner: kubernetes.io/no-provisioner + + ## Reclaim policy should normally be set to Retain to avoid loosing data when deleting this helm chart. + # reclaimPolicy: Retain + + ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified + ## in the mountOptions field of the class. + # mountOptions: {} + + ## Storage classes have parameters that describe volumes belonging to the storage class. + ## Different parameters may be accepted depending on the provisioner. + # parameters: {} + maxAge: "2h" + + ## Metrics / Prometheus NATS Exporter + ## + ## ref: https://github.com/nats-io/prometheus-nats-exporter + metrics: + enabled: true + image: + registry: ghcr.io + repository: qlik-download/prometheus-nats-exporter + tag: 0.1.0-34 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: + - name: artifactory-docker-secret + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + ## Metrics exporter port + port: 7777 + ## Metrics exporter annotations + podAnnotations: + prometheus.io/scrape: "false" # prometheus annotations are added on the monitoring service instead + ## Metrics exporter flags + args: + - -channelz + + +## NATS and NATS Streaming Network Policy +## +networkPolicy: + ## NATS + nats: + enabled: false + ## NATS Streaminng + nats-streaming: + enabled: false + ## Keys + keys: + ## Set keys release name for egress rules + release: "{{ .Release.Name }}" diff --git a/qliksense/charts/engine/charts/redis-ha/Chart.yaml b/qliksense/charts/engine/charts/redis-ha/Chart.yaml new file mode 100644 index 0000000..c5a1c66 --- /dev/null +++ b/qliksense/charts/engine/charts/redis-ha/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 5.0.6 +description: Highly available Kubernetes implementation of Redis +home: http://redis.io/ +icon: https://upload.wikimedia.org/wikipedia/en/thumb/6/6b/Redis_Logo.svg/1200px-Redis_Logo.svg.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: salimsalaues@gmail.com + name: ssalaues +- email: aaron.layfield@gmail.com + name: dandydeveloper +name: redis-ha +sources: +- https://redis.io/download +- https://github.com/scality/Zenko/tree/development/1.0/kubernetes/zenko/charts/redis-ha +- https://github.com/oliver006/redis_exporter +version: 4.4.1 diff --git a/qliksense/charts/engine/charts/redis-ha/OWNERS b/qliksense/charts/engine/charts/redis-ha/OWNERS new file mode 100644 index 0000000..cf4f87d --- /dev/null +++ b/qliksense/charts/engine/charts/redis-ha/OWNERS 
@@ -0,0 +1,6 @@
+approvers:
+- ssalaues
+- dandydeveloper
+reviewers:
+- ssalaues
+- dandydeveloper
\ No newline at end of file
diff --git a/qliksense/charts/engine/charts/redis-ha/README.md b/qliksense/charts/engine/charts/redis-ha/README.md
new file mode 100644
index 0000000..b127ad8
--- /dev/null
+++ b/qliksense/charts/engine/charts/redis-ha/README.md
@@ -0,0 +1,228 @@
+# Redis
+
+[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs.
+
+## TL;DR;
+
+```bash
+$ helm install stable/redis-ha
+```
+
+By default this chart installs 3 pods in total:
+ * one pod containing a redis master and a sentinel container (optional prometheus metrics exporter sidecar available)
+ * two pods each containing a redis slave and a sentinel container (optional prometheus metrics exporter sidecars available)
+
+## Introduction
+
+This chart bootstraps a [Redis](https://redis.io) highly available master/slave statefulset in a [Kubernetes](http://kubernetes.io) cluster using the Helm package manager.
+
+## Prerequisites
+
+- Kubernetes 1.8+ with Beta APIs enabled
+- PV provisioner support in the underlying infrastructure
+
+## Upgrading the Chart
+
+Please note that there have been a number of changes simplifying the redis management strategy (for better failover and elections) in the 3.x version of this chart. These changes allow the use of official [redis](https://hub.docker.com/_/redis/) images that do not require special RBAC or ServiceAccount roles. As a result, when upgrading from version >=2.0.1 to >=3.0.0 of this chart, `Role`, `RoleBinding`, and `ServiceAccount` resources should be deleted manually.
+
+### Upgrading the chart from 3.x to 4.x
+
+Starting from version `4.x`, the HAProxy sidecar prometheus-exporter has been removed and replaced by the embedded [HAProxy metrics endpoint](https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter). As a result, when upgrading from version 3.x to 4.x the `haproxy.exporter` section should be removed and `haproxy.metrics` needs to be configured to fit your needs.
+
+## Installing the Chart
+
+To install the chart:
+
+```bash
+$ helm install stable/redis-ha
+```
+
+The command deploys Redis on the Kubernetes cluster in the default configuration. By default this chart installs one master pod containing a redis master container and a sentinel container, along with 2 redis slave pods each containing their own sentinel sidecars. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the deployment:
+
+```bash
+$ helm delete
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Redis chart and their default values.
+ +| Parameter | Description | Default | +|:--------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------| +| `image` | Redis image | `redis` | +| `imagePullSecrets` | Reference to one or more secrets to be used when pulling redis images | [] | +| `tag` | Redis tag | `5.0.6-alpine` | +| `replicas` | Number of redis master/slave pods | `3` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the redis-ha.fullname template | +| `rbac.create` | Create and use RBAC resources | `true` | +| `redis.port` | Port to access the redis service | `6379` | +| `redis.masterGroupName` | Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated | `mymaster` | +| `redis.config` | Any valid redis config options in this section will be applied to each server (see below) | see values.yaml | +| `redis.customConfig` | Allows for custom redis.conf files to be applied. If this is used then `redis.config` is ignored | `` | +| `redis.resources` | CPU/Memory for master/slave nodes resource requests/limits | `{}` | +| `sentinel.port` | Port to access the sentinel service | `26379` | +| `sentinel.quorum` | Minimum number of servers necessary to maintain quorum | `2` | +| `sentinel.config` | Valid sentinel config options in this section will be applied as config options to each sentinel (see below) | see values.yaml | +| `sentinel.customConfig` | Allows for custom sentinel.conf files to be applied. If this is used then `sentinel.config` is ignored | `` | +| `sentinel.resources` | CPU/Memory for sentinel node resource requests/limits | `{}` | +| `init.resources` | CPU/Memory for init Container node resource requests/limits | `{}` | +| `auth` | Enables or disables redis AUTH (Requires `redisPassword` to be set) | `false` | +| `redisPassword` | A password that configures a `requirepass` and `masterauth` in the conf parameters (Requires `auth: enabled`) | `` | +| `authKey` | The key holding the redis password in an existing secret. | `auth` | +| `existingSecret` | An existing secret containing a key defined by `authKey` that configures `requirepass` and `masterauth` in the conf parameters (Requires `auth: enabled`, cannot be used in conjunction with `.Values.redisPassword`) | `` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `hardAntiAffinity` | Whether the Redis server pods should be forced to run on separate nodes. | `true` | +| `additionalAffinities` | Additional affinities to add to the Redis server pods. | `{}` | +| `securityContext` | Security context to be added to the Redis server pods. | `{runAsUser: 1000, fsGroup: 1000, runAsNonRoot: true}` | +| `affinity` | Override all other affinity settings with a string. | `""` | +| `persistentVolume.size` | Size for the volume | 10Gi | +| `persistentVolume.annotations` | Annotations for the volume | `{}` | +| `persistentVolume.reclaimPolicy` | Method used to reclaim an obsoleted volume. 
`Delete` or `Retain` | `""` | +| `exporter.enabled` | If `true`, the prometheus exporter sidecar is enabled | `false` | +| `exporter.image` | Exporter image | `oliver006/redis_exporter` | +| `exporter.tag` | Exporter tag | `v0.31.0` | +| `exporter.port` | Exporter port | `9121` | +| `exporter.annotations` | Prometheus scrape annotations | `{prometheus.io/path: /metrics, prometheus.io/port: "9121", prometheus.io/scrape: "true"}` | +| `exporter.extraArgs` | Additional args for the exporter | `{}` | +| `exporter.script` | A custom custom Lua script that will be mounted to exporter for collection of custom metrics. Creates a ConfigMap and sets env var `REDIS_EXPORTER_SCRIPT`. | | +| `exporter.serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `exporter.serviceMonitor.namespace` | Namespace the service monitor is created in | `default` | +| `exporter.serviceMonitor.interval` | Scrape interval, If not set, the Prometheus default scrape interval is used | `nil` | +| `exporter.serviceMonitor.telemetryPath` | Path to redis-exporter telemetry-path | `/metrics` | +| `exporter.serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `exporter.serviceMonitor.timeout` | How long until a scrape request times out. If not set, the Prometheus default scape timeout is used | `nil` | +| `haproxy.enabled` | Enabled HAProxy LoadBalancing/Proxy | `false` | +| `haproxy.replicas` | Number of HAProxy instances | `3` | +| `haproxy.image.repository`| HAProxy Image Repository | `haproxy` | +| `haproxy.image.tag` | HAProxy Image Tag | `2.0.1` | +| `haproxy.image.pullPolicy`| HAProxy Image PullPolicy | `IfNotPresent` | +| `haproxy.imagePullSecrets`| Reference to one or more secrets to be used when pulling haproxy images | [] | +| `haproxy.annotations` | HAProxy template annotations | `{}` | +| `haproxy.customConfig` | Allows for custom config-haproxy.cfg file to be applied. If this is used then default config will be overwriten | `` | +| `haproxy.extraConfig` | Allows to place any additional configuration section to add to the default config-haproxy.cfg | `` | +| `haproxy.resources` | HAProxy resources | `{}` | +| `haproxy.service.type` | HAProxy service type "ClusterIP", "LoadBalancer" or "NodePort" | `ClusterIP` | +| `haproxy.service.nodePort` | HAProxy service nodePort value (haproxy.service.type must be NodePort) | not set | +| `haproxy.service.annotations` | HAProxy service annotations | `{}` | +| `haproxy.stickyBalancing` | HAProxy sticky load balancing to Redis nodes. Helps with connections shutdown. 
| `false` | +| `haproxy.hapreadport.enable` | Enable a read only port for redis slaves | `false` | +| `haproxy.hapreadport.port` | Haproxy port for read only redis slaves | `6380` | +| `haproxy.metrics.enabled` | HAProxy enable prometheus metric scraping | `false` | +| `haproxy.metrics.port` | HAProxy prometheus metrics scraping port | `9101` | +| `haproxy.metrics.portName` | HAProxy metrics scraping port name | `exporter-port` | +| `haproxy.metrics.scrapePath` | HAProxy prometheus metrics scraping port | `/metrics` | +| `haproxy.metrics.serviceMonitor.enabled` | Use servicemonitor from prometheus operator for HAProxy metrics | `false` | +| `haproxy.metrics.serviceMonitor.namespace` | Namespace the service monitor for HAProxy metrics is created in | `default` | +| `haproxy.metrics.serviceMonitor.interval` | Scrape interval, If not set, the Prometheus default scrape interval is used | `nil` | +| `haproxy.metrics.serviceMonitor.telemetryPath` | Path to HAProxy metrics telemetry-path | `/metrics` | +| `haproxy.metrics.serviceMonitor.labels` | Labels for the HAProxy metrics servicemonitor passed to Prometheus Operator | `{}` | +| `haproxy.metrics.serviceMonitor.timeout` | How long until a scrape request times out. If not set, the Prometheus default scape timeout is used | `nil` | +| `haproxy.init.resources` | Extra init resources | `{}` | +| `haproxy.timeout.connect` | haproxy.cfg `timeout connect` setting | `4s` | +| `haproxy.timeout.server` | haproxy.cfg `timeout server` setting | `30s` | +| `haproxy.timeout.client` | haproxy.cfg `timeout client` setting | `30s` | +| `haproxy.timeout.check` | haproxy.cfg `timeout check` setting | `2s` | +| `haproxy.priorityClassName` | priorityClassName for `haproxy` deployment | not set | +| `haproxy.securityContext` | Security context to be added to the HAProxy deployment. | `{runAsUser: 1000, fsGroup: 1000, runAsNonRoot: true}` | +| `haproxy.hardAntiAffinity` | Whether the haproxy pods should be forced to run on separate nodes. | `true` | +| `haproxy.affinity` | Override all other haproxy affinity settings with a string. | `""` | +| `haproxy.additionalAffinities` | Additional affinities to add to the haproxy server pods. | `{}` | +| `podDisruptionBudget` | Pod Disruption Budget rules | `{}` | +| `priorityClassName` | priorityClassName for `redis-ha-statefulset` | not set | +| `hostPath.path` | Use this path on the host for data storage | not set | +| `hostPath.chown` | Run an init-container as root to set ownership on the hostPath | `true` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `busybox` | +| `sysctlImage.tag` | sysctlImage Init container tag | `1.31.1` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys`| Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage resources | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install \ + --set image=redis \ + --set tag=5.0.5-alpine \ + stable/redis-ha +``` + +The above command sets the Redis server within `default` namespace. 
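+
+As a further illustration (this example is not part of the upstream chart documentation), several of the parameters from the table above can be combined in one command, for example enabling Redis AUTH together with the prometheus exporter sidecar. The password value below is only a placeholder:
+
+```bash
+# Illustrative sketch only: per the table above, `auth` requires `redisPassword`
+# to be set, and `exporter.enabled` adds the prometheus exporter sidecar.
+# Replace myRedisPassword with a real secret value.
+$ helm install \
+  --set auth=true \
+  --set redisPassword=myRedisPassword \
+  --set exporter.enabled=true \
+  stable/redis-ha
+```
+
+The same keys can equally be provided through a custom values file, as described next.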
+ +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install -f values.yaml stable/redis-ha +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Custom Redis and Sentinel config options + +This chart allows for most redis or sentinel config options to be passed as a key value pair through the `values.yaml` under `redis.config` and `sentinel.config`. See links below for all available options. + +[Example redis.conf](http://download.redis.io/redis-stable/redis.conf) +[Example sentinel.conf](http://download.redis.io/redis-stable/sentinel.conf) + +For example `repl-timeout 60` would be added to the `redis.config` section of the `values.yaml` as: + +```yml + repl-timeout: "60" +``` + +Note: + +1. Some config options should be renamed by redis version,e.g.: + + ``` + # In redis 5.x,see https://raw.githubusercontent.com/antirez/redis/5.0/redis.conf + min-replicas-to-write: 1 + min-replicas-max-lag: 5 + + # In redis 4.x and redis 3.x,see https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf and https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf + min-slaves-to-write 1 + min-slaves-max-lag 5 + ``` + +Sentinel options supported must be in the the `sentinel