mirror of https://github.com/jprdonnelly/kubernetes-cluster.git

EFK refinement
@@ -1,241 +1,66 @@
---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""
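# Illustrative only (not part of the chart values above): following the rule in
# the comment, a non-master nodeGroup joining this cluster would point at
# clusterName + "-" + nodeGroup of the master group, i.e.:
# masterService: "elasticsearch-master"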

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"

replicas: 3
minimumMasterNodes: 2

esMajorVersion: ""

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs

image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.4.2"
imagePullPolicy: "IfNotPresent"

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

# additionals labels
labels: {}

esJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "100m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

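# Illustrative note (not part of the chart values above): Elastic's general
# guidance is to keep the JVM heap at or below roughly half the container
# memory, so "-Xmx1g -Xms1g" pairs with the 2Gi request/limit here. Scaling the
# memory to, say, 4Gi would typically mean raising the heap to match, e.g.:
# esJavaOpts: "-Xmx2g -Xms2g"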
initResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"

sidecarResources: {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 30Gi
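  # Illustrative only: the claim template accepts standard PersistentVolumeClaim
  # spec fields, so a specific StorageClass could be requested here, e.g.
  # storageClassName: "standard"   # hypothetical class name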

rbac:
  create: false
  serviceAccountName: ""

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: true
  annotations: {}

extraVolumes: ""
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts: ""
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraInitContainers: ""
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"
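# Illustrative only: to spread pods across zones rather than individual nodes,
# as the comment above suggests, the topology key could point at the zone label
# and the constraint could be relaxed to best effort, e.g.:
# antiAffinityTopologyKey: "failure-domain.beta.kubernetes.io/zone"
# antiAffinity: "soft"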

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

protocol: http
httpPort: 9200
transportPort: 9300

service:
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

# The following value is deprecated,
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""

securityContext:
  capabilities:
    drop:
    - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
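# Illustrative only: the chart's readiness probe appends these parameters to a
# cluster health request, roughly
# GET http://127.0.0.1:9200/_cluster/health?wait_for_status=green&timeout=1s
# so on startup a pod is not marked ready until the cluster reports green.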

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
nodeSelector: {}
tolerations: []

# Enabling this will publically expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: true
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - es.browntown.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""

# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false

lifecycle: {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]

sysctlInitContainer:
# Default values for elk.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
elasticsearch:
  enabled: true

  keystore: []

kibana:
  enabled: true
  env:
    ELASTICSEARCH_HOSTS: http://{{ .Release.Name }}-elasticsearch-client:9200
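    # Illustrative only: for a release installed under a hypothetical name such
    # as "elk", the Helm template above renders to
    # ELASTICSEARCH_HOSTS: http://elk-elasticsearch-client:9200
    # i.e. it assumes the Elasticsearch subchart exposes a "-client" service.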

logstash:
  enabled: false
  # elasticsearch:
  #   host: elastic-stack-elasticsearch-client

filebeat:
  enabled: true
  # config:
  #   output.file.enabled: false
  #   output.logstash:
  #     hosts: ["elastic-stack-logstash:5044"]
  # indexTemplateLoad:
  #   - elastic-stack-elasticsearch-client:9200

fluentd:
  enabled: true

fluent-bit:
  enabled: false

fluentd-elasticsearch:
  enabled: true

nginx-ldapauth-proxy:
  enabled: false
  # Example config to get it working with ELK. Adjust as you need to.
  # proxy:
  #   port: 5601
  #   # This is the internal hostname for the kibana service
  #   host: "elk-kibana.default.svc.cluster.local"
  #   authName: "ELK:Infrastructure:LDAP"
  #   ldapHost: "ldap.example.com"
  #   ldapDN: "dc=example,dc=com"
  #   ldapFilter: "objectClass=organizationalPerson"
  #   ldapBindDN: "cn=reader,dc=example,dc=com"
  #   requires:
  #     - name: "ELK-USER"
  #       filter: "cn=elkuser,ou=groups,dc=example,dc=com"
  # ingress:
  #   enabled: true
  #   hosts:
  #     - "elk.example.com"
  #   annotations:
  #     kubernetes.io/ingress.class: nginx
  #   tls:
  #     - hosts:
  #         - elk.example.com
  #       secretName: example-elk-tls
  # secrets:
  #   ldapBindPassword: PASSWORD

elasticsearch-curator:
  enabled: true

elasticsearch-exporter:
  enabled: false