Initial EFK support testing

jprdonnelly
2019-09-25 13:49:45 -04:00
parent 526c43b37e
commit a7384e5688
2 changed files with 366 additions and 0 deletions

efk/elastic-values.yaml Normal file

@@ -0,0 +1,239 @@
---
clusterName: "elasticsearch"
nodeGroup: "master"
# The service that non-master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""
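# For example, with the values above (clusterName "elasticsearch", nodeGroup
# "master"), the explicit setting would be:
# masterService: "elasticsearch-master"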
# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"
replicas: 3
minimumMasterNodes: 2
esMajorVersion: ""
# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
# - name: MY_ENVIRONMENT_VAR
# value: the_value_goes_here
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.3.2"
imagePullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# additional labels
labels: {}
esJavaOpts: "-Xmx1g -Xms1g"
resources:
  requests:
    cpu: "100m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"
initResources: {}
#  limits:
#    cpu: "25m"
#    memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"
sidecarResources: {}
#  limits:
#    cpu: "25m"
#    memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"
networkHost: "0.0.0.0"
volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 30Gi
rbac:
  create: false
  serviceAccountName: ""
podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
persistence:
  enabled: true
  annotations: {}
extraVolumes: ""
#  - name: extras
#    emptyDir: {}
extraVolumeMounts: ""
#  - name: extras
#    mountPath: /usr/share/extras
#    readOnly: true
extraInitContainers: ""
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
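# A sketch only; assumes a PriorityClass named "high-priority" has already
# been created in the cluster:
# priorityClassName: "high-priority"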
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
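# For example, to spread pods across zones rather than hosts (a sketch; the
# exact topology label key depends on your Kubernetes version and provider):
# antiAffinityTopologyKey: "failure-domain.beta.kubernetes.io/zone"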
# "Hard" means that, by default, pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to "soft" makes this best-effort
antiAffinity: "hard"
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
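# A minimal sketch using standard Kubernetes nodeAffinity syntax (the label
# key and value are assumed placeholders):
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: kubernetes.io/arch
#             operator: In
#             values:
#               - amd64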
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
protocol: http
httpPort: 9200
transportPort: 9300
service:
  type: ClusterIP
  nodePort:
  annotations: {}
  httpPortName: http
  transportPortName: transport
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that Kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
# The following value is deprecated,
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
# How long to wait for Elasticsearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: true
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - elastic.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
nameOverride: ""
fullnameOverride: ""
# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false
lifecycle: {}
#  preStop:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the preStop handler > /usr/share/message"]
#  postStart:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
sysctlInitContainer:
  enabled: true
keystore: []
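# A hedged sketch: each listed Secret's keys are loaded into the
# Elasticsearch keystore ("elastic-s3-credentials" is an assumed name):
# keystore:
#   - secretName: elastic-s3-credentials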

efk/values.yaml Normal file

@@ -0,0 +1,127 @@
# Default values for efk.
# https://github.com/helm/charts/blob/master/stable/kibana/values.yaml
kibana:
  image:
    repository: "docker.elastic.co/kibana/kibana-oss"
    tag: "7.3.2"
  env:
    # All Kibana configuration options can be adjusted via env vars.
    # To map a config option to an env var, uppercase it and replace `.` with `_`
    # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
    #
    ELASTICSEARCH_URL: http://elastic.brown:9200
    SERVER_PORT: 5601
    LOGGING_VERBOSE: "true"
    SERVER_DEFAULTROUTE: "/app/kibana"
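    # For example, the Kibana setting `server.basePath` maps to the env var
    # below (a sketch; the path value is an assumed placeholder):
    # SERVER_BASEPATH: "/kibana"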
# https://github.com/komljen/helm-charts/blob/master/elasticsearch/values.yaml
elasticsearch:
  spec:
    use-ssl: false
    elastic-search-image: docker.elastic.co/elasticsearch/elasticsearch:7.3.2
    client-node-replicas: 1
    master-node-replicas: 1
    data-node-replicas: 1
    network-host: 0.0.0.0
    zones: []
    data-volume-size: 10Gi
    client-java-options: "-Xms1024m -Xmx1024m"
    master-java-options: "-Xms2048m -Xmx2048m"
    data-java-options: "-Xms2048m -Xmx2048m"
    snapshot:
      scheduler-enabled: false
      repo-type: s3
      bucket-name: efk-snapshots
      cron-schedule: "0 2 * * *"
      repo-region: us-east-1
      image: komljen/elasticsearch-cron:0.0.6
    storage:
      # e.g. gp2 for AWS
      type: nfs-dynamic
      # e.g. kubernetes.io/aws-ebs for AWS
      classProvisioner: volume.alpha.kubernetes.io/storage-class
      # Retain or Delete
      reclaimPolicy: Delete
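    # A sketch for AWS, assembled from the inline hints above (Retain shown
    # simply as the alternative reclaim policy):
    # storage:
    #   type: gp2
    #   classProvisioner: kubernetes.io/aws-ebs
    #   reclaimPolicy: Retain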
    resources:
      requests:
        memory: 1024Mi
        cpu: 200m
      limits:
        memory: 2048Mi
        cpu: '1'
# https://github.com/helm/charts/blob/master/stable/fluent-bit/values.yaml
fluent-bit:
  enabled: true
  image:
    fluent_bit:
      repository: fluent/fluent-bit
      tag: 1.1.1
  backend:
    type: es
    es:
      host: elastic.brown
      port: 9200
      index: kubernetes_cluster
      logstash_prefix: kubernetes_cluster
# https://github.com/helm/charts/blob/master/stable/elasticsearch-curator/values.yaml
elasticsearch-curator:
  config:
    elasticsearch:
      hosts:
        - elastic.brown
# https://github.com/helm/charts/blob/master/stable/filebeat/values.yaml
filebeat:
  enabled: false
  config:
    setup.template.name: "kubernetes_cluster"
    setup.template.pattern: "kubernetes_cluster-*"
    processors:
      - decode_json_fields:
          fields: ["message"]
          process_array: true
          max_depth: 8
          target: ""
    filebeat.prospectors:
      - type: docker
        containers.ids:
          - "*"
        processors:
          - add_kubernetes_metadata:
              in_cluster: true
          - drop_event:
              when:
                equals:
                  kubernetes.container.name: "filebeat"
    output.file:
      enabled: false
    output.elasticsearch:
      hosts: ["http://elastic.brown:9200"]
      index: "kubernetes_cluster-%{+yyyy.MM.dd}"
  privileged: true
# https://github.com/helm/charts/blob/master/stable/metricbeat/values.yaml
metricbeat:
  enabled: false
  daemonset:
    enabled: false
  deployment:
    config:
      setup.template.name: "kubernetes_events"
      setup.template.pattern: "kubernetes_events-*"
      output.elasticsearch:
        hosts: ["http://elastic.brown:9200"]
        index: "kubernetes_events-%{[beat.version]}-%{+yyyy.MM.dd}"
      output.file:
        enabled: false
    modules:
      kubernetes:
        enabled: true
        config:
          - module: kubernetes
            metricsets:
              - event