mirror of https://github.com/jprdonnelly/kubernetes-cluster.git
synced 2025-12-19 18:05:19 -05:00
Adding optional Grafana and Prometheus deployments
This commit is contained in:
Vagrantfile (vendored, 35 lines changed)
@@ -56,15 +56,15 @@ EOF'
 curl -sSL get.docker.com | sh

 # Setup daemon.
-sudo bash -c 'cat <<EOF> /etc/docker/daemon.json
-{
-  "exec-opts": ["native.cgroupdriver=systemd"],
-  "log-driver": "json-file",
-  "log-opts": {
-    "max-size": "100m"
-  },
-  "storage-driver": "overlay2"
-}
+sudo bash -c 'cat <<EOF> /etc/docker/daemon.json
+{
+  "exec-opts": ["native.cgroupdriver=systemd"],
+  "log-driver": "json-file",
+  "log-opts": {
+    "max-size": "100m"
+  },
+  "storage-driver": "overlay2"
+}
 EOF'

 sudo mkdir -p /etc/systemd/system/docker.service.d

@@ -89,7 +89,7 @@ EOF'

 # systemctl daemon-reload

-sudo apt update && sudo apt install -y docker ntpdate nmap netcat neofetch socat apt-transport-https ca-certificates curl software-properties-common nfs-common sshpass kubelet kubeadm kubectl kubernetes-cni
+sudo apt update && sudo apt install -y docker ntpdate nmap netcat neofetch socat apt-transport-https ca-certificates curl software-properties-common sshpass kubelet kubeadm kubectl kubernetes-cni

 echo "libssl1.1 libssl1.1/restart-services boolean true" | sudo debconf-set-selections
 sudo DEBIAN_FRONTEND=noninteractive apt update && sudo DEBIAN_FRONTEND=noninteractive apt upgrade -y

@@ -183,19 +183,7 @@ $configureNFS = <<-SCRIPT

 # Label the node that will host NFS pvs
 # kubectl label nodes k8s-nfs role=nfs
-# kubectl taint nodes k8s-nfs key=value:NoSchkubectl label nodes k8s-nfs role=nfsedule
-
-# echo "################################################################"
-# echo " Deploy nfs-provisioner in k8s cluster
-# echo " using dedicated disk attached to k8s-node1"
-# echo "################################################################"
-# # Pull and apply the nfs-provisioner
-# sleep 60
-# kubectl apply -f https://raw.githubusercontent.com/jprdonnelly/kubernetes-cluster/master/nfs-provisioner/nfs-deployment.yaml
-# kubectl apply -f https://raw.githubusercontent.com/jprdonnelly/kubernetes-cluster/master/nfs-provisioner/nfs-class.yaml
-
-# # Define the new storage class as default
-# kubectl patch storageclass nfs-dynamic -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+# kubectl taint nodes k8s-nfs key=value:NoSchedule
 SCRIPT

 # Insanely broken - barely fit for testing

@@ -220,6 +208,7 @@ Vagrant.configure("2") do |config|
 end

 servers.each do |opts|
+  config.ssh.keep_alive = true
   config.vm.define opts[:name] do |config|
     config.vm.box = opts[:box]
     config.vm.box_version = opts[:box_version]
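The daemon.json written above moves Docker onto the systemd cgroup driver, and the kubelet has to end up on the same driver. kubeadm normally detects Docker's driver on the kubelet/kubeadm versions this Vagrantfile installs, but if it ever has to be pinned explicitly, the kubelet side is a single KubeletConfiguration field; a hedged sketch, not something the Vagrantfile currently writes:

# Hedged sketch: pin the kubelet cgroup driver to match Docker's
# "native.cgroupdriver=systemd" from daemon.json; not set by this Vagrantfile.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd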
grafana/grafana-claim-persistentvolumeclaim.yaml (new file, 13 lines added)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  creationTimestamp: null
  labels:
    component: grafana
  name: grafana-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
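The claim names no storageClassName, so it only binds if the cluster has a default StorageClass; in this repo that was meant to come from the commented-out nfs-provisioner block in the Vagrantfile, which patches nfs-dynamic as the default. If that provisioner is applied but not made the default, the claim could name the class explicitly; a sketch under that assumption, not what the commit ships:

# Hypothetical variant: bind the Grafana claim to the nfs-dynamic StorageClass
# created by nfs-provisioner/nfs-class.yaml (assumption: that class exists).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-claim
  labels:
    component: grafana
spec:
  storageClassName: nfs-dynamic
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi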
grafana/grafana-deployment.yaml (new file, 54 lines added)
@@ -0,0 +1,54 @@

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: grafana
  name: grafana-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      component: grafana
  template:
    metadata:
      labels:
        component: grafana
    spec:
      volumes:
        - name: grafana-claim
          persistentVolumeClaim:
            claimName: grafana-claim
      containers:
        - name: grafana
          image: pharosproduction/grafana:6
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 3000
          resources:
            limits:
              cpu: 500m
              memory: 2500Mi
            requests:
              cpu: 100m
              memory: 100Mi
          # livenessProbe:
          #   exec:
          #     command:
          #       - wget
          #       - localhost:3000
          #       - --spider
          #   initialDelaySeconds: 30
          #   periodSeconds: 30
          # readinessProbe:
          #   exec:
          #     command:
          #       - wget
          #       - localhost:3000
          #       - --spider
          #   initialDelaySeconds: 120
          #   periodSeconds: 5
          volumeMounts:
            - mountPath: /var/lib/grafana
              name: grafana-claim
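The liveness and readiness probes above ship commented out. If they are wanted, one option is to hit Grafana's /api/health endpoint over HTTP rather than exec'ing wget; a hedged sketch of that variant with the same timings as the commented block (the httpGet form is an assumption, not part of the commit):

# Hedged sketch: HTTP probes against Grafana's /api/health endpoint, placed where
# the commented-out wget exec probes sit in the container spec above.
spec:
  template:
    spec:
      containers:
        - name: grafana
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 120
            periodSeconds: 5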
grafana/grafana-ip-service.yaml (new file, 16 lines added)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: grafana-ip-service
  namespace: default
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/path: /metrics
    prometheus.io/port: '3000'
spec:
  type: ClusterIP
  selector:
    component: grafana
  ports:
    - port: 3000
      targetPort: 3000
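grafana-ip-service is a ClusterIP service, so the Grafana UI on port 3000 is only reachable from inside the cluster; the annotations simply let Prometheus scrape it. For the Vagrant test cluster, a NodePort variant is one way to reach the UI from the host; a sketch, where the service name and node port are assumptions rather than part of the commit:

# Hypothetical NodePort variant for reaching the Grafana UI from outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: grafana-nodeport-service   # assumed name, not in the commit
  namespace: default
spec:
  type: NodePort
  selector:
    component: grafana
  ports:
    - port: 3000
      targetPort: 3000
      nodePort: 30300   # assumption: any free port in the 30000-32767 range works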
prometheus/config-map.yaml (new file, 123 lines added)
@@ -0,0 +1,123 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-server-conf
  labels:
    name: prometheus-server-conf
  namespace: default
data:
  prometheus.yml: |-
    global:
      scrape_interval: 5s
      evaluation_interval: 5s
    rule_files:
      - /etc/prometheus/prometheus.rules
    alerting:
      alertmanagers:
        - scheme: http
          static_configs:
            - targets:
                - "alertmanager.monitoring.svc:9093"

    scrape_configs:
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
          - role: endpoints
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
            action: keep
            regex: default;kubernetes;https

      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics

      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
            action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: kubernetes_namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: kubernetes_pod_name

      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

      - job_name: 'kubernetes-service-endpoints'
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
            action: replace
            target_label: __scheme__
            regex: (https?)
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
            action: replace
            target_label: __address__
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: kubernetes_namespace
          - source_labels: [__meta_kubernetes_service_name]
            action: replace
            target_label: kubernetes_name
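The 'kubernetes-pods' job above only keeps pods that opt in through annotations, then rewrites the scrape path and port from them, just as the grafana and prometheus services in this commit do at the service level. A minimal sketch of the pod-template annotations an application Deployment would carry to be picked up; the path and port values here are placeholders, not taken from the commit:

# Hedged example: pod-template annotations matched by the 'kubernetes-pods'
# relabel rules above (values are placeholders).
spec:
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"   # matched by the keep rule
        prometheus.io/path: /metrics   # rewritten into __metrics_path__
        prometheus.io/port: "8080"     # rewritten into __address__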
prometheus/prometheus-deployment.yaml (new file, 40 lines added)
@@ -0,0 +1,40 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: prometheus-deployment
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: prometheus-server
    spec:
      containers:
        - name: prometheus
          image: prom/prometheus:latest
          args:
            - "--config.file=/etc/prometheus/prometheus.yml"
            - "--storage.tsdb.path=/prometheus/"
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: prometheus-config-volume
              mountPath: /etc/prometheus/
            - name: prometheus-storage-volume
              mountPath: /prometheus/
          resources:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
      volumes:
        - name: prometheus-config-volume
          configMap:
            defaultMode: 420
            name: prometheus-server-conf

        - name: prometheus-storage-volume
          emptyDir: {}
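This manifest still uses the extensions/v1beta1 Deployment API, which Kubernetes 1.16 and later no longer serve. If the cluster the Vagrantfile builds runs a current release, the header would need the apps/v1 form, which also makes spec.selector mandatory; a hedged sketch of just that change, with the containers and volumes staying as above:

# Hedged sketch: apps/v1 form of the same Deployment; apps/v1 requires an
# explicit selector that matches the pod template labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-deployment
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-server
  template:
    metadata:
      labels:
        app: prometheus-server
    # spec: containers and volumes unchanged from the manifest above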
prometheus/prometheus-ip-service.yaml (new file, 18 lines added)
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: prometheus-ip-service
  namespace: default
  # namespace: monitoring
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/path: /metrics
    prometheus.io/port: '9090'

spec:
  selector:
    app: prometheus-server
  type: ClusterIP
  ports:
    - port: 9090
      targetPort: 9090
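With both workloads in the default namespace, Grafana can reach Prometheus at http://prometheus-ip-service:9090 from the same namespace, or prometheus-ip-service.default.svc:9090 from elsewhere. In case the pharosproduction/grafana:6 image does not already ship a datasource for it, a sketch of a Grafana provisioning file pointing at this service; the file itself is not part of the commit:

# Hypothetical Grafana datasource provisioning file, e.g. mounted at
# /etc/grafana/provisioning/datasources/prometheus.yaml; not included in the commit.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus-ip-service.default.svc:9090
    isDefault: true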