mirror of https://github.com/jprdonnelly/kubernetes-cluster.git (synced 2025-12-23 21:04:13 -05:00)

Merge branch 'master' of https://github.com/jprdonnelly/kubernetes-cluster

README.md (37 changes)
@@ -59,6 +59,43 @@ k8s-nfs Ready <none> 53m v1.15.2
k8s-node1   Ready   <none>   65m   v1.15.2
k8s-node2   Ready   <none>   61m   v1.15.2
```

### Install Helm

```bash
vagrant@k8s-master:~$ curl -LO https://git.io/get_helm.sh
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100  7034  100  7034    0     0  40425      0 --:--:-- --:--:-- --:--:-- 40425
vagrant@k8s-master:~$ chmod +x ./get_helm.sh
vagrant@k8s-master:~$ ./get_helm.sh --version v2.14.3
Downloading https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz
Preparing to install helm and tiller into /usr/local/bin
helm installed into /usr/local/bin/helm
tiller installed into /usr/local/bin/tiller
Run 'helm init' to configure helm.
vagrant@k8s-master:~$ helm init --service-account tiller --wait
Creating /home/vagrant/.helm
Creating /home/vagrant/.helm/repository
Creating /home/vagrant/.helm/repository/cache
Creating /home/vagrant/.helm/repository/local
Creating /home/vagrant/.helm/plugins
Creating /home/vagrant/.helm/starters
Creating /home/vagrant/.helm/cache/archive
Creating /home/vagrant/.helm/repository/repositories.yaml
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
Adding local repo with URL: http://127.0.0.1:8879/charts
$HELM_HOME has been configured at /home/vagrant/.helm.

Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.

Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
To prevent this, run `helm init` with the --tiller-tls-verify flag.
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
vagrant@k8s-master:~$
```
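Note that `helm init --service-account tiller` presupposes a `tiller` ServiceAccount bound to sufficient RBAC; that setup is not shown in this hunk. A minimal sketch of the conventional Helm v2 prerequisite (the names are the usual convention, not taken from this commit):

```bash
# Conventional Helm v2 RBAC bootstrap (assumed, not shown in this diff):
# create the tiller ServiceAccount and bind it to cluster-admin.
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller-cluster-admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:tiller
```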
### Install NFS Provisioner

```bash
```
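The hunk ends just as this code block opens, so the section's commands are not shown in the diff. Given the `gke/nfs-helm-values.yaml` added later in this commit, which follows the layout of the `stable/nfs-server-provisioner` chart, the install step would presumably look something like this sketch (the chart and release names are assumptions, not the commit's own command):

```bash
# Hypothetical Helm v2 install using the values file added in this commit;
# chart and release names are assumptions.
helm install stable/nfs-server-provisioner \
  --name nfs-provisioner \
  -f gke/nfs-helm-values.yaml
```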
Vagrantfile (vendored, 32 changes)
@@ -54,25 +54,11 @@ $configureBox = <<-SCRIPT
# # Install CRI docker via install script
# curl -sSL get.docker.com | sh

# # Setup daemon.
# sudo bash -c 'cat <<EOF> /etc/docker/daemon.json
# {
#   "exec-opts": ["native.cgroupdriver=systemd"],
#   "log-driver": "json-file",
#   "log-opts": {
#     "max-size": "100m"
#   },
#   "storage-driver": "overlay2"
# }
# EOF'
# # Restart docker.
# sudo systemctl daemon-reload
# sudo systemctl restart docker

# sudo mkdir -p /etc/systemd/system/docker.service.d

# # Restart docker.
# sudo systemctl daemon-reload
# sudo systemctl restart docker

# # run docker commands as vagrant user (sudo not required)
# run docker commands as vagrant user (sudo not required)
# sudo usermod -aG docker vagrant

# # Install CRI containerd
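The commented `daemon.json` block pins Docker to the `systemd` cgroup driver so it matches the kubelet's. If that path is ever re-enabled, a quick way to confirm the driver took effect after the restart (a suggested check, not part of the commit):

```bash
# Verify Docker's active cgroup driver; expect "systemd".
docker info --format '{{.CgroupDriver}}'
```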
@@ -164,6 +150,16 @@ $configureMaster = <<-SCRIPT
echo "################################################################"
kubectl apply -f https://raw.githubusercontent.com/jprdonnelly/kubernetes-cluster/master/base/metrics-server.yaml

# echo "################################################################"
# echo "Deploying Helm and Init Tiller"
# echo "################################################################"
# curl -o /home/vagrant/get_helm.sh -LO https://git.io/get_helm.sh
# chmod +x /home/vagrant/get_helm.sh
# /home/vagrant/get_helm.sh --version v2.14.3
# sleep 5
# /usr/local/bin/helm init --service-account tiller --wait
# /usr/local/bin/helm version

# required for setting up passwordless SSH between guest VMs
sudo sed -i "/^[^#]*PasswordAuthentication[[:space:]]no/c\PasswordAuthentication yes" /etc/ssh/sshd_config
sudo service sshd restart
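Once the metrics-server manifest has been applied and its pods are Ready, the resource-metrics API should start answering. A quick smoke test (the pod label is a typical one, an assumption for this custom manifest):

```bash
# Confirm metrics-server is running, then query node metrics.
kubectl -n kube-system get pods -l k8s-app=metrics-server   # label is an assumption
kubectl top nodes
```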
@@ -161,7 +161,7 @@ replicaSet:
  ## Number of replicas per each node type
  ##
  replicas:
    secondary: 2
    secondary: 1
    arbiter: 1

  ## Pod Disruption Budget
gke/nfs-helm-pvc.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-provisioner-vol
spec:
  capacity:
    storage: 100Gi
  # volumeMode field requires BlockVolume Alpha feature gate to be enabled.
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: standard
  local:
    path: /storage/dynamic
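One caveat with this manifest: Kubernetes requires PersistentVolumes with a `local:` source to declare `spec.nodeAffinity`, so as written the API server will likely reject it. A sketch of the missing stanza, with the node name left as a placeholder:

```yaml
spec:
  # Required companion to "local:"; pins the PV to the node hosting the path.
  # <NODE_NAME> is a placeholder (e.g. the k8s-nfs node in this cluster).
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - <NODE_NAME>
```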
gke/nfs-helm-values.yaml (new file, 76 lines)

@@ -0,0 +1,76 @@
replicaCount: 1

image:
  repository: quay.io/kubernetes_incubator/nfs-provisioner
  pullPolicy: IfNotPresent

service:
  type: ClusterIP

  nfsPort: 2049
  mountdPort: 20048
  rpcbindPort: 51413

persistence:
  enabled: true

  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: "standard"

  accessMode: ReadWriteOnce
  size: 100Gi

## For creating the StorageClass automatically:
storageClass:
  create: true

  ## Set a provisioner name. If unset, a name will be generated.
  provisionerName: "provisioner.local/nfs"

  ## Set StorageClass as the default StorageClass
  ## Ignored if storageClass.create is false
  defaultClass: false

  ## Set a StorageClass name
  ## Ignored if storageClass.create is false
  name: nfs-dynamic

  # set to null to prevent expansion
  allowVolumeExpansion: true
  ## StorageClass parameters
  parameters: {}

  mountOptions:
    - vers=4.1
    - noatime

  ## ReclaimPolicy field of the class, which can be either Delete or Retain
  reclaimPolicy: Delete

## For RBAC support:
rbac:
  create: true

  ## Ignored if rbac.create is true
  ##
  serviceAccountName: default

resources: {}
  # limits:
  #  cpu: 100m
  #  memory: 128Mi
  # requests:
  #  cpu: 100m
  #  memory: 128Mi

nodeSelector: {}
tolerations:
  - effect: NoSchedule
    operator: Exists
affinity: {}
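With `storageClass.create: true` and `name: nfs-dynamic`, workloads can request dynamically provisioned NFS volumes through the new class. A minimal sketch of such a claim (the claim name and size are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-nfs-claim      # illustrative name
spec:
  storageClassName: nfs-dynamic   # the class created by these values
  accessModes:
    - ReadWriteMany             # NFS supports shared read-write mounts
  resources:
    requests:
      storage: 1Gi
```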
@@ -15,10 +15,10 @@ identity-providers:
        clientSecret: "<CLIENT_SECRET>"
        realm: "Auth0"
        hostname: "<INGRESS_IP/HOSTNAME>"
        claimsMapping:
          client_id: [ "client_id", "azp" ]
          groups: "/https:~1~1qlik.com~1groups"
          sub: ["/https:~1~1qlik.com~1sub", "sub"]
        useClaimsFromIdToken: true
#       claimsMapping:
#         client_id: "user_id"
#         name: "name"
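The `~1` sequences in the claims mapping are JSON Pointer escapes (RFC 6901: `~1` decodes to `/`, `~0` to `~`), so each pointer names a single namespaced claim rather than a nested path:

```yaml
# JSON Pointer decoding for the mappings above:
#   "/https:~1~1qlik.com~1groups" -> the claim named "https://qlik.com/groups"
#   "/https:~1~1qlik.com~1sub"    -> the claim named "https://qlik.com/sub"
```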
# We use a global persistence statement for maximum compatibility with future releases. This will create multiple PVCs.
global: