Merge pull request #120 from marcel-dempers/vault

vault-2022
Marcel Dempers 2022-01-26 12:17:19 +11:00 committed by GitHub
commit cfb70bac24
12 changed files with 1802 additions and 1 deletion

View File

@@ -0,0 +1,11 @@
global:
datacenter: vault-kubernetes-guide
client:
enabled: true
server:
replicas: 1
bootstrapExpect: 1
disruptionBudget:
maxUnavailable: 0

View File

@@ -0,0 +1,39 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: basic-secret
labels:
app: basic-secret
spec:
selector:
matchLabels:
app: basic-secret
replicas: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/tls-skip-verify: "true"
vault.hashicorp.com/agent-inject-secret-helloworld: "secret/basic-secret/helloworld"
vault.hashicorp.com/agent-inject-template-helloworld: |
{{- with secret "secret/basic-secret/helloworld" -}}
{
"username" : "{{ .Data.username }}",
"password" : "{{ .Data.password }}"
}
{{- end }}
vault.hashicorp.com/role: "basic-secret-role"
labels:
app: basic-secret
spec:
serviceAccountName: basic-secret
containers:
- name: app
image: jweissig/app:0.0.1
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: basic-secret
labels:
app: basic-secret

View File

@@ -0,0 +1,52 @@
# Basic Secret Injection
To start using secrets in Vault, we need to set up a role and a policy.
```
#Create a role for our app
kubectl -n vault exec -it vault-0 -- sh
vault write auth/kubernetes/role/basic-secret-role \
bound_service_account_names=basic-secret \
bound_service_account_namespaces=example-app \
policies=basic-secret-policy \
ttl=1h
```
The role above maps the Kubernetes service account used by our pod to a Vault policy.
Now let's create that policy, which grants our service account read access to its secrets:
```
kubectl -n vault exec -it vault-0 -- sh
cat <<EOF > /home/vault/app-policy.hcl
path "secret/basic-secret/*" {
capabilities = ["read"]
}
EOF
vault policy write basic-secret-policy /home/vault/app-policy.hcl
```
Our pod's service account can now read any secret under `secret/basic-secret/*`.
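If you want to double-check the bindings before deploying, you can read both back from inside `vault-0` (an optional check; it assumes you are still logged in with a token that can read them):
```
kubectl -n vault exec -it vault-0 -- sh
vault policy read basic-secret-policy
vault read auth/kubernetes/role/basic-secret-role
```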
Let's create some secrets:
```
kubectl -n vault exec -it vault-0 -- sh
vault secrets enable -path=secret/ kv
vault kv put secret/basic-secret/helloworld username=dbuser password=sUp3rS3cUr3P@ssw0rd
```
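Still inside the `vault-0` shell, you can read the secret back to confirm it was stored (optional):
```
vault kv get secret/basic-secret/helloworld
```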
Let's deploy our app and see if it works:
```
kubectl create ns example-app
kubectl -n example-app apply -f ./example-apps/basic-secret/deployment.yaml
kubectl -n example-app get pods
```
Once the pod is ready (the injector adds a `vault-agent` sidecar, so expect `2/2` containers), the secret is injected into the pod at the following location:
```
kubectl -n example-app exec <pod-name> -- sh -c "cat /vault/secrets/helloworld"
```
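If the injection worked, the rendered template contains the values we stored earlier, so the output should look roughly like this:
```
{
  "username" : "dbuser",
  "password" : "sUp3rS3cUr3P@ssw0rd"
}
```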

View File

@@ -0,0 +1,7 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker

View File

@@ -0,0 +1,626 @@
---
# Source: consul/templates/server-disruptionbudget.yaml
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
spec:
maxUnavailable: 0
selector:
matchLabels:
app: consul
release: "consul"
component: server
---
# Source: consul/templates/client-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul-consul-client
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: client
---
# Source: consul/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
---
# Source: consul/templates/client-config-configmap.yaml
# ConfigMap with extra configuration specified directly to the chart
# for client agents only.
apiVersion: v1
kind: ConfigMap
metadata:
name: consul-consul-client-config
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: client
data:
extra-from-values.json: |-
{}
central-config.json: |-
{
"enable_central_service_config": true
}
---
# Source: consul/templates/server-config-configmap.yaml
# StatefulSet to run the actual Consul server cluster.
apiVersion: v1
kind: ConfigMap
metadata:
name: consul-consul-server-config
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
data:
extra-from-values.json: |-
{}
central-config.json: |-
{
"enable_central_service_config": true
}
---
# Source: consul/templates/client-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul-consul-client
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: client
rules: []
---
# Source: consul/templates/server-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
rules: []
---
# Source: consul/templates/client-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul-consul-client
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: client
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul-consul-client
subjects:
- kind: ServiceAccount
name: consul-consul-client
---
# Source: consul/templates/server-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul-consul-server
subjects:
- kind: ServiceAccount
name: consul-consul-server
---
# Source: consul/templates/dns-service.yaml
# Service for Consul DNS.
apiVersion: v1
kind: Service
metadata:
name: consul-consul-dns
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: dns
spec:
type: ClusterIP
ports:
- name: dns-tcp
port: 53
protocol: "TCP"
targetPort: dns-tcp
- name: dns-udp
port: 53
protocol: "UDP"
targetPort: dns-udp
selector:
app: consul
release: "consul"
hasDNS: "true"
---
# Source: consul/templates/server-service.yaml
# Headless service for Consul server DNS entries. This service should only
# point to Consul servers. For access to an agent, one should assume that
# the agent is installed locally on the node and the NODE_IP should be used.
# If the node can't run a Consul agent, then this service can be used to
# communicate directly to a server agent.
apiVersion: v1
kind: Service
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
annotations:
# This must be set in addition to publishNotReadyAddresses due
# to an open issue where it may not work:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None
# We want the servers to become available even if they're not ready
# since this DNS is also used for join operations.
publishNotReadyAddresses: true
ports:
- name: http
port: 8500
targetPort: 8500
- name: serflan-tcp
protocol: "TCP"
port: 8301
targetPort: 8301
- name: serflan-udp
protocol: "UDP"
port: 8301
targetPort: 8301
- name: serfwan-tcp
protocol: "TCP"
port: 8302
targetPort: 8302
- name: serfwan-udp
protocol: "UDP"
port: 8302
targetPort: 8302
- name: server
port: 8300
targetPort: 8300
- name: dns-tcp
protocol: "TCP"
port: 8600
targetPort: dns-tcp
- name: dns-udp
protocol: "UDP"
port: 8600
targetPort: dns-udp
selector:
app: consul
release: "consul"
component: server
---
# Source: consul/templates/ui-service.yaml
# UI Service for Consul Server
apiVersion: v1
kind: Service
metadata:
name: consul-consul-ui
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: ui
spec:
selector:
app: consul
release: "consul"
component: server
ports:
- name: http
port: 80
targetPort: 8500
---
# Source: consul/templates/client-daemonset.yaml
# DaemonSet to run the Consul clients on every node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: consul-consul
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: client
spec:
selector:
matchLabels:
app: consul
chart: consul-helm
release: consul
component: client
hasDNS: "true"
template:
metadata:
labels:
app: consul
chart: consul-helm
release: consul
component: client
hasDNS: "true"
annotations:
"consul.hashicorp.com/connect-inject": "false"
"consul.hashicorp.com/config-checksum": 797b3593a73b78fc74f3b1e3b978107b3022d4649802185631f959f000234331
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: consul-consul-client
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 100
volumes:
- name: data
emptyDir: {}
- name: config
configMap:
name: consul-consul-client-config
containers:
- name: consul
image: "hashicorp/consul:1.11.1"
env:
- name: ADVERTISE_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CONSUL_DISABLE_PERM_MGMT
value: "true"
command:
- "/bin/sh"
- "-ec"
- |
CONSUL_FULLNAME="consul-consul"
mkdir -p /consul/extra-config
cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json
[ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json
[ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json
[ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json
exec /usr/local/bin/docker-entrypoint.sh consul agent \
-node="${NODE}" \
-advertise="${ADVERTISE_IP}" \
-bind=0.0.0.0 \
-client=0.0.0.0 \
-node-meta=host-ip:${HOST_IP} \
-node-meta=pod-name:${HOSTNAME} \
-hcl='leave_on_terminate = true' \
-hcl='ports { grpc = 8502 }' \
-config-dir=/consul/config \
-datacenter=vault-kubernetes-guide \
-data-dir=/consul/data \
-retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \
-config-file=/consul/extra-config/extra-from-values.json \
-domain=consul
volumeMounts:
- name: data
mountPath: /consul/data
- name: config
mountPath: /consul/config
ports:
- containerPort: 8500
hostPort: 8500
name: http
- containerPort: 8502
hostPort: 8502
name: grpc
- containerPort: 8301
protocol: "TCP"
name: serflan-tcp
- containerPort: 8301
protocol: "UDP"
name: serflan-udp
- containerPort: 8600
name: dns-tcp
protocol: "TCP"
- containerPort: 8600
name: dns-udp
protocol: "UDP"
readinessProbe:
# NOTE(mitchellh): when our HTTP status endpoints support the
# proper status codes, we should switch to that. This is temporary.
exec:
command:
- "/bin/sh"
- "-ec"
- |
curl http://127.0.0.1:8500/v1/status/leader \
2>/dev/null | grep -E '".+"'
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
null
---
# Source: consul/templates/server-statefulset.yaml
# StatefulSet to run the actual Consul server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul-consul-server
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
component: server
spec:
serviceName: consul-consul-server
podManagementPolicy: Parallel
replicas: 1
selector:
matchLabels:
app: consul
chart: consul-helm
release: consul
component: server
hasDNS: "true"
template:
metadata:
labels:
app: consul
chart: consul-helm
release: consul
component: server
hasDNS: "true"
annotations:
"consul.hashicorp.com/connect-inject": "false"
"consul.hashicorp.com/config-checksum": c9b100f895d5bda6a5c8bbebac73e1ab5bdc4cad06b04e72eb1b620677bfe41d
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
release: "consul"
component: server
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 30
serviceAccountName: consul-consul-server
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 100
volumes:
- name: config
configMap:
name: consul-consul-server-config
containers:
- name: consul
image: "hashicorp/consul:1.11.1"
env:
- name: ADVERTISE_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONSUL_DISABLE_PERM_MGMT
value: "true"
command:
- "/bin/sh"
- "-ec"
- |
CONSUL_FULLNAME="consul-consul"
mkdir -p /consul/extra-config
cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json
[ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json
[ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json
[ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json
exec /usr/local/bin/docker-entrypoint.sh consul agent \
-advertise="${ADVERTISE_IP}" \
-bind=0.0.0.0 \
-bootstrap-expect=1 \
-client=0.0.0.0 \
-config-dir=/consul/config \
-datacenter=vault-kubernetes-guide \
-data-dir=/consul/data \
-domain=consul \
-hcl="connect { enabled = true }" \
-ui \
-retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \
-serf-lan-port=8301 \
-config-file=/consul/extra-config/extra-from-values.json \
-server
volumeMounts:
- name: data-vault
mountPath: /consul/data
- name: config
mountPath: /consul/config
ports:
- name: http
containerPort: 8500
- name: serflan-tcp
containerPort: 8301
protocol: "TCP"
- name: serflan-udp
containerPort: 8301
protocol: "UDP"
- name: serfwan-tcp
containerPort: 8302
protocol: "TCP"
- name: serfwan-udp
containerPort: 8302
protocol: "UDP"
- name: server
containerPort: 8300
- name: dns-tcp
containerPort: 8600
protocol: "TCP"
- name: dns-udp
containerPort: 8600
protocol: "UDP"
readinessProbe:
# NOTE(mitchellh): when our HTTP status endpoints support the
# proper status codes, we should switch to that. This is temporary.
exec:
command:
- "/bin/sh"
- "-ec"
- |
curl http://127.0.0.1:8500/v1/status/leader \
2>/dev/null | grep -E '".+"'
failureThreshold: 2
initialDelaySeconds: 5
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
null
volumeClaimTemplates:
- metadata:
name: data-vault
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# Source: consul/templates/tests/test-runner.yaml
apiVersion: v1
kind: Pod
metadata:
name: "consul-consul-test"
namespace: vault
labels:
app: consul
chart: consul-helm
heritage: Helm
release: consul
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: consul-test
image: "hashicorp/consul:1.11.1"
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: CONSUL_HTTP_ADDR
value: http://$(HOST_IP):8500
command:
- "/bin/sh"
- "-ec"
- |
consul members | tee members.txt
if [ $(grep -c consul-server members.txt) != $(grep consul-server members.txt | grep -c alive) ]
then
echo "Failed because not all consul servers are available"
exit 1
fi
restartPolicy: Never

View File

@@ -0,0 +1,710 @@
---
# Source: vault/templates/server-disruptionbudget.yaml
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: vault
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
---
# Source: vault/templates/injector-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault-agent-injector
namespace: vault
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
---
# Source: vault/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
---
# Source: vault/templates/server-config-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: vault-config
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
data:
extraconfig-from-values.hcl: |-
disable_mlock = true
ui = true
listener "tcp" {
tls_disable = 0
address = "0.0.0.0:8200"
tls_cert_file = "/vault/userconfig/tls-server/tls.crt"
tls_key_file = "/vault/userconfig/tls-server/tls.key"
tls_min_version = "tls12"
}
storage "consul" {
path = "vault"
address = "consul-consul-server:8500"
}
---
# Source: vault/templates/injector-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: vault-agent-injector-clusterrole
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations"]
verbs:
- "get"
- "list"
- "watch"
- "patch"
---
# Source: vault/templates/injector-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: vault-agent-injector-binding
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vault-agent-injector-clusterrole
subjects:
- kind: ServiceAccount
name: vault-agent-injector
namespace: vault
---
# Source: vault/templates/server-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: vault-server-binding
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: vault
namespace: vault
---
# Source: vault/templates/server-discovery-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: vault
name: vault-discovery-role
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "update", "patch"]
---
# Source: vault/templates/server-discovery-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: vault-discovery-rolebinding
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vault-discovery-role
subjects:
- kind: ServiceAccount
name: vault
namespace: vault
---
# Source: vault/templates/injector-service.yaml
apiVersion: v1
kind: Service
metadata:
name: vault-agent-injector-svc
namespace: vault
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: https
port: 443
targetPort: 8080
selector:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
component: webhook
---
# Source: vault/templates/server-ha-active-service.yaml
# Service for active Vault pod
apiVersion: v1
kind: Service
metadata:
name: vault-active
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
annotations:
spec:
publishNotReadyAddresses: true
ports:
- name: https
port: 8200
targetPort: 8200
- name: https-internal
port: 8201
targetPort: 8201
selector:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
vault-active: "true"
---
# Source: vault/templates/server-ha-standby-service.yaml
# Service for standby Vault pod
apiVersion: v1
kind: Service
metadata:
name: vault-standby
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
annotations:
spec:
publishNotReadyAddresses: true
ports:
- name: https
port: 8200
targetPort: 8200
- name: https-internal
port: 8201
targetPort: 8201
selector:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
vault-active: "false"
---
# Source: vault/templates/server-headless-service.yaml
# Service for Vault cluster
apiVersion: v1
kind: Service
metadata:
name: vault-internal
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
annotations:
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: "https"
port: 8200
targetPort: 8200
- name: https-internal
port: 8201
targetPort: 8201
selector:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
---
# Source: vault/templates/server-service.yaml
# Service for Vault cluster
apiVersion: v1
kind: Service
metadata:
name: vault
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
annotations:
spec:
# We want the servers to become available even if they're not ready
# since this DNS is also used for join operations.
publishNotReadyAddresses: true
ports:
- name: https
port: 8200
targetPort: 8200
- name: https-internal
port: 8201
targetPort: 8201
selector:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
---
# Source: vault/templates/ui-service.yaml
apiVersion: v1
kind: Service
metadata:
name: vault-ui
namespace: vault
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault-ui
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
spec:
selector:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
publishNotReadyAddresses: true
ports:
- name: https
port: 8200
targetPort: 8200
type: ClusterIP
---
# Source: vault/templates/injector-deployment.yaml
# Deployment for the injector
apiVersion: apps/v1
kind: Deployment
metadata:
name: vault-agent-injector
namespace: vault
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
component: webhook
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
component: webhook
template:
metadata:
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
component: webhook
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: "vault"
component: webhook
topologyKey: kubernetes.io/hostname
serviceAccountName: "vault-agent-injector"
hostNetwork: false
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 100
containers:
- name: sidecar-injector
resources:
limits:
cpu: 250m
memory: 256Mi
requests:
cpu: 50m
memory: 50Mi
image: "hashicorp/vault-k8s:0.14.1"
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
env:
- name: AGENT_INJECT_LISTEN
value: :8080
- name: AGENT_INJECT_LOG_LEVEL
value: info
- name: AGENT_INJECT_VAULT_ADDR
value: https://vault.vault.svc:8200
- name: AGENT_INJECT_VAULT_AUTH_PATH
value: auth/kubernetes
- name: AGENT_INJECT_VAULT_IMAGE
value: "hashicorp/vault:1.9.2"
- name: AGENT_INJECT_TLS_AUTO
value: vault-agent-injector-cfg
- name: AGENT_INJECT_TLS_AUTO_HOSTS
value: vault-agent-injector-svc,vault-agent-injector-svc.vault,vault-agent-injector-svc.vault.svc
- name: AGENT_INJECT_LOG_FORMAT
value: standard
- name: AGENT_INJECT_REVOKE_ON_SHUTDOWN
value: "false"
- name: AGENT_INJECT_CPU_REQUEST
value: "250m"
- name: AGENT_INJECT_CPU_LIMIT
value: "500m"
- name: AGENT_INJECT_MEM_REQUEST
value: "64Mi"
- name: AGENT_INJECT_MEM_LIMIT
value: "128Mi"
- name: AGENT_INJECT_DEFAULT_TEMPLATE
value: "map"
- name: AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE
value: "true"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
args:
- agent-inject
- 2>&1
livenessProbe:
httpGet:
path: /health/ready
port: 8080
scheme: HTTPS
failureThreshold: 2
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /health/ready
port: 8080
scheme: HTTPS
failureThreshold: 2
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 5
---
# Source: vault/templates/server-statefulset.yaml
# StatefulSet to run the actual vault server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: vault
namespace: vault
labels:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
spec:
serviceName: vault-internal
podManagementPolicy: Parallel
replicas: 3
updateStrategy:
type: OnDelete
selector:
matchLabels:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
template:
metadata:
labels:
helm.sh/chart: vault-0.19.0
app.kubernetes.io/name: vault
app.kubernetes.io/instance: vault
component: server
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: vault
app.kubernetes.io/instance: "vault"
component: server
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
serviceAccountName: vault
securityContext:
runAsNonRoot: true
runAsGroup: 1000
runAsUser: 100
fsGroup: 1000
volumes:
- name: config
configMap:
name: vault-config
- name: userconfig-tls-server
secret:
secretName: tls-server
defaultMode: 420
- name: userconfig-tls-ca
secret:
secretName: tls-ca
defaultMode: 420
- name: home
emptyDir: {}
containers:
- name: vault
resources:
limits:
cpu: 2000m
memory: 16Gi
requests:
cpu: 500m
memory: 50Mi
image: hashicorp/vault:1.9.2
imagePullPolicy: IfNotPresent
command:
- "/bin/sh"
- "-ec"
args:
- |
cp /vault/config/extraconfig-from-values.hcl /tmp/storageconfig.hcl;
[ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /tmp/storageconfig.hcl;
[ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /tmp/storageconfig.hcl;
[ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /tmp/storageconfig.hcl;
[ -n "${API_ADDR}" ] && sed -Ei "s|API_ADDR|${API_ADDR?}|g" /tmp/storageconfig.hcl;
[ -n "${TRANSIT_ADDR}" ] && sed -Ei "s|TRANSIT_ADDR|${TRANSIT_ADDR?}|g" /tmp/storageconfig.hcl;
[ -n "${RAFT_ADDR}" ] && sed -Ei "s|RAFT_ADDR|${RAFT_ADDR?}|g" /tmp/storageconfig.hcl;
/usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl
securityContext:
allowPrivilegeEscalation: false
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: VAULT_K8S_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: VAULT_K8S_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: VAULT_ADDR
value: "https://127.0.0.1:8200"
- name: VAULT_API_ADDR
value: "https://$(POD_IP):8200"
- name: SKIP_CHOWN
value: "true"
- name: SKIP_SETCAP
value: "true"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: VAULT_CLUSTER_ADDR
value: "https://$(HOSTNAME).vault-internal:8201"
- name: HOME
value: "/home/vault"
- name: "VAULT_CACERT"
value: "/vault/userconfig/tls-ca/tls.crt"
volumeMounts:
- name: config
mountPath: /vault/config
- name: userconfig-tls-server
readOnly: true
mountPath: /vault/userconfig/tls-server
- name: userconfig-tls-ca
readOnly: true
mountPath: /vault/userconfig/tls-ca
- name: home
mountPath: /home/vault
ports:
- containerPort: 8200
name: https
- containerPort: 8201
name: https-internal
- containerPort: 8202
name: https-rep
readinessProbe:
httpGet:
path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
port: 8200
scheme: HTTPS
failureThreshold: 2
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
livenessProbe:
httpGet:
path: "/v1/sys/health?standbyok=true"
port: 8200
scheme: HTTPS
failureThreshold: 2
initialDelaySeconds: 60
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
lifecycle:
# Vault container doesn't receive SIGTERM from Kubernetes
# and after the grace period ends, Kube sends SIGKILL. This
# causes issues with graceful shutdowns such as deregistering itself
# from Consul (zombie services).
preStop:
exec:
command: [
"/bin/sh", "-c",
# Adding a sleep here to give the pod eviction a
# chance to propagate, so requests will not be made
# to this pod while it's terminating
"sleep 5 && kill -SIGTERM $(pidof vault)",
]
volumeClaimTemplates:
---
# Source: vault/templates/injector-mutating-webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: vault-agent-injector-cfg
labels:
app.kubernetes.io/name: vault-agent-injector
app.kubernetes.io/instance: vault
app.kubernetes.io/managed-by: Helm
webhooks:
- name: vault.hashicorp.com
sideEffects: None
admissionReviewVersions:
- "v1beta1"
- "v1"
clientConfig:
service:
name: vault-agent-injector-svc
namespace: vault
path: "/mutate"
caBundle: ""
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
failurePolicy: Ignore
---
# Source: vault/templates/tests/server-test.yaml
apiVersion: v1
kind: Pod
metadata:
name: "vault-server-test"
namespace: vault
annotations:
"helm.sh/hook": test
spec:
containers:
- name: vault-server-test
image: hashicorp/vault:1.9.2
imagePullPolicy: IfNotPresent
env:
- name: VAULT_ADDR
value: https://vault.vault.svc:8200
- name: "VAULT_CACERT"
value: "/vault/userconfig/tls-ca/tls.crt"
command:
- /bin/sh
- -c
- |
echo "Checking for sealed info in 'vault status' output"
ATTEMPTS=10
n=0
until [ "$n" -ge $ATTEMPTS ]
do
echo "Attempt" $n...
vault status -format yaml | grep -E '^sealed: (true|false)' && break
n=$((n+1))
sleep 5
done
if [ $n -ge $ATTEMPTS ]; then
echo "timed out looking for sealed info in 'vault status' output"
exit 1
fi
exit 0
volumeMounts:
volumes:
restartPolicy: Never

View File

@@ -0,0 +1,200 @@
# Hashicorp Vault Guide
Requirements:
* Kubernetes 1.21
* Kind or Minikube
For this tutorial, I will be using Kubernetes 1.21.
If you are following the old guide for Kubernetes 1.17, go [here](../vault/readme.md)
Let's create a Kubernetes cluster to play with, using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/):
```
cd hashicorp/vault-2022
kind create cluster --name vault --image kindest/node:v1.21.1 --config kind.yaml
```
Next up, I will run a small container and do all the work from there.
You can skip this part if you already have `kubectl` and `helm` on your machine.
```
docker run -it --rm --net host -v ${HOME}/.kube/:/root/.kube/ -v ${PWD}:/work -w /work alpine sh
```
Install `kubectl`
```
apk add --no-cache curl
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
```
Install `helm`
```
curl -LO https://get.helm.sh/helm-v3.7.2-linux-amd64.tar.gz
tar -C /tmp/ -zxvf helm-v3.7.2-linux-amd64.tar.gz
rm helm-v3.7.2-linux-amd64.tar.gz
mv /tmp/linux-amd64/helm /usr/local/bin/helm
chmod +x /usr/local/bin/helm
```
Now we have `helm` and `kubectl` and can access our `kind` cluster:
```
kubectl get nodes
NAME STATUS ROLES AGE VERSION
vault-control-plane Ready control-plane,master 37s v1.21.1
```
Let's add the HashiCorp Helm repository so we can render the Kubernetes manifests from its charts:
```
helm repo add hashicorp https://helm.releases.hashicorp.com
```
## Storage: Consul
We will use a very basic Consul cluster for our Vault backend. <br/>
Let's find what versions of Consul are available:
```
helm search repo hashicorp/consul --versions
```
We can use chart `0.39.0`, which is the latest at the time of this demo.
Let's create a manifests folder and grab the YAML:
```
mkdir manifests
helm template consul hashicorp/consul \
--namespace vault \
--version 0.39.0 \
-f consul-values.yaml \
> ./manifests/consul.yaml
```
Deploy the consul services:
```
kubectl create ns vault
kubectl -n vault apply -f ./manifests/consul.yaml
kubectl -n vault get pods
```
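Once the pods are up, you can optionally check that the Consul server has a leader before moving on; the server pod name follows the StatefulSet naming above (`consul-consul-server-0`):
```
kubectl -n vault exec consul-consul-server-0 -- consul members
```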
## TLS End to End Encryption
See steps in [./tls/ssl_generate_self_signed.md](./tls/ssl_generate_self_signed.md)
You'll need to generate TLS certificates (or bring your own).
Remember not to check your TLS certificates and keys into Git :)
Create the TLS secrets:
```
kubectl -n vault create secret tls tls-ca \
--cert ./tls/ca.pem \
--key ./tls/ca-key.pem
kubectl -n vault create secret tls tls-server \
--cert ./tls/vault.pem \
--key ./tls/vault-key.pem
```
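A quick optional check that both secrets landed in the `vault` namespace:
```
kubectl -n vault get secret tls-ca tls-server
```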
## Generate Kubernetes Manifests
Let's find what versions of Vault are available:
```
helm search repo hashicorp/vault --versions
```
In this demo I will use the `0.19.0` chart. <br/>
First, let's create a `values` file (`vault-values.yaml`) to customize Vault.
Let's grab the manifests:
```
helm template vault hashicorp/vault \
--namespace vault \
--version 0.19.0 \
-f vault-values.yaml \
> ./manifests/vault.yaml
```
## Deployment
```
kubectl -n vault apply -f ./manifests/vault.yaml
kubectl -n vault get pods
```
## Initialising Vault
Vault pods start sealed, so we need to initialise the cluster once and then unseal every replica:
```
kubectl -n vault exec -it vault-0 -- sh
kubectl -n vault exec -it vault-1 -- sh
kubectl -n vault exec -it vault-2 -- sh
vault operator init
vault operator unseal
kubectl -n vault exec -it vault-0 -- vault status
kubectl -n vault exec -it vault-1 -- vault status
kubectl -n vault exec -it vault-2 -- vault status
```
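A rough sketch of that flow, with placeholders for the values your own `vault operator init` prints: by default, init produces five unseal key shares and an initial root token, and because all replicas share the Consul backend you only initialise once on `vault-0`, but each pod must be unsealed with three of the shares.
```
# inside vault-0 (run once)
vault operator init
# note the 5 unseal keys and the initial root token it prints

# inside each of vault-0, vault-1 and vault-2, supply any 3 of the 5 keys
vault operator unseal <unseal-key-1>
vault operator unseal <unseal-key-2>
vault operator unseal <unseal-key-3>
```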
## Web UI
Let's check out the web UI:
```
kubectl -n vault get svc
kubectl -n vault port-forward svc/vault-ui 443:8200
```
Now we can access the web UI [here](https://localhost/)
## Enable Kubernetes Authentication
For the injector to be authorised to access Vault, we need to enable Kubernetes auth:
```
kubectl -n vault exec -it vault-0 -- sh
vault login
vault auth enable kubernetes
vault write auth/kubernetes/config \
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
exit
```
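To confirm the auth method is wired up, you can run an optional check from the same `vault-0` shell (before the `exit`, or after exec'ing back in):
```
vault auth list
vault read auth/kubernetes/config
```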
# Summary
So we have Vault, an injector, end-to-end TLS, and stateful storage.
The injector can now inject secrets from Vault into our pods.
Now we are ready to use the platform for different types of secrets:
## Secret Injection Guides
### Basic Secrets
Objective:
----------
* Let's create a basic secret in Vault manually
* The application consumes the secret automatically
[Try it](./example-apps/basic-secret/readme.md)

View File

@@ -0,0 +1,13 @@
{
"signing": {
"default": {
"expiry": "175200h"
},
"profiles": {
"default": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "175200h"
}
}
}
}

View File

@@ -0,0 +1,18 @@
{
"hosts": [
"cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "AU",
"L": "Melbourne",
"O": "Example",
"OU": "CA",
"ST": "Example"
}
]
}

View File

@@ -0,0 +1,40 @@
# Use CFSSL to generate certificates
More about [CFSSL here](https://github.com/cloudflare/cfssl)
```
cd hashicorp/vault-2022/tls
docker run -it --rm -v ${PWD}:/work -w /work debian bash
apt-get update && apt-get install -y curl && \
curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -o /usr/local/bin/cfssl && \
curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -o /usr/local/bin/cfssljson && \
chmod +x /usr/local/bin/cfssl && \
chmod +x /usr/local/bin/cfssljson
#generate ca in /tmp
cfssl gencert -initca ca-csr.json | cfssljson -bare /tmp/ca
#generate certificate in /tmp
cfssl gencert \
-ca=/tmp/ca.pem \
-ca-key=/tmp/ca-key.pem \
-config=ca-config.json \
-hostname="vault,vault.vault.svc.cluster.local,vault.vault.svc,localhost,127.0.0.1" \
-profile=default \
ca-csr.json | cfssljson -bare /tmp/vault
```
View the generated files:
```
ls -l /tmp
```
Move the files into the mounted working directory so they can be used when creating the Kubernetes secrets:
```
mv /tmp/* .
```
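If you want a quick sanity check that the hostnames made it into the server certificate, cfssl can decode it (optional; run from the same directory after the `mv`):
```
cfssl certinfo -cert vault.pem
```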

View File

@@ -0,0 +1,85 @@
# Vault Helm Chart Value Overrides
global:
enabled: true
tlsDisable: false
injector:
enabled: true
# Use the Vault K8s Image https://github.com/hashicorp/vault-k8s/
image:
repository: "hashicorp/vault-k8s"
tag: "0.14.1"
resources:
requests:
memory: 50Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 250m
server:
image:
repository: "hashicorp/vault"
tag: "1.9.2"
# These Resource Limits are in line with node requirements in the
# Vault Reference Architecture for a Small Cluster
resources:
requests:
memory: 50Mi
cpu: 500m
limits:
memory: 16Gi
cpu: 2000m
# For HA configuration and because we need to manually init the vault,
# we need to define custom readiness/liveness Probe settings
readinessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
livenessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true"
initialDelaySeconds: 60
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars:
VAULT_CACERT: /vault/userconfig/tls-ca/tls.crt
# extraVolumes is a list of extra volumes to mount. These will be exposed
# to Vault in the path `/vault/userconfig/<name>/`.
extraVolumes:
- type: secret
name: tls-server
- type: secret
name: tls-ca
standalone:
enabled: false
# Run Vault in "HA" mode.
ha:
enabled: true
replicas: 3
config: |
ui = true
listener "tcp" {
tls_disable = 0
address = "0.0.0.0:8200"
tls_cert_file = "/vault/userconfig/tls-server/tls.crt"
tls_key_file = "/vault/userconfig/tls-server/tls.key"
tls_min_version = "tls12"
}
storage "consul" {
path = "vault"
address = "consul-consul-server:8500"
}
# Vault UI
ui:
enabled: true
externalPort: 8200

View File

@@ -1,4 +1,4 @@
-# Hashicorp Vault Guide
+# Hashicorp Vault Guide - Deprecated
 # Vault