diff --git a/hashicorp/vault-deprecated/example-apps/basic-secret/deployment.yaml b/hashicorp/vault-deprecated/example-apps/basic-secret/deployment.yaml
new file mode 100644
index 0000000..6afa3d8
--- /dev/null
+++ b/hashicorp/vault-deprecated/example-apps/basic-secret/deployment.yaml
@@ -0,0 +1,39 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: basic-secret
+  labels:
+    app: basic-secret
+spec:
+  selector:
+    matchLabels:
+      app: basic-secret
+  replicas: 1
+  template:
+    metadata:
+      annotations:
+        vault.hashicorp.com/agent-inject: "true"
+        vault.hashicorp.com/tls-skip-verify: "true"
+        vault.hashicorp.com/agent-inject-secret-helloworld: "secret/basic-secret/helloworld"
+        vault.hashicorp.com/agent-inject-template-helloworld: |
+          {{- with secret "secret/basic-secret/helloworld" -}}
+          {
+            "username" : "{{ .Data.username }}",
+            "password" : "{{ .Data.password }}"
+          }
+          {{- end }}
+        vault.hashicorp.com/role: "basic-secret-role"
+      labels:
+        app: basic-secret
+    spec:
+      serviceAccountName: basic-secret
+      containers:
+      - name: app
+        image: jweissig/app:0.0.1
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: basic-secret
+  labels:
+    app: basic-secret
\ No newline at end of file
diff --git a/hashicorp/vault-deprecated/example-apps/basic-secret/readme.md b/hashicorp/vault-deprecated/example-apps/basic-secret/readme.md
new file mode 100644
index 0000000..09bae17
--- /dev/null
+++ b/hashicorp/vault-deprecated/example-apps/basic-secret/readme.md
@@ -0,0 +1,50 @@
+# Basic Secret Injection
+
+
+In order for us to start using secrets in Vault, we need to set up a policy.
+
+
+```
+#Create a role for our app
+
+kubectl -n vault-example exec -it vault-example-0 sh
+
+vault write auth/kubernetes/role/basic-secret-role \
+   bound_service_account_names=basic-secret \
+   bound_service_account_namespaces=vault-example \
+   policies=basic-secret-policy \
+   ttl=1h
+```
+
+The above maps our Kubernetes service account, used by our pod, to a policy.
+Now let's create the policy to map our service account to a bunch of secrets:
+
+
+```
+kubectl -n vault-example exec -it vault-example-0 sh
+cat <<EOF > /home/vault/app-policy.hcl
+path "secret/basic-secret/*" {
+  capabilities = ["read"]
+}
+EOF
+vault policy write basic-secret-policy /home/vault/app-policy.hcl
+exit
+```
+
+Now the service account for our pod can access all secrets under `secret/basic-secret/*`.
+Let's create some secrets.
+
+
+```
+kubectl -n vault-example exec -it vault-example-0 sh
+vault secrets enable -path=secret/ kv
+vault kv put secret/basic-secret/helloworld username=dbuser password=sUp3rS3cUr3P@ssw0rd
+exit
+```
+
+Let's deploy our app and see if it works:
+
+```
+kubectl -n vault-example apply -f ./hashicorp/vault-deprecated/example-apps/basic-secret/deployment.yaml
+kubectl -n vault-example get pods
+```
\ No newline at end of file
diff --git a/hashicorp/vault/example-apps/dynamic-postgresql/deployment.yaml b/hashicorp/vault-deprecated/example-apps/dynamic-postgresql/deployment.yaml
similarity index 100%
rename from hashicorp/vault/example-apps/dynamic-postgresql/deployment.yaml
rename to hashicorp/vault-deprecated/example-apps/dynamic-postgresql/deployment.yaml
diff --git a/hashicorp/vault/example-apps/dynamic-postgresql/pgadmin.yaml b/hashicorp/vault-deprecated/example-apps/dynamic-postgresql/pgadmin.yaml
similarity index 100%
rename from hashicorp/vault/example-apps/dynamic-postgresql/pgadmin.yaml
rename to hashicorp/vault-deprecated/example-apps/dynamic-postgresql/pgadmin.yaml
diff --git a/hashicorp/vault/example-apps/dynamic-postgresql/postgres.yaml b/hashicorp/vault-deprecated/example-apps/dynamic-postgresql/postgres.yaml
similarity index 100%
rename from hashicorp/vault/example-apps/dynamic-postgresql/postgres.yaml
rename to hashicorp/vault-deprecated/example-apps/dynamic-postgresql/postgres.yaml
diff --git a/hashicorp/vault/example-apps/dynamic-postgresql/readme.md b/hashicorp/vault-deprecated/example-apps/dynamic-postgresql/readme.md
similarity index 100%
rename from hashicorp/vault/example-apps/dynamic-postgresql/readme.md
rename to hashicorp/vault-deprecated/example-apps/dynamic-postgresql/readme.md
diff --git a/hashicorp/vault/injector/injector-clusterrole.yaml b/hashicorp/vault-deprecated/injector/injector-clusterrole.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-clusterrole.yaml
rename to hashicorp/vault-deprecated/injector/injector-clusterrole.yaml
diff --git a/hashicorp/vault/injector/injector-clusterrolebinding.yaml b/hashicorp/vault-deprecated/injector/injector-clusterrolebinding.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-clusterrolebinding.yaml
rename to hashicorp/vault-deprecated/injector/injector-clusterrolebinding.yaml
diff --git a/hashicorp/vault/injector/injector-deployment.yaml b/hashicorp/vault-deprecated/injector/injector-deployment.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-deployment.yaml
rename to hashicorp/vault-deprecated/injector/injector-deployment.yaml
diff --git a/hashicorp/vault/injector/injector-mutating-webhook.yaml b/hashicorp/vault-deprecated/injector/injector-mutating-webhook.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-mutating-webhook.yaml
rename to hashicorp/vault-deprecated/injector/injector-mutating-webhook.yaml
diff --git a/hashicorp/vault/injector/injector-service.yaml b/hashicorp/vault-deprecated/injector/injector-service.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-service.yaml
rename to hashicorp/vault-deprecated/injector/injector-service.yaml
diff --git a/hashicorp/vault/injector/injector-serviceaccount.yaml b/hashicorp/vault-deprecated/injector/injector-serviceaccount.yaml
similarity index 100%
rename from hashicorp/vault/injector/injector-serviceaccount.yaml
rename to hashicorp/vault-deprecated/injector/injector-serviceaccount.yaml
diff --git a/hashicorp/vault-deprecated/readme.md
b/hashicorp/vault-deprecated/readme.md
new file mode 100644
index 0000000..8f80a31
--- /dev/null
+++ b/hashicorp/vault-deprecated/readme.md
@@ -0,0 +1,112 @@
+# Hashicorp Vault Guide - Deprecated
+
+# Vault
+
+For this tutorial, I use Kubernetes 1.17.
+It's critical because we'll need certain [admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) enabled.
+
+To get 1.17 on Linux/Windows, just use `kind`, since it can create a 1.17 cluster with the admission controllers already set up.
+
+```
+#Windows
+kind create cluster --name vault --image kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62
+
+#Linux
+kind create cluster --name vault --kubeconfig ~/.kube/kind-vault --image kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62
+```
+
+## TLS End to End Encryption
+
+VIDEO: ``````
+See steps in [./tls/ssl_generate_self_signed.txt](./tls/ssl_generate_self_signed.txt).
+You'll need to generate TLS certs (or bring your own).
+Create base64 strings from the files, place them in `server-tls-secret.yaml`, and apply it.
+Remember not to check your TLS certs into Git :)
+
+## Deployment
+
+```
+kubectl create ns vault-example
+kubectl -n vault-example apply -f ./hashicorp/vault-deprecated/server/
+kubectl -n vault-example get pods
+```
+
+## Storage
+
+```
+kubectl -n vault-example get pvc
+```
+Ensure `vault-claim` is bound; if not, run `kubectl -n vault-example describe pvc vault-claim`.
+Ensure the correct storage class is used for your cluster.
+If you need to change the storage class, delete the PVC, edit the YAML, and re-apply.
+
+## Initialising Vault
+
+```
+kubectl -n vault-example exec -it vault-example-0 vault operator init
+# unseal 3 times, each time with a different unseal key
+kubectl -n vault-example exec -it vault-example-0 vault operator unseal
+kubectl -n vault-example get pods
+```
+
+## Deploy the Injector
+
+VIDEO: ``````
+The injector allows pods to automatically get secrets from the vault.
+
+```
+kubectl -n vault-example apply -f ./hashicorp/vault-deprecated/injector/
+kubectl -n vault-example get pods
+```
+
+## Injector Kubernetes Auth Policy
+
+For the injector to be authorised to access vault, we need to enable Kubernetes auth:
+
+```
+kubectl -n vault-example exec -it vault-example-0 vault login
+kubectl -n vault-example exec -it vault-example-0 vault auth enable kubernetes
+
+kubectl -n vault-example exec -it vault-example-0 sh
+vault write auth/kubernetes/config \
+token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
+kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
+kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+exit
+
+kubectl -n vault-example get pods
+```
+
+# Summary
+
+So we have a vault, an injector, TLS end to end, and stateful storage.
+The injector can now inject secrets for pods from the vault.
+
+Now we are ready to use the platform for different types of secrets:
+
+## Secret Injection Guides
+
+I've broken this down into basic guides to keep this document from becoming too large.
+
+### Basic Secrets
+
+Objective:
+----------
+* Let's create a basic secret in vault manually
+* Application consumes the secret automatically
+
+[Try it](./example-apps/basic-secret/readme.md)
+
+### Dynamic Secrets: Postgres
+
+Objective:
+----------
+* We have a Postgres Database
+* Let's delegate Vault to manage the life cycle of our database credentials
+* Deploy an app that automatically gets its credentials from vault
+
+[Try it](./example-apps/dynamic-postgresql/readme.md)
+
+
+
+
diff --git a/hashicorp/vault/server/server-clusterrolebinding.yaml b/hashicorp/vault-deprecated/server/server-clusterrolebinding.yaml
similarity index 100%
rename from hashicorp/vault/server/server-clusterrolebinding.yaml
rename to hashicorp/vault-deprecated/server/server-clusterrolebinding.yaml
diff --git a/hashicorp/vault/server/server-config-configmap.yaml b/hashicorp/vault-deprecated/server/server-config-configmap.yaml
similarity index 100%
rename from hashicorp/vault/server/server-config-configmap.yaml
rename to hashicorp/vault-deprecated/server/server-config-configmap.yaml
diff --git a/hashicorp/vault/server/server-disruptionbudget.yaml b/hashicorp/vault-deprecated/server/server-disruptionbudget.yaml
similarity index 100%
rename from hashicorp/vault/server/server-disruptionbudget.yaml
rename to hashicorp/vault-deprecated/server/server-disruptionbudget.yaml
diff --git a/hashicorp/vault/server/server-ingress.yaml b/hashicorp/vault-deprecated/server/server-ingress.yaml
similarity index 100%
rename from hashicorp/vault/server/server-ingress.yaml
rename to hashicorp/vault-deprecated/server/server-ingress.yaml
diff --git a/hashicorp/vault/server/server-pv.yaml b/hashicorp/vault-deprecated/server/server-pv.yaml
similarity index 100%
rename from hashicorp/vault/server/server-pv.yaml
rename to hashicorp/vault-deprecated/server/server-pv.yaml
diff --git a/hashicorp/vault/server/server-pvc.yaml b/hashicorp/vault-deprecated/server/server-pvc.yaml
similarity index 100%
rename from hashicorp/vault/server/server-pvc.yaml
rename to hashicorp/vault-deprecated/server/server-pvc.yaml
diff --git a/hashicorp/vault/server/server-service.yaml b/hashicorp/vault-deprecated/server/server-service.yaml
similarity index 100%
rename from hashicorp/vault/server/server-service.yaml
rename to hashicorp/vault-deprecated/server/server-service.yaml
diff --git a/hashicorp/vault/server/server-serviceaccount.yaml b/hashicorp/vault-deprecated/server/server-serviceaccount.yaml
similarity index 100%
rename from hashicorp/vault/server/server-serviceaccount.yaml
rename to hashicorp/vault-deprecated/server/server-serviceaccount.yaml
diff --git a/hashicorp/vault/server/server-statefulset.yaml b/hashicorp/vault-deprecated/server/server-statefulset.yaml
similarity index 100%
rename from hashicorp/vault/server/server-statefulset.yaml
rename to hashicorp/vault-deprecated/server/server-statefulset.yaml
diff --git a/hashicorp/vault/server/server-tls-secret.yaml b/hashicorp/vault-deprecated/server/server-tls-secret.yaml
similarity index 100%
rename from hashicorp/vault/server/server-tls-secret.yaml
rename to hashicorp/vault-deprecated/server/server-tls-secret.yaml
diff --git a/hashicorp/vault/server/ui-service.yaml b/hashicorp/vault-deprecated/server/ui-service.yaml
similarity index 100%
rename from hashicorp/vault/server/ui-service.yaml
rename to hashicorp/vault-deprecated/server/ui-service.yaml
diff --git a/hashicorp/vault-deprecated/tls/ca-config.json b/hashicorp/vault-deprecated/tls/ca-config.json
new file mode 100644
index 0000000..2d4b37a
---
/dev/null
+++ b/hashicorp/vault-deprecated/tls/ca-config.json
@@ -0,0 +1,13 @@
+{
+  "signing": {
+    "default": {
+      "expiry": "8760h"
+    },
+    "profiles": {
+      "default": {
+        "usages": ["signing", "key encipherment", "server auth", "client auth"],
+        "expiry": "8760h"
+      }
+    }
+  }
+}
diff --git a/hashicorp/vault-deprecated/tls/ca-csr.json b/hashicorp/vault-deprecated/tls/ca-csr.json
new file mode 100644
index 0000000..1c863ca
--- /dev/null
+++ b/hashicorp/vault-deprecated/tls/ca-csr.json
@@ -0,0 +1,18 @@
+{
+  "hosts": [
+    "cluster.local"
+  ],
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "AU",
+      "L": "Melbourne",
+      "O": "Example",
+      "OU": "CA",
+      "ST": "Example"
+    }
+  ]
+}
diff --git a/hashicorp/vault/tls/ssl_generate_self_signed.txt b/hashicorp/vault-deprecated/tls/ssl_generate_self_signed.txt
similarity index 100%
rename from hashicorp/vault/tls/ssl_generate_self_signed.txt
rename to hashicorp/vault-deprecated/tls/ssl_generate_self_signed.txt
diff --git a/hashicorp/vault/tls/vault-csr.json b/hashicorp/vault-deprecated/tls/vault-csr.json
similarity index 100%
rename from hashicorp/vault/tls/vault-csr.json
rename to hashicorp/vault-deprecated/tls/vault-csr.json
diff --git a/hashicorp/vault/consul-values.yaml b/hashicorp/vault/consul-values.yaml
new file mode 100644
index 0000000..87c6002
--- /dev/null
+++ b/hashicorp/vault/consul-values.yaml
@@ -0,0 +1,11 @@
+global:
+  datacenter: vault-kubernetes-guide
+
+client:
+  enabled: true
+
+server:
+  replicas: 1
+  bootstrapExpect: 1
+  disruptionBudget:
+    maxUnavailable: 0
\ No newline at end of file
diff --git a/hashicorp/vault/example-apps/basic-secret/readme.md b/hashicorp/vault/example-apps/basic-secret/readme.md
index 09bae17..8cebe21 100644
--- a/hashicorp/vault/example-apps/basic-secret/readme.md
+++ b/hashicorp/vault/example-apps/basic-secret/readme.md
@@ -1,17 +1,15 @@
 # Basic Secret Injection
-
 
 In order for us to start using secrets in Vault, we need to set up a policy.
-
 
 ```
 #Create a role for our app
 
-kubectl -n vault-example exec -it vault-example-0 sh
+kubectl -n vault exec -it vault-0 -- sh
 
 vault write auth/kubernetes/role/basic-secret-role \
    bound_service_account_names=basic-secret \
-   bound_service_account_namespaces=vault-example \
+   bound_service_account_namespaces=example-app \
    policies=basic-secret-policy \
    ttl=1h
 ```
@@ -19,32 +17,36 @@ vault write auth/kubernetes/role/basic-secret-role \
 The above maps our Kubernetes service account, used by our pod, to a policy.
 Now let's create the policy to map our service account to a bunch of secrets:
-
 
 ```
-kubectl -n vault-example exec -it vault-example-0 sh
+kubectl -n vault exec -it vault-0 -- sh
+
 cat <<EOF > /home/vault/app-policy.hcl
 path "secret/basic-secret/*" {
   capabilities = ["read"]
 }
 EOF
 vault policy write basic-secret-policy /home/vault/app-policy.hcl
-exit
 ```
 
 Now our service account for our pod can access all secrets under `secret/basic-secret/*`.
 Let's create some secrets.
-
 
 ```
-kubectl -n vault-example exec -it vault-example-0 sh
+kubectl -n vault exec -it vault-0 -- sh
 vault secrets enable -path=secret/ kv
 vault kv put secret/basic-secret/helloworld username=dbuser password=sUp3rS3cUr3P@ssw0rd
-exit
 ```
 
 Let's deploy our app and see if it works:
 
 ```
-kubectl -n vault-example apply -f ./hashicorp/vault/example-apps/basic-secret/deployment.yaml
-kubectl -n vault-example get pods
+kubectl create ns example-app
+kubectl -n example-app apply -f ./example-apps/basic-secret/deployment.yaml
+kubectl -n example-app get pods
+```
+
+Once the pod is ready, the secret is injected into the pod at the following location:
+
+```
+kubectl -n example-app exec deploy/basic-secret -c app -- sh -c "cat /vault/secrets/helloworld"
 ```
\ No newline at end of file
diff --git a/hashicorp/vault/kind.yaml b/hashicorp/vault/kind.yaml
new file mode 100644
index 0000000..3328485
--- /dev/null
+++ b/hashicorp/vault/kind.yaml
@@ -0,0 +1,7 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+- role: worker
+- role: worker
\ No newline at end of file
diff --git a/hashicorp/vault/manifests/consul.yaml b/hashicorp/vault/manifests/consul.yaml
new file mode 100644
index 0000000..f523f7a
--- /dev/null
+++ b/hashicorp/vault/manifests/consul.yaml
@@ -0,0 +1,626 @@
+---
+# Source: consul/templates/server-disruptionbudget.yaml
+# PodDisruptionBudget to prevent degrading the server cluster through
+# voluntary cluster changes.
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: consul-consul-server
+  namespace: vault
+  labels:
+    app: consul
+    chart: consul-helm
+    heritage: Helm
+    release: consul
+    component: server
+spec:
+  maxUnavailable: 0
+  selector:
+    matchLabels:
+      app: consul
+      release: "consul"
+      component: server
+---
+# Source: consul/templates/client-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul-consul-client
+  namespace: vault
+  labels:
+    app: consul
+    chart: consul-helm
+    heritage: Helm
+    release: consul
+    component: client
+---
+# Source: consul/templates/server-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul-consul-server
+  namespace: vault
+  labels:
+    app: consul
+    chart: consul-helm
+    heritage: Helm
+    release: consul
+    component: server
+---
+# Source: consul/templates/client-config-configmap.yaml
+# ConfigMap with extra configuration specified directly to the chart
+# for client agents only.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: consul-consul-client-config
+  namespace: vault
+  labels:
+    app: consul
+    chart: consul-helm
+    heritage: Helm
+    release: consul
+    component: client
+data:
+  extra-from-values.json: |-
+    {}
+
+  central-config.json: |-
+    {
+      "enable_central_service_config": true
+    }
+---
+# Source: consul/templates/server-config-configmap.yaml
+# StatefulSet to run the actual Consul server cluster.
+apiVersion: v1 +kind: ConfigMap +metadata: + name: consul-consul-server-config + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: server +data: + extra-from-values.json: |- + {} + + central-config.json: |- + { + "enable_central_service_config": true + } +--- +# Source: consul/templates/client-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-consul-client + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: client +rules: [] +--- +# Source: consul/templates/server-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-consul-server + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: server +rules: [] +--- +# Source: consul/templates/client-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-consul-client + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: client +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-consul-client +subjects: + - kind: ServiceAccount + name: consul-consul-client +--- +# Source: consul/templates/server-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-consul-server + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-consul-server +subjects: + - kind: ServiceAccount + name: consul-consul-server +--- +# Source: consul/templates/dns-service.yaml +# Service for Consul DNS. +apiVersion: v1 +kind: Service +metadata: + name: consul-consul-dns + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: dns +spec: + type: ClusterIP + ports: + - name: dns-tcp + port: 53 + protocol: "TCP" + targetPort: dns-tcp + - name: dns-udp + port: 53 + protocol: "UDP" + targetPort: dns-udp + selector: + app: consul + release: "consul" + hasDNS: "true" +--- +# Source: consul/templates/server-service.yaml +# Headless service for Consul server DNS entries. This service should only +# point to Consul servers. For access to an agent, one should assume that +# the agent is installed locally on the node and the NODE_IP should be used. +# If the node can't run a Consul agent, then this service can be used to +# communicate directly to a server agent. +apiVersion: v1 +kind: Service +metadata: + name: consul-consul-server + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: server + annotations: + # This must be set in addition to publishNotReadyAddresses due + # to an open issue where it may not work: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + # We want the servers to become available even if they're not ready + # since this DNS is also used for join operations. 
+ publishNotReadyAddresses: true + ports: + - name: http + port: 8500 + targetPort: 8500 + - name: serflan-tcp + protocol: "TCP" + port: 8301 + targetPort: 8301 + - name: serflan-udp + protocol: "UDP" + port: 8301 + targetPort: 8301 + - name: serfwan-tcp + protocol: "TCP" + port: 8302 + targetPort: 8302 + - name: serfwan-udp + protocol: "UDP" + port: 8302 + targetPort: 8302 + - name: server + port: 8300 + targetPort: 8300 + - name: dns-tcp + protocol: "TCP" + port: 8600 + targetPort: dns-tcp + - name: dns-udp + protocol: "UDP" + port: 8600 + targetPort: dns-udp + selector: + app: consul + release: "consul" + component: server +--- +# Source: consul/templates/ui-service.yaml +# UI Service for Consul Server +apiVersion: v1 +kind: Service +metadata: + name: consul-consul-ui + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: ui +spec: + selector: + app: consul + release: "consul" + component: server + ports: + - name: http + port: 80 + targetPort: 8500 +--- +# Source: consul/templates/client-daemonset.yaml +# DaemonSet to run the Consul clients on every node. +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: consul-consul + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: client +spec: + selector: + matchLabels: + app: consul + chart: consul-helm + release: consul + component: client + hasDNS: "true" + template: + metadata: + labels: + app: consul + chart: consul-helm + release: consul + component: client + hasDNS: "true" + annotations: + "consul.hashicorp.com/connect-inject": "false" + "consul.hashicorp.com/config-checksum": 797b3593a73b78fc74f3b1e3b978107b3022d4649802185631f959f000234331 + spec: + terminationGracePeriodSeconds: 10 + serviceAccountName: consul-consul-client + securityContext: + fsGroup: 1000 + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 100 + + volumes: + - name: data + emptyDir: {} + - name: config + configMap: + name: consul-consul-client-config + containers: + - name: consul + image: "hashicorp/consul:1.11.1" + env: + - name: ADVERTISE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: CONSUL_DISABLE_PERM_MGMT + value: "true" + + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="consul-consul" + + mkdir -p /consul/extra-config + cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json + [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json + [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json + [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json + + exec /usr/local/bin/docker-entrypoint.sh consul agent \ + -node="${NODE}" \ + -advertise="${ADVERTISE_IP}" \ + -bind=0.0.0.0 \ + -client=0.0.0.0 \ + -node-meta=host-ip:${HOST_IP} \ + -node-meta=pod-name:${HOSTNAME} \ + -hcl='leave_on_terminate = true' \ + -hcl='ports { grpc = 8502 }' \ + -config-dir=/consul/config \ + -datacenter=vault-kubernetes-guide \ + -data-dir=/consul/data \ + -retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \ + 
-config-file=/consul/extra-config/extra-from-values.json \ + -domain=consul + volumeMounts: + - name: data + mountPath: /consul/data + - name: config + mountPath: /consul/config + ports: + - containerPort: 8500 + hostPort: 8500 + name: http + - containerPort: 8502 + hostPort: 8502 + name: grpc + - containerPort: 8301 + protocol: "TCP" + name: serflan-tcp + - containerPort: 8301 + protocol: "UDP" + name: serflan-udp + - containerPort: 8600 + name: dns-tcp + protocol: "TCP" + - containerPort: 8600 + name: dns-udp + protocol: "UDP" + readinessProbe: + # NOTE(mitchellh): when our HTTP status endpoints support the + # proper status codes, we should switch to that. This is temporary. + exec: + command: + - "/bin/sh" + - "-ec" + - | + curl http://127.0.0.1:8500/v1/status/leader \ + 2>/dev/null | grep -E '".+"' + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + null +--- +# Source: consul/templates/server-statefulset.yaml +# StatefulSet to run the actual Consul server cluster. +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: consul-consul-server + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + component: server +spec: + serviceName: consul-consul-server + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app: consul + chart: consul-helm + release: consul + component: server + hasDNS: "true" + template: + metadata: + labels: + app: consul + chart: consul-helm + release: consul + component: server + hasDNS: "true" + annotations: + "consul.hashicorp.com/connect-inject": "false" + "consul.hashicorp.com/config-checksum": c9b100f895d5bda6a5c8bbebac73e1ab5bdc4cad06b04e72eb1b620677bfe41d + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: consul + release: "consul" + component: server + topologyKey: kubernetes.io/hostname + terminationGracePeriodSeconds: 30 + serviceAccountName: consul-consul-server + securityContext: + fsGroup: 1000 + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 100 + volumes: + - name: config + configMap: + name: consul-consul-server-config + containers: + - name: consul + image: "hashicorp/consul:1.11.1" + env: + - name: ADVERTISE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONSUL_DISABLE_PERM_MGMT + value: "true" + + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="consul-consul" + + mkdir -p /consul/extra-config + cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json + [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json + [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json + [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json + + exec /usr/local/bin/docker-entrypoint.sh consul agent \ + -advertise="${ADVERTISE_IP}" \ + -bind=0.0.0.0 \ + -bootstrap-expect=1 \ + -client=0.0.0.0 \ + -config-dir=/consul/config \ + -datacenter=vault-kubernetes-guide \ + -data-dir=/consul/data \ + -domain=consul \ + -hcl="connect { enabled = true }" \ + -ui \ + -retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \ + 
-serf-lan-port=8301 \ + -config-file=/consul/extra-config/extra-from-values.json \ + -server + volumeMounts: + - name: data-vault + mountPath: /consul/data + - name: config + mountPath: /consul/config + ports: + - name: http + containerPort: 8500 + - name: serflan-tcp + containerPort: 8301 + protocol: "TCP" + - name: serflan-udp + containerPort: 8301 + protocol: "UDP" + - name: serfwan-tcp + containerPort: 8302 + protocol: "TCP" + - name: serfwan-udp + containerPort: 8302 + protocol: "UDP" + - name: server + containerPort: 8300 + - name: dns-tcp + containerPort: 8600 + protocol: "TCP" + - name: dns-udp + containerPort: 8600 + protocol: "UDP" + readinessProbe: + # NOTE(mitchellh): when our HTTP status endpoints support the + # proper status codes, we should switch to that. This is temporary. + exec: + command: + - "/bin/sh" + - "-ec" + - | + curl http://127.0.0.1:8500/v1/status/leader \ + 2>/dev/null | grep -E '".+"' + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 5 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + null + volumeClaimTemplates: + - metadata: + name: data-vault + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +--- +# Source: consul/templates/tests/test-runner.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "consul-consul-test" + namespace: vault + labels: + app: consul + chart: consul-helm + heritage: Helm + release: consul + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: consul-test + image: "hashicorp/consul:1.11.1" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + command: + - "/bin/sh" + - "-ec" + - | + consul members | tee members.txt + if [ $(grep -c consul-server members.txt) != $(grep consul-server members.txt | grep -c alive) ] + then + echo "Failed because not all consul servers are available" + exit 1 + fi + + restartPolicy: Never diff --git a/hashicorp/vault/manifests/vault.yaml b/hashicorp/vault/manifests/vault.yaml new file mode 100644 index 0000000..59bf1e6 --- /dev/null +++ b/hashicorp/vault/manifests/vault.yaml @@ -0,0 +1,626 @@ +--- +# Source: vault/templates/server-disruptionbudget.yaml +# PodDisruptionBudget to prevent degrading the server cluster through +# voluntary cluster changes. 
+apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: vault + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server +--- +# Source: vault/templates/injector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vault-agent-injector + namespace: vault + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +--- +# Source: vault/templates/server-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vault + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +--- +# Source: vault/templates/server-config-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vault-config + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +data: + extraconfig-from-values.hcl: |- + disable_mlock = true + ui = true + + listener "tcp" { + tls_disable = 0 + address = "0.0.0.0:8200" + tls_cert_file = "/vault/userconfig/tls-server/tls.crt" + tls_key_file = "/vault/userconfig/tls-server/tls.key" + tls_min_version = "tls12" + } + + storage "consul" { + path = "vault" + address = "consul-consul-server:8500" + } +--- +# Source: vault/templates/injector-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vault-agent-injector-clusterrole + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: + - "get" + - "list" + - "watch" + - "patch" +--- +# Source: vault/templates/injector-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vault-agent-injector-binding + namespace: vault + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vault-agent-injector-clusterrole +subjects: +- kind: ServiceAccount + name: vault-agent-injector + namespace: vault +--- +# Source: vault/templates/server-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: vault-server-binding + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: vault + namespace: vault +--- +# Source: vault/templates/server-discovery-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: vault + name: vault-discovery-role + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list", "update", "patch"] +--- +# Source: 
vault/templates/server-discovery-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: vault-discovery-rolebinding + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vault-discovery-role +subjects: +- kind: ServiceAccount + name: vault + namespace: vault +--- +# Source: vault/templates/injector-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: vault-agent-injector-svc + namespace: vault + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +spec: + ports: + - port: 443 + targetPort: 8080 + selector: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + component: webhook +--- +# Source: vault/templates/server-ha-active-service.yaml +# Service for active Vault pod +apiVersion: v1 +kind: Service +metadata: + name: vault-active + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: ClusterIP + publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + - name: internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server + vault-active: "true" +--- +# Source: vault/templates/server-ha-standby-service.yaml +# Service for active Vault pod +apiVersion: v1 +kind: Service +metadata: + name: vault-standby + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: ClusterIP + publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + - name: internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server + vault-active: "false" +--- +# Source: vault/templates/server-headless-service.yaml +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: vault-internal + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "https" + port: 8200 + targetPort: 8200 + - name: internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server +--- +# Source: vault/templates/server-service.yaml +# Service for Vault cluster +apiVersion: v1 +kind: Service +metadata: + name: vault + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm + annotations: + # This must be set in addition to publishNotReadyAddresses due + # to an open issue where it may not work: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + # We want the servers to become available even if they're not ready + # since this DNS is also used for join operations. 
+ publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + - name: internal + port: 8201 + targetPort: 8201 + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server +--- +# Source: vault/templates/ui-service.yaml +# Headless service for Vault server DNS entries. This service should only +# point to Vault servers. For access to an agent, one should assume that +# the agent is installed locally on the node and the NODE_IP should be used. +# If the node can't run a Vault agent, then this service can be used to +# communicate directly to a server agent. +apiVersion: v1 +kind: Service +metadata: + name: vault-ui + namespace: vault + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault-ui + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +spec: + selector: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server + publishNotReadyAddresses: true + ports: + - name: http + port: 8200 + targetPort: 8200 + type: ClusterIP +--- +# Source: vault/templates/injector-deployment.yaml +# Deployment for the injector +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vault-agent-injector + namespace: vault + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm + component: webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + component: webhook + template: + metadata: + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + component: webhook + spec: + + + + serviceAccountName: "vault-agent-injector" + securityContext: + runAsNonRoot: true + runAsGroup: 1000 + runAsUser: 100 + containers: + - name: sidecar-injector + resources: + limits: + cpu: 250m + memory: 256Mi + requests: + cpu: 50m + memory: 50Mi + + image: "hashicorp/vault-k8s:0.14.1" + imagePullPolicy: "IfNotPresent" + env: + - name: AGENT_INJECT_LISTEN + value: ":8080" + - name: AGENT_INJECT_LOG_LEVEL + value: info + - name: AGENT_INJECT_VAULT_ADDR + value: https://vault.vault.svc:8200 + - name: AGENT_INJECT_VAULT_AUTH_PATH + value: auth/kubernetes + - name: AGENT_INJECT_VAULT_IMAGE + value: "vault:1.4.0" + - name: AGENT_INJECT_TLS_AUTO + value: vault-agent-injector-cfg + - name: AGENT_INJECT_TLS_AUTO_HOSTS + value: vault-agent-injector-svc,vault-agent-injector-svc.vault,vault-agent-injector-svc.vault.svc + - name: AGENT_INJECT_LOG_FORMAT + value: standard + - name: AGENT_INJECT_REVOKE_ON_SHUTDOWN + value: "false" + + args: + - agent-inject + - 2>&1 + livenessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 2 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 +--- +# Source: vault/templates/server-statefulset.yaml +# StatefulSet to run the actual vault server cluster. 
+apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: vault + namespace: vault + labels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +spec: + serviceName: vault-internal + podManagementPolicy: Parallel + replicas: 3 + updateStrategy: + type: OnDelete + selector: + matchLabels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server + template: + metadata: + labels: + helm.sh/chart: vault-0.5.0 + app.kubernetes.io/name: vault + app.kubernetes.io/instance: vault + component: server + spec: + + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: vault + app.kubernetes.io/instance: "vault" + component: server + topologyKey: kubernetes.io/hostname + + + + terminationGracePeriodSeconds: 10 + serviceAccountName: vault + + securityContext: + runAsNonRoot: true + runAsGroup: 1000 + runAsUser: 100 + fsGroup: 1000 + volumes: + + - name: config + configMap: + name: vault-config + + - name: userconfig-tls-server + secret: + secretName: tls-server + - name: userconfig-tls-ca + secret: + secretName: tls-ca + containers: + - name: vault + resources: + limits: + cpu: 2000m + memory: 16Gi + requests: + cpu: 500m + memory: 50Mi + + securityContext: + capabilities: + add: ["IPC_LOCK"] + image: hashicorp/vault:1.9.0 + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-ec" + + args: + - | + sed -E "s/HOST_IP/${HOST_IP?}/g" /vault/config/extraconfig-from-values.hcl > /tmp/storageconfig.hcl; + sed -Ei "s/POD_IP/${POD_IP?}/g" /tmp/storageconfig.hcl; + /usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl + + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: VAULT_K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: VAULT_ADDR + value: "https://127.0.0.1:8200" + - name: VAULT_API_ADDR + value: "https://$(POD_IP):8200" + - name: SKIP_CHOWN + value: "true" + - name: SKIP_SETCAP + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_CLUSTER_ADDR + value: "https://$(HOSTNAME).vault-internal:8201" + + + - name: "VAULT_CACERT" + value: "/vault/userconfig/tls-ca/tls.crt" + + volumeMounts: + + + + - name: config + mountPath: /vault/config + + - name: userconfig-tls-server + readOnly: true + mountPath: /vault/userconfig/tls-server + - name: userconfig-tls-ca + readOnly: true + mountPath: /vault/userconfig/tls-ca + ports: + - containerPort: 8200 + name: http + - containerPort: 8201 + name: internal + - containerPort: 8202 + name: replication + readinessProbe: + httpGet: + path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204" + port: 8200 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 5 + livenessProbe: + httpGet: + path: "/v1/sys/health?standbyok=true" + port: 8200 + scheme: HTTPS + initialDelaySeconds: 60 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 5 + lifecycle: + # Vault container doesn't receive SIGTERM from Kubernetes + # and after the grace period ends, Kube sends SIGKILL. This + # causes issues with graceful shutdowns such as deregistering itself + # from Consul (zombie services). 
+ preStop: + exec: + command: [ + "/bin/sh", "-c", + # Adding a sleep here to give the pod eviction a + # chance to propagate, so requests will not be made + # to this pod while it's terminating + "sleep 5 && kill -SIGTERM $(pidof vault)", + ] + + volumeClaimTemplates: +--- +# Source: vault/templates/injector-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: vault-agent-injector-cfg + labels: + app.kubernetes.io/name: vault-agent-injector + app.kubernetes.io/instance: vault + app.kubernetes.io/managed-by: Helm +webhooks: + - name: vault.hashicorp.com + clientConfig: + service: + name: vault-agent-injector-svc + namespace: vault + path: "/mutate" + caBundle: + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] diff --git a/hashicorp/vault/readme.md b/hashicorp/vault/readme.md index f45e43e..8f60244 100644 --- a/hashicorp/vault/readme.md +++ b/hashicorp/vault/readme.md @@ -1,112 +1,212 @@ -# Hashicorp Vault Guide - -# Vault - -For this tutorial, I use Kuberentes 1.17 -It's critical because we'll need certain [admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) enabled. - -To get 1.17 for Linux\Windows, just use `kind` since you can create a 1.17 with admissions all setup. - -``` -#Windows -kind create cluster --name vault --image kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62 - -#Linux -kind create cluster --name vault --kubeconfig ~/.kube/kind-vault --image kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62 -``` - -## TLS End to End Encryption - -VIDEO: `````` -See steps in [./tls/ssl_generate_self_signed.txt](./tls/ssl_generate_self_signed.txt) -You'll need to generate TLS certs (or bring your own) -Create base64 strings from the files, place it in the `server-tls-secret.yaml` and apply it. -Remember not to check-in your TLS to GIT :) - -## Deployment - -``` -kubectl create ns vault-example -kubectl -n vault-example apply -f ./hashicorp/vault/server/ -kubectl -n vault-example get pods -``` - -## Storage - -``` -kubectl -n vault-example get pvc -``` -ensure vault-claim is bound, if not, `kubectl -n vault-example describe pvc vault-claim` -ensure correct storage class is used for your cluster. -if you need to change the storage class, delete the pvc, edit YAML and re-apply - -## Initialising Vault - -``` -kubectl -n vault-example exec -it vault-example-0 vault operator init -# unseal 3 times -kubectl -n vault-example exec -it vault-example-0 vault operator unseal -kubectl -n vault-example get pods -``` - -## Deploy the Injector - -VIDEO: `````` -Injector allows pods to automatically get secrets from the vault. 
-
-```
-kubectl -n vault-example apply -f ./hashicorp/vault/injector/
-kubectl -n vault-example get pods
-```
-
-## Injector Kubernetes Auth Policy
-
-For the injector to be authorised to access vault, we need to enable K8s auth
-
-```
-kubectl -n vault-example exec -it vault-example-0 vault login
-kubectl -n vault-example exec -it vault-example-0 vault auth enable kubernetes
-
-kubectl -n vault-example exec -it vault-example-0 sh
-vault write auth/kubernetes/config \
-token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
-kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
-kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-exit
-
-kubectl -n vault-example get pods
-```
-
-# Summary
-
-So we have a vault, an injector, TLS end to end, stateful storage.
-The injector can now inject secrets for pods from the vault.
-
-Now we are ready to use the platform for different types of secrets:
-
-## Secret Injection Guides
-
-I've broken this down into basic guides to avoid this document from becoming too large.
-
-### Basic Secrets
-
-Objective:
-----------
-* Let's create a basic secret in vault manually
-* Application consumes the secret automatically
-
-[Try it](./example-apps/basic-secret/readme.md)
-
-### Dynamic Secrets: Postgres
-
-Objective:
-----------
-* We have a Postgres Database
-* Let's delegate Vault to manage life cycles of our database credentials
-* Deploy an app, that automatically gets it's credentials from vault
-
-[Try it](./example-apps/dynamic-postgresql/readme.md)
-
-
-
-
+# Hashicorp Vault Guide
+
+
+Requirements:
+
+* Kubernetes 1.21
+* Kind or Minikube
+
+For this tutorial, I will be using Kubernetes 1.21.
+If you are following the old guide for Kubernetes 1.17, go [here](../vault-deprecated/readme.md).
+
+Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/):
+
+```
+cd hashicorp/vault
+
+kind create cluster --name vault --image kindest/node:v1.21.1 --config kind.yaml
+```
+
+Next up, I will run a small container to do all the work from.
+You can skip this part if you already have `kubectl` and `helm` on your machine.
+
+```
+docker run -it --rm --net host -v ${HOME}/.kube/:/root/.kube/ -v ${PWD}:/work -w /work alpine sh
+```
+
+Install `kubectl`
+
+```
+apk add --no-cache curl
+curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+```
+
+Install `helm`
+
+```
+curl -LO https://get.helm.sh/helm-v3.7.2-linux-amd64.tar.gz
+tar -C /tmp/ -zxvf helm-v3.7.2-linux-amd64.tar.gz
+rm helm-v3.7.2-linux-amd64.tar.gz
+mv /tmp/linux-amd64/helm /usr/local/bin/helm
+chmod +x /usr/local/bin/helm
+```
+
+Now we have `helm` and `kubectl` and can access our `kind` cluster:
+
+```
+kubectl get nodes
+NAME                  STATUS   ROLES                  AGE   VERSION
+vault-control-plane   Ready    control-plane,master   37s   v1.21.1
+```
+
+Let's add the HashiCorp Helm repository so we can access the Kubernetes manifests:
+
+```
+helm repo add hashicorp https://helm.releases.hashicorp.com
+```
+
+## Storage: Consul
+
+We will use a very basic Consul cluster for our Vault backend.
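+The chart is customised via the `consul-values.yaml` file in this folder. It keeps things demo-sized: a single Consul server (not a production-grade quorum) with client agents enabled, and `bootstrapExpect: 1` so the lone server can elect itself leader:
+
+```
+global:
+  datacenter: vault-kubernetes-guide
+
+client:
+  enabled: true
+
+server:
+  replicas: 1
+  bootstrapExpect: 1
+  disruptionBudget:
+    maxUnavailable: 0
+```
+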
+Let's find what versions of Consul are available:
+
+```
+helm search repo hashicorp/consul --versions
+```
+
+We can use chart `0.39.0`, which is the latest at the time of this demo.
+Let's create a manifests folder and grab the YAML:
+
+```
+
+mkdir manifests
+
+helm template consul hashicorp/consul \
+  --namespace vault \
+  --version 0.39.0 \
+  -f consul-values.yaml \
+  > ./manifests/consul.yaml
+```
+
+Deploy the Consul services:
+
+```
+kubectl create ns vault
+kubectl -n vault apply -f ./manifests/consul.yaml
+kubectl -n vault get pods
+```
+
+
+## TLS End to End Encryption
+
+See steps in [./tls/ssl_generate_self_signed.md](./tls/ssl_generate_self_signed.md).
+You'll need to generate TLS certs (or bring your own).
+Remember not to check your TLS certs into Git :)
+
+Create the TLS secrets:
+
+```
+kubectl -n vault create secret tls tls-ca \
+  --cert ./tls/ca.pem \
+  --key ./tls/ca-key.pem
+
+kubectl -n vault create secret tls tls-server \
+  --cert ./tls/vault.pem \
+  --key ./tls/vault-key.pem
+```
+
+## Generate Kubernetes Manifests
+
+
+Let's find what versions of Vault are available:
+
+```
+helm search repo hashicorp/vault --versions
+```
+
+In this demo I will use the `0.5.0` chart, which is what the manifests in this repo were generated with.
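+
+As with Consul, we customise the chart with a values file, `vault-values.yaml` in this folder. The key part is the `server.ha` section, which runs 3 replicas with TLS enabled and Consul as the storage backend; note that the cert and key paths line up with the `tls-server` secret we created earlier:
+
+```
+ha:
+  enabled: true
+  replicas: 3
+  config: |
+    ui = true
+
+    listener "tcp" {
+      tls_disable = 0
+      address = "0.0.0.0:8200"
+      tls_cert_file = "/vault/userconfig/tls-server/tls.crt"
+      tls_key_file = "/vault/userconfig/tls-server/tls.key"
+      tls_min_version = "tls12"
+    }
+
+    storage "consul" {
+      path = "vault"
+      address = "consul-consul-server:8500"
+    }
+```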
+
+With the values file in place, let's grab the manifests:
+
+```
+helm template vault hashicorp/vault \
+  --namespace vault \
+  --version 0.5.0 \
+  -f vault-values.yaml \
+  > ./manifests/vault.yaml
+```
+
+## Deployment
+
+```
+kubectl -n vault apply -f ./manifests/vault.yaml
+kubectl -n vault get pods
+```
+
+## Initialising Vault
+
+```
+# open a shell on each Vault pod, one at a time
+kubectl -n vault exec -it vault-0 -- sh
+kubectl -n vault exec -it vault-1 -- sh
+kubectl -n vault exec -it vault-2 -- sh
+
+# initialise once (on vault-0), then unseal every pod
+# (run unseal 3 times, each time with a different unseal key)
+vault operator init
+vault operator unseal
+
+kubectl -n vault exec -it vault-0 -- vault status
+kubectl -n vault exec -it vault-1 -- vault status
+kubectl -n vault exec -it vault-2 -- vault status
+
+```
+
+## Web UI
+
+Let's check out the web UI:
+
+```
+kubectl -n vault port-forward svc/vault-ui 443:8200
+```
+Now we can access the web UI [here](https://localhost/).
+
+## Enable Kubernetes Authentication
+
+For the injector to be authorised to access Vault, we need to enable Kubernetes auth:
+
+```
+kubectl -n vault exec -it vault-0 -- sh
+
+vault login
+vault auth enable kubernetes
+
+vault write auth/kubernetes/config \
+token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
+kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
+kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+exit
+```
+
+# Summary
+
+So we have a vault, an injector, TLS end to end, and stateful storage.
+The injector can now inject secrets for pods from the vault.
+
+Now we are ready to use the platform for different types of secrets:
+
+## Secret Injection Guides
+
+I've broken this down into basic guides to keep this document from becoming too large.
+
+### Basic Secrets
+
+Objective:
+----------
+* Let's create a basic secret in vault manually
+* Application consumes the secret automatically
+
+[Try it](./example-apps/basic-secret/readme.md)
+
+### Dynamic Secrets: Postgres
+
+Objective:
+----------
+* We have a Postgres Database
+* Let's delegate Vault to manage the life cycle of our database credentials
+* Deploy an app that automatically gets its credentials from vault
+
+[Try it](./example-apps/dynamic-postgresql/readme.md)
+
+
+
+
diff --git a/hashicorp/vault/tls/ca-config.json b/hashicorp/vault/tls/ca-config.json
index 2d4b37a..aace4b8 100644
--- a/hashicorp/vault/tls/ca-config.json
+++ b/hashicorp/vault/tls/ca-config.json
@@ -1,12 +1,12 @@
 {
   "signing": {
     "default": {
-      "expiry": "8760h"
+      "expiry": "175200h"
     },
     "profiles": {
       "default": {
         "usages": ["signing", "key encipherment", "server auth", "client auth"],
-        "expiry": "8760h"
+        "expiry": "175200h"
       }
     }
   }
diff --git a/hashicorp/vault/tls/ssl_generate_self_signed.md b/hashicorp/vault/tls/ssl_generate_self_signed.md
new file mode 100644
index 0000000..695304c
--- /dev/null
+++ b/hashicorp/vault/tls/ssl_generate_self_signed.md
@@ -0,0 +1,40 @@
+# Use CFSSL to generate certificates
+
+More about [CFSSL here](https://github.com/cloudflare/cfssl)
+
+```
+
+cd hashicorp/vault/tls
+
+docker run -it --rm -v ${PWD}:/work -w /work debian bash
+
+apt-get update && apt-get install -y curl && \
+curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -o /usr/local/bin/cfssl && \
+curl -L https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -o /usr/local/bin/cfssljson && \
+chmod +x /usr/local/bin/cfssl && \
+chmod +x /usr/local/bin/cfssljson
+
+#generate ca in /tmp
+cfssl gencert -initca ca-csr.json | cfssljson -bare /tmp/ca
+
+#generate certificate in /tmp
+cfssl gencert \
+  -ca=/tmp/ca.pem \
+  -ca-key=/tmp/ca-key.pem \
+  -config=ca-config.json \
+  -hostname="vault,vault.vault.svc.cluster.local,vault.vault.svc,localhost,127.0.0.1" \
+  -profile=default \
+  ca-csr.json | cfssljson -bare /tmp/vault
+```
+
+View the generated files:
+
+```
+ls -l /tmp
+```
+
+Move them into the working directory (mounted from your host):
+
+```
+mv /tmp/* .
+```
\ No newline at end of file
diff --git a/hashicorp/vault/vault-values.yaml b/hashicorp/vault/vault-values.yaml
new file mode 100644
index 0000000..d9b57f5
--- /dev/null
+++ b/hashicorp/vault/vault-values.yaml
@@ -0,0 +1,85 @@
+# Vault Helm Chart Value Overrides
+global:
+  enabled: true
+  tlsDisable: false
+
+injector:
+  enabled: true
+  # Use the Vault K8s Image https://github.com/hashicorp/vault-k8s/
+  image:
+    repository: "hashicorp/vault-k8s"
+    tag: "0.14.1"
+
+  resources:
+    requests:
+      memory: 50Mi
+      cpu: 50m
+    limits:
+      memory: 256Mi
+      cpu: 250m
+
+server:
+  image:
+    repository: "hashicorp/vault"
+    tag: "1.9.0"
+
+  # These Resource Limits are in line with node requirements in the
+  # Vault Reference Architecture for a Small Cluster
+  resources:
+    requests:
+      memory: 50Mi
+      cpu: 500m
+    limits:
+      memory: 16Gi
+      cpu: 2000m
+
+  # For HA configuration and because we need to manually init the vault,
+  # we need to define custom readiness/liveness Probe settings
+  readinessProbe:
+    enabled: true
+    path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
+  livenessProbe:
+    enabled: true
+    path: "/v1/sys/health?standbyok=true"
+    initialDelaySeconds: 60
+
+  # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
+  # used to include variables required for auto-unseal.
+  extraEnvironmentVars:
+    VAULT_CACERT: /vault/userconfig/tls-ca/tls.crt
+
+  # extraVolumes is a list of extra volumes to mount. These will be exposed
+  # to Vault in the path `/vault/userconfig/<name>/`.
+  extraVolumes:
+    - type: secret
+      name: tls-server
+    - type: secret
+      name: tls-ca
+
+  standalone:
+    enabled: false
+
+  # Run Vault in "HA" mode.
+  ha:
+    enabled: true
+    replicas: 3
+    config: |
+      ui = true
+
+      listener "tcp" {
+        tls_disable = 0
+        address = "0.0.0.0:8200"
+        tls_cert_file = "/vault/userconfig/tls-server/tls.crt"
+        tls_key_file = "/vault/userconfig/tls-server/tls.key"
+        tls_min_version = "tls12"
+      }
+
+      storage "consul" {
+        path = "vault"
+        address = "consul-consul-server:8500"
+      }
+
+# Vault UI
+ui:
+  enabled: true
+  externalPort: 8200
\ No newline at end of file