---
# Source: consul/templates/server-disruptionbudget.yaml
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
spec:
  maxUnavailable: 0
  selector:
    matchLabels:
      app: consul
      release: "consul"
      component: server
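# Note: with a single server replica (see the StatefulSet below) and
# maxUnavailable: 0, this budget blocks all voluntary evictions of the
# server pod; e.g. `kubectl drain` on its node will stall unless run with
# --disable-eviction or the budget is removed first.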
---
# Source: consul/templates/client-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul-consul-client
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: client
---
# Source: consul/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
---
# Source: consul/templates/client-config-configmap.yaml
# ConfigMap with extra configuration specified directly to the chart
# for client agents only.
apiVersion: v1
kind: ConfigMap
metadata:
  name: consul-consul-client-config
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: client
data:
  extra-from-values.json: |-
    {}
  central-config.json: |-
    {
      "enable_central_service_config": true
    }
---
# Source: consul/templates/server-config-configmap.yaml
# ConfigMap with extra configuration specified directly to the chart
# for server agents only.
apiVersion: v1
kind: ConfigMap
metadata:
  name: consul-consul-server-config
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
data:
  extra-from-values.json: |-
    {}
  central-config.json: |-
    {
      "enable_central_service_config": true
    }
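# Both config maps are mounted into their pods at /consul/config and picked
# up via -config-dir; extra-from-values.json is additionally copied to
# /consul/extra-config at startup, where the agent's launch script
# substitutes the HOST_IP, POD_IP and HOSTNAME placeholders (see the
# command blocks in the DaemonSet and StatefulSet below).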
---
# Source: consul/templates/client-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul-consul-client
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: client
rules: []
---
# Source: consul/templates/server-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
rules: []
---
# Source: consul/templates/client-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul-consul-client
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: client
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul-consul-client
subjects:
  - kind: ServiceAccount
    name: consul-consul-client
---
# Source: consul/templates/server-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul-consul-server
subjects:
  - kind: ServiceAccount
    name: consul-consul-server
---
# Source: consul/templates/dns-service.yaml
# Service for Consul DNS.
apiVersion: v1
kind: Service
metadata:
  name: consul-consul-dns
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: dns
spec:
  type: ClusterIP
  ports:
    - name: dns-tcp
      port: 53
      protocol: "TCP"
      targetPort: dns-tcp
    - name: dns-udp
      port: 53
      protocol: "UDP"
      targetPort: dns-udp
  selector:
    app: consul
    release: "consul"
    hasDNS: "true"
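# To resolve *.consul names from any pod, the cluster DNS can forward the
# "consul" zone to this service. A minimal CoreDNS stanza (a sketch --
# 10.0.0.10 is a placeholder; substitute the ClusterIP reported by
# `kubectl get svc consul-consul-dns -n vault`):
#
#   consul:53 {
#     errors
#     cache 30
#     forward . 10.0.0.10
#   }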
---
# Source: consul/templates/server-service.yaml
# Headless service for Consul server DNS entries. This service should only
# point to Consul servers. For access to an agent, one should assume that
# the agent is installed locally on the node and the NODE_IP should be used.
# If the node can't run a Consul agent, then this service can be used to
# communicate directly to a server agent.
apiVersion: v1
kind: Service
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
  annotations:
    # This must be set in addition to publishNotReadyAddresses due
    # to an open issue where it may not work:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None
  # We want the servers to become available even if they're not ready
  # since this DNS is also used for join operations.
  publishNotReadyAddresses: true
  ports:
    - name: http
      port: 8500
      targetPort: 8500
    - name: serflan-tcp
      protocol: "TCP"
      port: 8301
      targetPort: 8301
    - name: serflan-udp
      protocol: "UDP"
      port: 8301
      targetPort: 8301
    - name: serfwan-tcp
      protocol: "TCP"
      port: 8302
      targetPort: 8302
    - name: serfwan-udp
      protocol: "UDP"
      port: 8302
      targetPort: 8302
    - name: server
      port: 8300
      targetPort: 8300
    - name: dns-tcp
      protocol: "TCP"
      port: 8600
      targetPort: dns-tcp
    - name: dns-udp
      protocol: "UDP"
      port: 8600
      targetPort: dns-udp
  selector:
    app: consul
    release: "consul"
    component: server
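# Because the service is headless (clusterIP: None), each server pod gets a
# stable per-pod DNS record of the form
# consul-consul-server-0.consul-consul-server.vault.svc; the -retry-join
# flags in the DaemonSet and StatefulSet below rely on exactly that name.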
---
# Source: consul/templates/ui-service.yaml
# UI Service for Consul Server
apiVersion: v1
kind: Service
metadata:
  name: consul-consul-ui
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: ui
spec:
  selector:
    app: consul
    release: "consul"
    component: server
  ports:
    - name: http
      port: 80
      targetPort: 8500
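# This ClusterIP service is not reachable from outside the cluster; during
# development the UI can be accessed with a port-forward, e.g.:
#   kubectl -n vault port-forward svc/consul-consul-ui 8500:80
# and then browsing to http://localhost:8500.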
---
# Source: consul/templates/client-daemonset.yaml
# DaemonSet to run the Consul clients on every node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: consul-consul
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: client
spec:
  selector:
    matchLabels:
      app: consul
      chart: consul-helm
      release: consul
      component: client
      hasDNS: "true"
  template:
    metadata:
      labels:
        app: consul
        chart: consul-helm
        release: consul
        component: client
        hasDNS: "true"
      annotations:
        "consul.hashicorp.com/connect-inject": "false"
        "consul.hashicorp.com/config-checksum": 797b3593a73b78fc74f3b1e3b978107b3022d4649802185631f959f000234331
    spec:
      terminationGracePeriodSeconds: 10
      serviceAccountName: consul-consul-client
      securityContext:
        fsGroup: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        runAsUser: 100
      volumes:
        - name: data
          emptyDir: {}
        - name: config
          configMap:
            name: consul-consul-client-config
      containers:
        - name: consul
          image: "hashicorp/consul:1.11.1"
          env:
            - name: ADVERTISE_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: CONSUL_DISABLE_PERM_MGMT
              value: "true"
          command:
            - "/bin/sh"
            - "-ec"
            - |
              CONSUL_FULLNAME="consul-consul"
              mkdir -p /consul/extra-config
              cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json
              [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json
              [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json
              [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json
              exec /usr/local/bin/docker-entrypoint.sh consul agent \
                -node="${NODE}" \
                -advertise="${ADVERTISE_IP}" \
                -bind=0.0.0.0 \
                -client=0.0.0.0 \
                -node-meta=host-ip:${HOST_IP} \
                -node-meta=pod-name:${HOSTNAME} \
                -hcl='leave_on_terminate = true' \
                -hcl='ports { grpc = 8502 }' \
                -config-dir=/consul/config \
                -datacenter=vault-kubernetes-guide \
                -data-dir=/consul/data \
                -retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \
                -config-file=/consul/extra-config/extra-from-values.json \
                -domain=consul
          volumeMounts:
            - name: data
              mountPath: /consul/data
            - name: config
              mountPath: /consul/config
          ports:
            - containerPort: 8500
              hostPort: 8500
              name: http
            - containerPort: 8502
              hostPort: 8502
              name: grpc
            - containerPort: 8301
              protocol: "TCP"
              name: serflan-tcp
            - containerPort: 8301
              protocol: "UDP"
              name: serflan-udp
            - containerPort: 8600
              name: dns-tcp
              protocol: "TCP"
            - containerPort: 8600
              name: dns-udp
              protocol: "UDP"
          readinessProbe:
            # NOTE(mitchellh): when our HTTP status endpoints support the
            # proper status codes, we should switch to that. This is temporary.
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  curl http://127.0.0.1:8500/v1/status/leader \
                  2>/dev/null | grep -E '".+"'
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 100m
              memory: 100Mi
          securityContext:
            null
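# Once the DaemonSet is up, cluster membership can be checked from any agent
# pod, e.g. (the pod name here is illustrative):
#   kubectl -n vault exec consul-consul-server-0 -- consul members
# Each node's client and the server should report a status of "alive".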
---
# Source: consul/templates/server-statefulset.yaml
# StatefulSet to run the actual Consul server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul-consul-server
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
spec:
  serviceName: consul-consul-server
  podManagementPolicy: Parallel
  replicas: 1
  selector:
    matchLabels:
      app: consul
      chart: consul-helm
      release: consul
      component: server
      hasDNS: "true"
  template:
    metadata:
      labels:
        app: consul
        chart: consul-helm
        release: consul
        component: server
        hasDNS: "true"
      annotations:
        "consul.hashicorp.com/connect-inject": "false"
        "consul.hashicorp.com/config-checksum": c9b100f895d5bda6a5c8bbebac73e1ab5bdc4cad06b04e72eb1b620677bfe41d
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: consul
                  release: "consul"
                  component: server
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 30
      serviceAccountName: consul-consul-server
      securityContext:
        fsGroup: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        runAsUser: 100
      volumes:
        - name: config
          configMap:
            name: consul-consul-server-config
      containers:
        - name: consul
          image: "hashicorp/consul:1.11.1"
          env:
            - name: ADVERTISE_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CONSUL_DISABLE_PERM_MGMT
              value: "true"
          command:
            - "/bin/sh"
            - "-ec"
            - |
              CONSUL_FULLNAME="consul-consul"
              mkdir -p /consul/extra-config
              cp /consul/config/extra-from-values.json /consul/extra-config/extra-from-values.json
              [ -n "${HOST_IP}" ] && sed -Ei "s|HOST_IP|${HOST_IP?}|g" /consul/extra-config/extra-from-values.json
              [ -n "${POD_IP}" ] && sed -Ei "s|POD_IP|${POD_IP?}|g" /consul/extra-config/extra-from-values.json
              [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json
              exec /usr/local/bin/docker-entrypoint.sh consul agent \
                -advertise="${ADVERTISE_IP}" \
                -bind=0.0.0.0 \
                -bootstrap-expect=1 \
                -client=0.0.0.0 \
                -config-dir=/consul/config \
                -datacenter=vault-kubernetes-guide \
                -data-dir=/consul/data \
                -domain=consul \
                -hcl="connect { enabled = true }" \
                -ui \
                -retry-join="${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc:8301" \
                -serf-lan-port=8301 \
                -config-file=/consul/extra-config/extra-from-values.json \
                -server
          volumeMounts:
            - name: data-vault
              mountPath: /consul/data
            - name: config
              mountPath: /consul/config
          ports:
            - name: http
              containerPort: 8500
            - name: serflan-tcp
              containerPort: 8301
              protocol: "TCP"
            - name: serflan-udp
              containerPort: 8301
              protocol: "UDP"
            - name: serfwan-tcp
              containerPort: 8302
              protocol: "TCP"
            - name: serfwan-udp
              containerPort: 8302
              protocol: "UDP"
            - name: server
              containerPort: 8300
            - name: dns-tcp
              containerPort: 8600
              protocol: "TCP"
            - name: dns-udp
              containerPort: 8600
              protocol: "UDP"
          readinessProbe:
            # NOTE(mitchellh): when our HTTP status endpoints support the
            # proper status codes, we should switch to that. This is temporary.
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  curl http://127.0.0.1:8500/v1/status/leader \
                  2>/dev/null | grep -E '".+"'
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 100m
              memory: 100Mi
          securityContext:
            null
  volumeClaimTemplates:
    - metadata:
        name: data-vault
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
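# The claim template yields a PVC named data-vault-consul-consul-server-0
# (template name plus pod name) from the cluster's default StorageClass, so
# the server's Raft data survives pod restarts. Note that scaling replicas
# up would also require raising -bootstrap-expect to match.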
---
# Source: consul/templates/tests/test-runner.yaml
apiVersion: v1
kind: Pod
metadata:
  name: "consul-consul-test"
  namespace: vault
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: consul-test
      image: "hashicorp/consul:1.11.1"
      env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: CONSUL_HTTP_ADDR
          value: http://$(HOST_IP):8500
      command:
        - "/bin/sh"
        - "-ec"
        - |
          consul members | tee members.txt
          if [ $(grep -c consul-server members.txt) != $(grep consul-server members.txt | grep -c alive) ]
          then
            echo "Failed because not all consul servers are available"
            exit 1
          fi
  restartPolicy: Never
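# Applied directly with kubectl, this pod simply runs once (restartPolicy:
# Never) and exits non-zero if any consul-server member is not alive. Under
# Helm, the helm.sh/hook annotation instead makes it a test hook, created by
# `helm test consul -n vault` rather than at install time.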