Merge branch 'master' into fluxcd-2022

This commit is contained in:
marcel-dempers 2023-04-17 09:51:24 +10:00
commit 689a52bf71
13 changed files with 1500 additions and 1457 deletions

View File

@ -160,7 +160,7 @@ Let's checkout the web UI:
kubectl -n vault get svc kubectl -n vault get svc
kubectl -n vault port-forward svc/vault-ui 443:8200 kubectl -n vault port-forward svc/vault-ui 443:8200
``` ```
Now we can access the web UI [here]("https://localhost/") Now we can access the web UI [here](https://localhost/)
## Enable Kubernetes Authentication ## Enable Kubernetes Authentication

View File

@ -55,29 +55,22 @@ helm repo add datree-webhook https://datreeio.github.io/admission-webhook-datree
helm search repo datree-webhook --versions helm search repo datree-webhook --versions
``` ```
Grab the manifest: Install the Helm chart:
``` ```
CHART_VERSION="0.3.22" CHART_VERSION="0.3.22"
APP_VERSION="0.1.46"
DATREE_TOKEN="" DATREE_TOKEN=""
mkdir ./kubernetes/datree/manifests/ helm install datree-webhook datree-webhook/datree-admission-webhook \
helm template datree-webhook datree-webhook/datree-admission-webhook \
--create-namespace \ --create-namespace \
--set datree.token=${DATREE_TOKEN} \ --set datree.token=${DATREE_TOKEN} \
--set datree.policy="Default" \
--set datree.clusterName=$(kubectl config current-context) \ --set datree.clusterName=$(kubectl config current-context) \
--version ${CHART_VERSION} \ --version ${CHART_VERSION} \
--namespace datree \ --namespace datree
> ./kubernetes/datree/manifests/datree.${APP_VERSION}.yaml
``` ```
Apply the manifests:
```
kubectl create namespace datree
kubectl apply -n datree -f kubernetes/datree/manifests/
```
Check the install Check the install
``` ```
@ -244,16 +237,14 @@ We can use `helm upgrade` with the `--set` flag and set enforce to true like:
Let's apply it to a new manifest and deploy it to our cluster: Let's apply it to a new manifest and deploy it to our cluster:
``` ```
helm template datree-webhook datree-webhook/datree-admission-webhook \ helm upgrade datree-webhook datree-webhook/datree-admission-webhook \
--create-namespace \ --create-namespace \
--set datree.enforce=true \ --set datree.enforce=true \
--set datree.policy="Default" \
--set datree.token=${DATREE_TOKEN} \ --set datree.token=${DATREE_TOKEN} \
--set datree.clusterName=$(kubectl config current-context) \ --set datree.clusterName=$(kubectl config current-context) \
--version ${CHART_VERSION} \ --version ${CHART_VERSION} \
--namespace datree \ --namespace datree
> ./kubernetes/datree/manifests/datree.${APP_VERSION}-enforce.yaml
kubectl apply -n datree -f kubernetes/datree/manifests/datree.0.1.46-enforce.yaml
``` ```
Try to apply our Wordpress MySQL which violates policies : Try to apply our Wordpress MySQL which violates policies :

View File

@ -1,718 +0,0 @@
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-scan-job-service-account
namespace: datree
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-label-namespaces-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-cleanup-namespaces-hook-pre-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-wait-server-ready-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: datree-ca-tls
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
namespace: datree
type: kubernetes.io/tls
data:
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBM3gydlg4YzdlanZpb0dyZWJmeGNSaDA5WWk2bmFxU3YvQXlEQ0lnVW9LSnFIZ1NBCmNUbU1FVXY0R3F2YUFGdWk5QmVFY0lZUXNhektZSWRTUkUxdmw3bm94K2hLSmRmRVZ4Y1lVaHZaOExaQ0tDNzEKRTdsZGZmUmQyM0kveHhrRE1rZCtTN1FNV1IxaXd5U05nRDU3cktBbVNxQlNVWStkSVpwQWNyQ0EwVStaY2ZteAprNUtRQXRodk9GYzZXWndtVjNpSjhOdzk0Q3ppVkIyRTNIVDZDZDlYcG9IdWlhN3pPWFZKamlCT0RubVRlZ0Y1Cmg5ZStZekx6cThxbUFLb3RkU08xNUQvSVhqSUNIYXdGMUFYeHNDQXlFUm1iN2FvZmo5a3N0M3BiTVJZeG0zTVUKbnY2aUJ6MGxKOXExcTBoNUJleXVmdk5JWTVXY3l5cEd2SGQrd1FJREFRQUJBb0lCQUVpMzlDRFRYcDlJUldUagpiL3VJOU1vbFhZeFNpRjVKcnRJSGdlMlY3S011VEVmY1Q4Q1hjUDl5TXpyK0o5OVYvcFp2MDhxWTUzZ0JTVFNNCjVsTThxZEpaMVhUU1VOaGtxcWwzN1lWVmJvTDE1RG9Vayt3SnpsN3U5bWcvcEduUHpTcm1BbFBLS3Z3Z2g3L3kKZWV3Q2NXeWlCZGpzeCtldFZ4bE1uUlRFVWpmbGpkbzhJdHJ4ajBEem5zUmgxZDVJaC9NZjJ6ekRQTXN0UGMyZgpxUDBGNGFNVkFLN2p6Qnp6cjNMNC81d1lwUXR2RUowSDcrViswTjZYOTFJUE85YWtaVE5UMmJXNDR1MXN6UmhFCkJ3dDJINmJUQ1VoTjVMUmszYTRnNEozTVd0cEhZSThHRVRVeGdaeElBeFhMRiszak1zU3JGb3NyN05EUTZhUWQKL1BDVWJRRUNnWUVBOURmcGxNdkNMVnhQRUF6RDdhcDQvNGRxYloyTXU5SGVwcFpkbUViRTVoWE9zNXVRZzN1dQp1U203OVB4dXQ0QTlhK3Z2NWNIditXWVdQc005MnAyWXA5Z2k3OVJQeHJFclhTdlFHU3E0UGh4MVlKV0VnY2R3ClIra1NiYm9rdTNLeGZKYlJta21SR2dMQ2tyOE9WSjBMWENnamEvaEJUQmkvZ0svUDZQZHRlWDBDZ1lFQTZlRW8KZ0RnaDgvbUl6TUxkOXBVVHZIQVUxNFlsTk11ay9qOW8rajFNd3gxbjJMcnM0RFlVeWMxR0RTeFlaK2l5VTRQTgppZ1lwRlY0SmRiTDRaYThBc21TRVcxUXNUckFZN1Y2UzN6Nkc1NVNZQmtYTnRSQnhQSHREbU5oY2JIYlhEdUNBCkc5cEpBK3ZSY21sbFBVZlRRTkt3bElaSU90aGgrQy80djJTUlBaVUNnWUErdnRiT21nTkxzRG5ILzkrZkFudVAKKzNUR3NRSGxoNmhTMkxNM1dvZGdMaDRyV3o2bjZYRWN0YkpLNFVoNDhRUFc1SW1BV0hHVmZEc2U2UDdOV2t4TQpZMldtaEwveVpyYWplNHc5eXhJSE16eWRFZzAzWXN4Z1RXdWtzWHlhaEg5QmFXWjA0NDNhUnZkQ3lMK2YwYkdICmZmQ0wzdjYzMUd2dlhqeG11SnR4NlFLQmdDNDRxV0J0dDRnWUVNa20yZWNabjBUbWdiZjJjdlAwS3k5MEtMTUwKMmxmVlAraTlTSU1uTFFTVTVQdEZnRk5JMGJWZm53ZGdJRTV3dnozYm1PdS9va3VmUWVrcXdYYnJwb0dDNTFQbgpiNUhrOUFhSlZSWXJvYlZxUnZtMkNNNEd6b25LSklkY3BJRjU0WExURVljQzR1VTB2bUVjQ0xwWWVVUXJkdVdjClluZmhBb0dBSkttM0RIYmlTU3MyaXZQa1FJNVFDNGZ
tLzBLV0IyZmpkVkZwTitLSzFrdDBBVUxWbTQ0OWhwTFcKWmVWMndGM29qUkxhamRmZnFGNjJCekYrU2pyY25Ed1g2SXFsT0F6b0xvaFdMc3hRYUlNL0xQRk9OakxlQW1YTAp2UUt6UXdJRElIaCtnekFDUy9jdEFzVXpuS0tIRTRqWmxFVnRnUko0WWxVSDdwd0FaZTQ9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lSQUs4TTRaaDl3TzJicFJieUY5VVR1UU13RFFZSktvWklodmNOQVFFTEJRQXcKTXpFeE1DOEdBMVVFQXhNb0wwTk9QVUZrYldsemMybHZiaUJEYjI1MGNtOXNiR1Z5SUZkbFltaHZiMnNnUkdWdApieUJEUVRBZUZ3MHlNekF4TVRnd09EVTFNRFphRncweU9EQXhNVGt3T0RVMU1EWmFNRE14TVRBdkJnTlZCQU1UCktDOURUajFCWkcxcGMzTnBiMjRnUTI5dWRISnZiR3hsY2lCWFpXSm9iMjlySUVSbGJXOGdRMEV3Z2dFaU1BMEcKQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURmSGE5Znh6dDZPK0tnYXQ1dC9GeEdIVDFpTHFkcQpwSy84RElNSWlCU2dvbW9lQklCeE9Zd1JTL2dhcTlvQVc2TDBGNFJ3aGhDeHJNcGdoMUpFVFcrWHVlakg2RW9sCjE4UlhGeGhTRzlud3RrSW9MdlVUdVYxOTlGM2Jjai9IR1FNeVIzNUx0QXhaSFdMREpJMkFQbnVzb0NaS29GSlIKajUwaG1rQnlzSURSVDVseCtiR1RrcEFDMkc4NFZ6cFpuQ1pYZUludzNEM2dMT0pVSFlUY2RQb0ozMWVtZ2U2Sgpydk01ZFVtT0lFNE9lWk42QVhtSDE3NWpNdk9yeXFZQXFpMTFJN1hrUDhoZU1nSWRyQVhVQmZHd0lESVJHWnZ0CnFoK1AyU3kzZWxzeEZqR2JjeFNlL3FJSFBTVW4ycldyU0hrRjdLNSs4MGhqbFp6TEtrYThkMzdCQWdNQkFBR2oKWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSApBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVV0MvNUp3bTNQZ1BXYW9TanNpelE2aHJCCm82Y3dEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS2lEdDZoVkZVN1RTekkvQWV4bXd0b3I3eUo4Qmg4L2Y1ZVIKTWJCSGN3dFRrTUpIazFuVUV2WG5GQS9xK1BDdzd3eXdUaHp0T0hwUkM1N3QvWkMwYkF5WUtRV1JJVEx5NWpDVwpUbDJRL1l5UkdKVlJjT0xQUWhWT1krcW1BdzluVklVTGRROWs0SEtPeUM0T1g2TmRCUktOazdjdlBzakpOc1M5CjRreUtCVUQyelArUGpGdDVEZUFFZXpRSmRwR2xiNXVyQnNHUldCZC8zODNYa01pOG5sSWhtbUFxVVlpcjFsc3cKRlNEWS9saDc5RDg0bTUzdFlVc0R2UjdwZ0pKbUtCOWRBUGJxOG1jQzdRUm5jd0tQSjdhUUJjTlpvNU1IZ3FFNAptelRlMnNybGhqbXcvSEFnMGdiM0RnME5hQzNzYlpTUytzeUhyVllyWVdQSHRWdDk2ZXc9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: webhook-server-tls
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
namespace: datree
annotations:
self-signed-cert: "true"
type: kubernetes.io/tls
data:
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeDV5WmFMaWFaMFdrQXFWMFBSejRFR0plYldyYkJ2bFl0RGxwbDZxa241U3FXdUE0ClR1amdLVW8zNFYvcjQ4N3JWR3c5eHBaN3FoWkxCQzRBYndjZWJuRGI3UFdHZzc5TXNUTjl3M0tzUEY2YkthcUcKWkdUOUk3WVZGc2QycUJleGVnSEtFSVFQc3NXelNteVFHUWRGZXlsSDlPRkN5a1ZKUDJpdTM2T1dWc3NwbHpCSgp3RW56MjYzQ2JIQXIvWFZ3QUE0MXlBaXRIUDJrWmYyL2ZaWGRKU0JCQ0JnSjdGb1YzTVZVbmRhVzhXRWVYRDdhCjQ1MkJKWkpLQzJORCs2WFEwdHluaFFVeFlYSzA3eTdTSHZiSDlOTWQ4bWd5S3NHVWdTRFdNV3lBV2srMElYTlcKZVFpbVNkd1I3czFyMkFJNUNteUVWSmMwMTk3VFR0ZGluNzBQSFFJREFRQUJBb0lCQUdvdDNkaTdvYjVmWi8vVQpYUUdKSUZjdXpFWHR1alo2ZW5uYnRGUnQvQVc3QWVjM01CeWhlV1BkUzk1QnRPdklESndxdTYyZ0xJWHNOOWt3ClV5QzhLNjdacjlMYlE4TmZCZitZZ1VSdkFqbFdwYmpETVp2RHNIZkhpbTVFaWRTZVRkUzFrUE82RzlPZm9HQnQKWVRVL0RmR1dvdVVhMGZsZ1k3Y3NDeUdCRmg1eUlKOUhHRUl5d1NBNUNQd2tnR3RtYks5M085MXNWcG13YjcvVAo1aTJOUGRVOThHaHFORjJMQjg3cTBBYVI3ekhPMGNkQ29OZitZUTQzR0JKM0hvRFpSNHJCQ2xvY3dacXJrN2FPClF6Mlp3SDQ2U084cHlkTWdTUE1IckpOdVlqZ01peWdZQ2ZXT0VqcHVwVThUWVZ6T3V6TmQ0TWQyU3doSVVYUm8KVWNRNHAwMENnWUVBN0lLODAraXpNYTNDZkFyM05UangzWnhFUjFodXdUTGJXUHg3VmNQdktoYkJNajRpVUVHVQptY1BKeWdscmF2TUlEMm9tdzBlTzhJSDdWTDU5Tm9rcGFSV0RZbU84eFFrRyt1TVFUcGtpaTYvOWJxNVg5eEQrClRNaUZWcGxFNGZBQThwcFhxSVRIUHFrQTlOeFhHWXROMjJOaEtBUWJuYTFPSHNXVzVrbFdnZE1DZ1lFQTJBOTMKaGsrMG92b3JKM0VGdWZVT0tDTmpLRXBIeFN1Ym5xMTJBS1Q3enFSVlpYZm9NV0hBcWpVZVBWTG9jK3l0NzkvOQpVVGJpY2ZmcDFaS1VTSS90Yk9iZkkrM2MvM3VqZjFmRHBieWw0Q05iaUZIOUZpbnBCbUpXM3BMaDZQWjVTNGt2ClZJZFFzQ2ZwQkVEWWdBeEZ2bm1saHlWdldHZXBBYkllcnJscXBVOENnWUVBeHFBWmN5SW5jOTVJeWlIdmNNd3QKRy85RHZHTkJTSkdzY3pRL1pFelR5NVltbEVwb1NOeDZyeFFsb0w1K2J1aEI2YWd0ZTZ6YUY1UWgvZzZvVzZlZgpsbmdSeWd5WEdTYTJyUGNLMStkMWdyaS9iemVOK3BsVDZDb3pDUUpaUGlKd3VVM3p0andrbExRY2NJZW53blVpCll0QTRaUUhtSzJyRGc4WlBMNEdCM0M4Q2dZRUFwTU0rdWF6a3FuZ3VHbmpGRGljRE1iYXlzaEhiSTAvNjc0bUYKK0QzWVRKL2pBMnJxSldaUEh6MDhuelV2VU4vSFVLcTJLWTI2SjRFUHo2OWs1dVRqQU80YWNmSzlXaEsxL3JFMQo0Smk0d2ZFVXB5TW01aFQxdjhtVVIwMHBlNWNocm1taUwwcTFUSEJTOE14bWpWZE9oRStOM0Q2KzUySzliaTZmCjJVeEtPRjhDZ1lBLzlBc3hmRHVYRHdlb044eDZRODZ
laTM1VTQxNE1NK1ZZK0l1ZWRlMU1MN1p1UCtSMTlvVEQKSjdYTERXaHZpUWMxMThYcWNRb2l6czdmcXB3YlRPM2gwNDBxMFV5Zk9TalQxb2VZcjVua1pIR2VrT0tINldGMQpXcFhCMWZSWDZ2SUxPVVloSGtEUWtCMTI2cWJIRmRXVUkrMENhOVRxeDFlNWN3YVNOOHo4cVE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURlekNDQW1PZ0F3SUJBZ0lRRXN2eFdLU3hJTnVtMUlhOTdmK1RNakFOQmdrcWhraUc5dzBCQVFzRkFEQXoKTVRFd0x3WURWUVFERXlndlEwNDlRV1J0YVhOemFXOXVJRU52Ym5SeWIyeHNaWElnVjJWaWFHOXZheUJFWlcxdgpJRU5CTUI0WERUSXpNREV4T0RBNE5UVXdObG9YRFRJNE1ERXhPVEE0TlRVd05sb3dMekV0TUNzR0ExVUVBeE1rCkwwTk9QV1JoZEhKbFpTMTNaV0pvYjI5ckxYTmxjblpsY2k1a1lYUnlaV1V1YzNaak1JSUJJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXg1eVphTGlhWjBXa0FxVjBQUno0RUdKZWJXcmJCdmxZdERscApsNnFrbjVTcVd1QTRUdWpnS1VvMzRWL3I0ODdyVkd3OXhwWjdxaFpMQkM0QWJ3Y2VibkRiN1BXR2c3OU1zVE45CnczS3NQRjZiS2FxR1pHVDlJN1lWRnNkMnFCZXhlZ0hLRUlRUHNzV3pTbXlRR1FkRmV5bEg5T0ZDeWtWSlAyaXUKMzZPV1Zzc3BsekJKd0VuejI2M0NiSEFyL1hWd0FBNDF5QWl0SFAya1pmMi9mWlhkSlNCQkNCZ0o3Rm9WM01WVQpuZGFXOFdFZVhEN2E0NTJCSlpKS0MyTkQrNlhRMHR5bmhRVXhZWEswN3k3U0h2Ykg5Tk1kOG1neUtzR1VnU0RXCk1XeUFXayswSVhOV2VRaW1TZHdSN3MxcjJBSTVDbXlFVkpjMDE5N1RUdGRpbjcwUEhRSURBUUFCbzRHT01JR0wKTUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJdwpEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCUllML2tuQ2JjK0E5WnFoS095TE5EcUdzR2pwekFyCkJnTlZIUkVFSkRBaWdpQmtZWFJ5WldVdGQyVmlhRzl2YXkxelpYSjJaWEl1WkdGMGNtVmxMbk4yWXpBTkJna3EKaGtpRzl3MEJBUXNGQUFPQ0FRRUFLU2E3TXowSG9xMEprT3h5UjI3Um9rQVM3MVVuVDFZTG5QS2tFSVpZaHVncAowSU5yZFpTVjVDa0FPWitCWkJHRElia2lVVzdnM3lNNUJjRDM3NmV0cFpXWlNnL1JyZ1FvRkxrY2t5dnczWHVDCk43QjU1Y3gvMFozemFOVXg5d1BlSXFJd0FwZjgxQUVqSlEwNllLSFhvbE5aakNTRTdNSlQyc2VpY054MTJUMGgKUVUvdHhLRm03MEhYSlN6L0YzVWxaaUxEeGswZnd3a2FvVVk0ZDlHL0tuRlRRaDEybW05QlNHQVNIdW5zUHdMSwpNcUF3SngzU2lpSURpQk82cVNWdlB0dWhlUHp3S2MxNDYzSHk2dUs4RkVnaktqSGlUd2pMSjNlZTBUZTFOVEtCCmlWTk5VSmxKNHhBa1Fqd1dGbUYvUkdqS1dBRmtwRFAzWUZlMnYwSG1XQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cluster-scan-job-role
rules:
- apiGroups:
- "*"
resources:
- "*"
verbs:
- "get"
- "list"
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-webhook-server-read
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- "nodes"
- "namespaces"
verbs:
- "get"
- "list"
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-namespaces-update
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- update
- patch
resourceNames:
- kube-system
- datree
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-validationwebhook-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- "admissionregistration.k8s.io"
resources:
- validatingwebhookconfigurations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
resourceNames:
- datree-webhook
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-scan-job-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-scan-job-role
subjects:
- kind: ServiceAccount
name: cluster-scan-job-service-account
namespace: datree
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-webhook-server-read
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-webhook-server-read # datree-webhook-server-read
subjects:
- kind: ServiceAccount
name: datree-webhook-server # datree-webhook-server
namespace: datree
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-namespaces-update
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-namespaces-update
subjects:
- kind: ServiceAccount
name: "datree-label-namespaces-hook-post-install"
namespace: "datree"
- kind: ServiceAccount
name: "datree-cleanup-namespaces-hook-pre-delete"
namespace: "datree"
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-validationwebhook-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-validationwebhook-delete
subjects:
- kind: ServiceAccount
name: "datree-cleanup-namespaces-hook-pre-delete"
namespace: "datree"
---
# Source: datree-admission-webhook/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: datree-pods-reader
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- "pods"
- "jobs"
verbs:
- "get"
- "list"
- "watch"
---
# Source: datree-admission-webhook/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: datree-pods-reader
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: datree-pods-reader
subjects:
- kind: ServiceAccount
name: datree-wait-server-ready-hook-post-install
namespace: "datree"
---
# Source: datree-admission-webhook/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
selector:
app: "datree-webhook-server"
ports:
- port: 443
targetPort: webhook-api
---
# Source: datree-admission-webhook/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
owner: datree
app: "datree-webhook-server"
spec:
replicas: 2
selector:
matchLabels:
app: "datree-webhook-server"
template:
metadata:
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
app: "datree-webhook-server"
spec:
serviceAccountName: datree-webhook-server
containers:
- name: server
# caution: don't change the order of the environment variables
# changing the order will harm resource patching
env:
- name: DATREE_TOKEN
value: "ef7088eb-3096-4533-97d8-f16fb3a5b0c1"
- name: DATREE_POLICY
value: Starter
- name: DATREE_VERBOSE
value: ""
- name: DATREE_OUTPUT
value: ""
- name: DATREE_NO_RECORD
value: ""
- name: DATREE_ENFORCE
value: "true"
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 25000
livenessProbe:
httpGet:
path: /health
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
resources:
{}
image: "datree/admission-webhook:0.1.41"
imagePullPolicy: Always
ports:
- containerPort: 8443
name: webhook-api
volumeMounts:
- name: webhook-tls-certs
mountPath: /run/secrets/tls
readOnly: true
- name: webhook-config
mountPath: /config
readOnly: true
volumes:
- name: webhook-tls-certs
secret:
secretName: webhook-server-tls
- name: webhook-config
configMap:
name: webhook-scanning-filters
optional: true
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: scan-job
namespace: datree
spec:
backoffLimit: 4
template:
spec:
serviceAccountName: cluster-scan-job-service-account
restartPolicy: Never
containers:
- name: scan-job
env:
- name: DATREE_TOKEN
value: ef7088eb-3096-4533-97d8-f16fb3a5b0c1
- name: DATREE_POLICY
value: Starter
- name: CLUSTER_NAME
value: kind-datree
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 25000
seccompProfile:
type: RuntimeDefault
image: "datree/scan-job:0.0.13"
imagePullPolicy: Always
resources:
{}
volumeMounts:
- name: webhook-config
mountPath: /config
readOnly: true
volumes:
- name: webhook-config
configMap:
name: webhook-scanning-filters
optional: true
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: scan-cronjob
namespace: datree
spec:
# get the current time, subtract 5 minutes, extract the minutes and inject it into the cron expression
# if helm installation was done at 13:35, the cron expression will be 30 * * * *, which means the job will run at 14:30, 15:30, 16:30, etc.
schedule: "50 * * * *" # every hour, starting 55 minutes after helm installation
jobTemplate:
spec:
backoffLimit: 4
template:
spec:
serviceAccountName: cluster-scan-job-service-account
restartPolicy: Never
containers:
- name: scan-job
env:
- name: DATREE_TOKEN
value: ef7088eb-3096-4533-97d8-f16fb3a5b0c1
- name: DATREE_POLICY
value: Starter
- name: CLUSTER_NAME
value: kind-datree
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 25000
seccompProfile:
type: RuntimeDefault
image: "datree/scan-job:0.0.13"
imagePullPolicy: Always
resources:
{}
volumeMounts:
- name: webhook-config
mountPath: /config
readOnly: true
volumes:
- name: webhook-config
configMap:
name: webhook-scanning-filters
optional: true
---
# Source: datree-admission-webhook/templates/namespace-post-delete.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: datree-cleanup-namespaces-hook-pre-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
namespace: datree
annotations:
"helm.sh/hook": pre-delete, pre-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
template:
metadata:
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
restartPolicy: OnFailure
serviceAccount: datree-cleanup-namespaces-hook-pre-delete
nodeSelector:
kubernetes.io/os: linux
containers:
- name: kubectl-label
image: "clastix/kubectl:v1.25"
imagePullPolicy: IfNotPresent
command:
- sh
- "-c"
- >-
kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io datree-webhook -n datree;
kubectl label ns kube-system datree datree.io/skip-;
---
# Source: datree-admission-webhook/templates/namespace-post-install.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: datree-label-namespaces-hook-post-install
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
template:
metadata:
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
serviceAccount: datree-label-namespaces-hook-post-install
restartPolicy: OnFailure
nodeSelector:
kubernetes.io/os: linux
containers:
- name: kubectl-label
image: "clastix/kubectl:v1.25"
imagePullPolicy: IfNotPresent
args:
- label
- ns
- kube-system
- datree
- admission.datree/validate=skip
- --overwrite
---
# Source: datree-admission-webhook/templates/wait-server-ready-post-install.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: datree-wait-server-ready-hook-post-install
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
template:
metadata:
name: datree-wait-server-ready-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
serviceAccountName: datree-wait-server-ready-hook-post-install
restartPolicy: Never
containers:
- name: kubectl-client
image: "clastix/kubectl:v1.25"
imagePullPolicy: IfNotPresent
command:
- sh
- "-c"
- >-
kubectl wait --for=condition=ready pod -l app=datree-webhook-server --timeout="180s"
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: datree-webhook
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
webhooks:
- name: webhook-server.datree.svc
sideEffects: None
timeoutSeconds: 30
failurePolicy: Ignore
admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: datree-webhook-server
namespace: datree
path: "/validate"
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lSQUs4TTRaaDl3TzJicFJieUY5VVR1UU13RFFZSktvWklodmNOQVFFTEJRQXcKTXpFeE1DOEdBMVVFQXhNb0wwTk9QVUZrYldsemMybHZiaUJEYjI1MGNtOXNiR1Z5SUZkbFltaHZiMnNnUkdWdApieUJEUVRBZUZ3MHlNekF4TVRnd09EVTFNRFphRncweU9EQXhNVGt3T0RVMU1EWmFNRE14TVRBdkJnTlZCQU1UCktDOURUajFCWkcxcGMzTnBiMjRnUTI5dWRISnZiR3hsY2lCWFpXSm9iMjlySUVSbGJXOGdRMEV3Z2dFaU1BMEcKQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURmSGE5Znh6dDZPK0tnYXQ1dC9GeEdIVDFpTHFkcQpwSy84RElNSWlCU2dvbW9lQklCeE9Zd1JTL2dhcTlvQVc2TDBGNFJ3aGhDeHJNcGdoMUpFVFcrWHVlakg2RW9sCjE4UlhGeGhTRzlud3RrSW9MdlVUdVYxOTlGM2Jjai9IR1FNeVIzNUx0QXhaSFdMREpJMkFQbnVzb0NaS29GSlIKajUwaG1rQnlzSURSVDVseCtiR1RrcEFDMkc4NFZ6cFpuQ1pYZUludzNEM2dMT0pVSFlUY2RQb0ozMWVtZ2U2Sgpydk01ZFVtT0lFNE9lWk42QVhtSDE3NWpNdk9yeXFZQXFpMTFJN1hrUDhoZU1nSWRyQVhVQmZHd0lESVJHWnZ0CnFoK1AyU3kzZWxzeEZqR2JjeFNlL3FJSFBTVW4ycldyU0hrRjdLNSs4MGhqbFp6TEtrYThkMzdCQWdNQkFBR2oKWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSApBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVV0MvNUp3bTNQZ1BXYW9TanNpelE2aHJCCm82Y3dEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS2lEdDZoVkZVN1RTekkvQWV4bXd0b3I3eUo4Qmg4L2Y1ZVIKTWJCSGN3dFRrTUpIazFuVUV2WG5GQS9xK1BDdzd3eXdUaHp0T0hwUkM1N3QvWkMwYkF5WUtRV1JJVEx5NWpDVwpUbDJRL1l5UkdKVlJjT0xQUWhWT1krcW1BdzluVklVTGRROWs0SEtPeUM0T1g2TmRCUktOazdjdlBzakpOc1M5CjRreUtCVUQyelArUGpGdDVEZUFFZXpRSmRwR2xiNXVyQnNHUldCZC8zODNYa01pOG5sSWhtbUFxVVlpcjFsc3cKRlNEWS9saDc5RDg0bTUzdFlVc0R2UjdwZ0pKbUtCOWRBUGJxOG1jQzdRUm5jd0tQSjdhUUJjTlpvNU1IZ3FFNAptelRlMnNybGhqbXcvSEFnMGdiM0RnME5hQzNzYlpTUytzeUhyVllyWVdQSHRWdDk2ZXc9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
namespaceSelector:
matchExpressions:
- key: admission.datree/validate
operator: DoesNotExist
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["*"]
apiVersions: ["*"]
resources: ["*"]

View File

@ -1,718 +0,0 @@
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-scan-job-service-account
namespace: datree
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-label-namespaces-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-cleanup-namespaces-hook-pre-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: datree-wait-server-ready-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: datree-ca-tls
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
namespace: datree
type: kubernetes.io/tls
data:
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbjBET0hhcklRU1A3Skc1Y1dEZWFmSmFVSHM2YklMTEFtMEF4Q1RFbVpud29BUTlHCmFEM01uNklqd3BGaVV4UGJMcEtqTUtRZm5jYTVLdWhleHZ2LzlNOGN4TFVCK0RGZnhlYkZvaGdoZHhFam94NnEKS0JmcVVqaURhY2xLMUJGWEtnQnZHZjFWczIxbWZwLzA2QnI1alRSTEJZdVZrWmEwNjZJK0drSkpVQ1c1MGpwcApYREdtdVhUaEYwQVhNT01RQS9Nb0tQTlBrYVA1UUZ6bUtyUFkxencxQ0xzVTk3eXRzK2d4N01ZM1dsVHRDWnVVCjYxNnRhNE1qSmNMRXF2ZVNVblhsUUNFMTBJYnJpNTl5eEtZTzRhUHNRUlpBaUd0WWhjWXVhNHdWdXpJK0xTZlcKN202NHlNOWNpN1Z4UlVjemNqRlM5NWR6R1hKWk9VVVB3YUduMndJREFRQUJBb0lCQVFDVHBjaXpWcmh0TklmTwpnZ2RadnN1YlFSdzQ1OEtKY1ZFRFgyTlhLMXQzM3hwVHlTNjB6TDhmTFh0TUUvQitKOFdwaTBpRGUxYll0L3JMCkhqOW82eENtanpNVDZPSFhreWRCV3pEV2xOcktBbmp3N2loQ0hkSWd3c2FMMkpWb3dsNzIwUW93cFdERWh1UmsKOTdaZlQwc1pNR2R4ejdVdkV2UFFGMDdPbDdCUy9nQzc0dnlaYTR4VmptdXBKNld5Y1VOTlR0WG42MUVxLzVjVwpTL3ZzRFNxdzlCaXIvRUUrL3N3K3lSdnlXeXIzMC9iZm44ZVY0blZnZmJic2U1Z1B3dmVNYlJOR2R3cjNzL1hzClcycW5tZ3NLWFg4b3lmUjlWUy82alozNnNzY0NLckx3bFhNQTRlcUhEWXJtOFZDZk5sc0ZFMnFhOVpOd21ublUKeHV6T2V4R3hBb0dCQU1nRllSakRyK283NDNFL3RlUXJLcTViN25XcktRbjdxdWpIVXg5b1pQYzFvajdDSUdndApITVQyaTM4eU1tbEw1ZXZ0cTd5NjNUcXA1ZGdIVlZaRjZqTENrZnBBTlhLaDBHL3FlaFFkMTNnMlZYTVBWTFRSCnUvUWdha2kxdEYwWkkyOTEzZU1zazdscVJOVGJxOGVTbmpPdkF6NXFtTXY3TU9DZ2JuQTJEbVhUQW9HQkFNdlMKbHFhc3E3RlNIMUVDNXE0b1pURTlYVFUxTkNRM09oSGtwTTBkMjJURmp0bGVWVnFPZ0c1Y0cwMHlTN2dyTGtZRwowbGV6Tm1TSVhFZ1VqYjZSRjg3aTlieXFIeFQ2cXNlNEU3LzlYNDM3NWkxTHlnSkxNY0xEMGo2aUpxdUJQZ01WCjBMT1BFdUZNczdmL3FyY2ozSHpyTlVMT1pFZEdYOTBOVGtGaHpralpBb0dBVkhWNUIzVHgzZzFGdjdjd1BkVkEKWTNsc0dvR1loWitnRGtURVE1bllNRTZVWUwybDQzZFJFNVlyVngxQ0RoWS9Vcno3N0doWExBTTdpMW1sWGhXTgppN3QrMmxXc2UrZjUxSmdFem1PL2JRSThXS1pibFRLT2s4bndOeDJLdUZqNkRvR05uUFJndUVVNEpVMVFucWU1Clo0ZDU3aXdpc3RjeFQxaE82ZERaaVlNQ2dZQkl4eXdsM1pmODIrNzB0VTE3T0U5UnNyQ2FkQ0huSUpVcW1ITEUKRHZvczFHSDZlYldPZlQyY3FtVFJQcmxNekpaY1NNbElxV1F0cDRjVDhjcmZGZDNqY0tVQU5kcWRXaGdxOGk2VApLank1YlEyMmRNNXYzVHVxYU5Pa3E2K1ZJN1BwMUJ0T1VqTVNvWm0yaEtNSGU5V2FBVDVtV1YzekdVelhtSTJ0CnlPZW9tUUtCZ0NtYUJadUdpaEYyTlJORjBRUkhaRmd
XRWdwRk1rWFFVcHFSOHVFNlRTTlFJUWVSSEYzaXFhbzMKSmsvYjgzbzZlTUlTMTN0RDNWN0JMY1J2ckhQK0pBcG5sNk5BeXUrUVMzOVpkOVp4d0RGOUZueVJxRVg4ZE9uZApZWkVoMXNFTEdyRlVNa1hkRVZUNFFsQUN1Q01sUmQ0NGNaZ3lPSFZzMWlIZDZyUUJubjUyCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lSQUxKTmg1YnVYN1A0V1ZkcndXWWQzRG93RFFZSktvWklodmNOQVFFTEJRQXcKTXpFeE1DOEdBMVVFQXhNb0wwTk9QVUZrYldsemMybHZiaUJEYjI1MGNtOXNiR1Z5SUZkbFltaHZiMnNnUkdWdApieUJEUVRBZUZ3MHlNekF4TVRnd05URXhNVGxhRncweU9EQXhNVGt3TlRFeE1UbGFNRE14TVRBdkJnTlZCQU1UCktDOURUajFCWkcxcGMzTnBiMjRnUTI5dWRISnZiR3hsY2lCWFpXSm9iMjlySUVSbGJXOGdRMEV3Z2dFaU1BMEcKQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUNmUU00ZHFzaEJJL3NrYmx4WU41cDhscFFlenBzZwpzc0NiUURFSk1TWm1mQ2dCRDBab1BjeWZvaVBDa1dKVEU5c3VrcU13cEIrZHhya3E2RjdHKy8vMHp4ekV0UUg0Ck1WL0Y1c1dpR0NGM0VTT2pIcW9vRitwU09JTnB5VXJVRVZjcUFHOFovVld6YldaK24vVG9Hdm1OTkVzRmk1V1IKbHJUcm9qNGFRa2xRSmJuU09tbGNNYWE1ZE9FWFFCY3c0eEFEOHlnbzgwK1JvL2xBWE9ZcXM5alhQRFVJdXhUMwp2SzJ6NkRIc3hqZGFWTzBKbTVUclhxMXJneU1sd3NTcTk1SlNkZVZBSVRYUWh1dUxuM0xFcGc3aG8reEJGa0NJCmExaUZ4aTVyakJXN01qNHRKOWJ1YnJqSXoxeUx0WEZGUnpOeU1WTDNsM01aY2xrNVJRL0JvYWZiQWdNQkFBR2oKWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSApBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVeG82MXp0eEUrbEdia2JGcGpUOU0wTWVnCkgzWXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2lWSVhqREJPcXU5elR0d1FUMkFpZkJ2eFlXTWM4bXJoVnUKcWMzMnJUT0VRQ05vUkpQYkxZM01KeUFwZjJtOUxJNEN2SU1SMTIwc0ttYzRQTXE5ZzRCb291Yng0aWNsOFl1OAp1bmRuVWhmODAwSUp5YUthMittZjgzZjJmcmZXSlF1NzVMMnRrYys4WWtFWFZnR2cyazdxVXZkeThzdzRUTEZICmlPMktvVm5Xeit4R2FQb25BK09OK01lSUxDOGgrNlVNdjM5a2pTb29TV1M3amFHVDZXS2Z3aFExa1JJM2JIZS8KL05ZZHpjVkJibXJ0eFg1K1RvcmxNOSswcnoybnBwNkN5MlFSZHpuM3hKWHNGVk4wTml6V3pVZWErLzVEVndwSQpBeE1uSXBJNmpzME02cVJ4VUdZVHFOdTk1YkJSanVwQTFwVDJDZGFhYnp5NU0xK2VTaTg9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: webhook-server-tls
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
namespace: datree
annotations:
self-signed-cert: "true"
type: kubernetes.io/tls
data:
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcDk2Yzc2eTREMlVybGtVNlZDMmZzZytaWS9VWFVmL1NvZFZyVlBFbU5zMUpNbGp0CmZOYSt2RVZXNllqQ3cyQnc2RExBZURGeWw2SWsvYjR1TGpHOEYwSEhhK3RjMk5Mc2tnLzJhUTQrM1NnWkxsM3UKOXJmaFNDMWUwMkVNWHh0bnIyZU82RW1EVlE1SjFCSzVWSG1Qa1VYMHI5Mm84TXVSOGZERytPYjFPUW82TWtocgo1WHZNWW0wMmVURnlwLzc3alNzN1JLZEZnOXF0Z1VVcHhYejVOVnhLNDlRaVpleWIvSklUbnc4T0R6Qk5ac0FZCjJScStiZjhFUU95MktFNDZmNWhSN2Vhb1VsRFp3RnFYRzRkRkhzRmNkRlNhYzh5SXJJUXlNejFxVEZSanFyYjQKU2JWVjNzY0RFZ3lwUVh0Z3NHcjFVbUFwcDJCVXRVSDhHUm1BZFFJREFRQUJBb0lCQUFxTHBJWTE3blloSCtUWAp3bnRKUm0vMEpPbXZtdUJ1MXJlTjVhazNZUFF1WHp2SGRGdlVUYlVjRWdLbnNieCtVWGwwdnJ5T05xbXA2UEw3CndJRHNaT2w5RzE3L01SejUyeHl0M2dmcGVpK0FkbHlBVUNPMWwzUm1UVCt3S0F2TmQrei83MjFPT083ZDcrdGYKcGI3VnlCd1RMZlRpVXR1Vm5qeDVxTFk0SkEyS0tkdVJnZXg4R0lVcXNtQncrcms0T04xRVJFOWZKQjVveHV6bgo1VmhnU2VhaVVWVXRrYmFtYjBLNDBpTVRKSUh2bWlMTGwyTkxaeTVkSkg3MFlaQkh6bEtLaFpqY3huaXBVOURECkVpSmp2TllkUXVlMisyNlB4bENpNC8rdDNtQ015T01LcjFqZzJ0TnZrQzNMNjBzb1BCQUZ6S2VMSW41dVZLcWsKS2RmY3BrRUNnWUVBMmdVanlTa2M2dXhlQ1R5cXV2WWtudGRLRlV1eEF4TTk2SGlNZG1Sb2tQSmkzUjdvQk5SRgpuZEVLV3pGcEFBc2RnZzlEdFprK3lYTkowWGxtZUV4WEU2QnFBemtOZ1Fic0NTMG5TV1ovazFDdjdCNUdYbFRJCkxMNk5SaS9wK2NMaWxtS1d1SlE0aW5mQi9nZ3QvbXVLVDIrWDRKVFVMK2haNTlqaHZqRlBQOUVDZ1lFQXhSejAKQ3FRZVpnUGhrR2dud2JHTG9ySXo0Q3BlTWkwbStrZm9vSFdQWW12Z1AxbnFJZWh5dERvVFUzUFpjVUNSbzdVbAowZkJubGhyMXNHRWVuWlBBQlpWUnZmeWs5blNDRW9zdnBDd3RYTUFTUHZjOGZSRVRXam1nVTRKZTBMU0dxdWdGClBQQWNubDE3VC9ITXNQeWwwSUd3Sjc4ZFNtd1dGcjNiQlFJUDQyVUNnWUJjcWhpV3RHbTlFKyszLzFnVmxPN2wKc0YybGhZRmI3RDdBNHhQWWNqN2JkSm8rbjVkQURqVDBxZGU4QU5rL0VucGRRRDJvSHRWSDdEOXcwQ2VVYytZQwp5b2lrakFoSVVmZmF3cDFUSGtTVkNaTnNTVVhoYkNtVWt2MGEydHlZc3BONkZiYzRCbyt0a3M4YU9NSEx4RXVLCkRjVkF5Q0VUcDY4bTB0REg5TTlaTVFLQmdRQ2lYK1o5T1pNOUVHZHBFUlBuRUgzcHlZaklXYjU4OFFzUjA5akQKRGZUTzYvU3Yyejd2TGRBSHZXdWNMR3ZzU25kdTkxT3ZiSzI0VG44a0MrMHZlNzRNRzJSWjhGeG9GYlBzMkxHbgpPU2twSmFRaU1JS291RDlMN1Bxd3NFMncrWFdTSmszaVZCNFBLd3pnMzF4eVU3MjRWSTByUU5rOUxHckoweDR3Ck12R3ByUUtCZ0JYejVhVUIrQk8wQVBna1ZZZFo2TEl
IRHJod3lnc0Y4T0VYcFEzbHdkM0pIS1Y0VVpOUVQya0wKZXhZK3g3Z0FadytKTVczdmpkaVVpVzV6cjBESUlNMDF1QitQcGFRemZ3QkM2Qy8vSVUrZy9Sa1R0TlJ3NzRkaAp3QWN1azRMRWxiNTVNT1VjRlJ2d2EvWXY3NWpRK3BGOUYwa3JNS1U2bDhReEQyaFhCcjJUCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURlekNDQW1PZ0F3SUJBZ0lRYzlNU2dBY1VhcWpMdG9vYUYrdlVxREFOQmdrcWhraUc5dzBCQVFzRkFEQXoKTVRFd0x3WURWUVFERXlndlEwNDlRV1J0YVhOemFXOXVJRU52Ym5SeWIyeHNaWElnVjJWaWFHOXZheUJFWlcxdgpJRU5CTUI0WERUSXpNREV4T0RBMU1URXhPVm9YRFRJNE1ERXhPVEExTVRFeE9Wb3dMekV0TUNzR0ExVUVBeE1rCkwwTk9QV1JoZEhKbFpTMTNaV0pvYjI5ckxYTmxjblpsY2k1a1lYUnlaV1V1YzNaak1JSUJJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXA5NmM3Nnk0RDJVcmxrVTZWQzJmc2crWlkvVVhVZi9Tb2RWcgpWUEVtTnMxSk1sanRmTmErdkVWVzZZakN3MkJ3NkRMQWVERnlsNklrL2I0dUxqRzhGMEhIYSt0YzJOTHNrZy8yCmFRNCszU2daTGwzdTlyZmhTQzFlMDJFTVh4dG5yMmVPNkVtRFZRNUoxQks1VkhtUGtVWDByOTJvOE11UjhmREcKK09iMU9RbzZNa2hyNVh2TVltMDJlVEZ5cC83N2pTczdSS2RGZzlxdGdVVXB4WHo1TlZ4SzQ5UWlaZXliL0pJVApudzhPRHpCTlpzQVkyUnErYmY4RVFPeTJLRTQ2ZjVoUjdlYW9VbERad0ZxWEc0ZEZIc0ZjZEZTYWM4eUlySVF5Ck16MXFURlJqcXJiNFNiVlYzc2NERWd5cFFYdGdzR3IxVW1BcHAyQlV0VUg4R1JtQWRRSURBUUFCbzRHT01JR0wKTUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJdwpEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCVEdqclhPM0VUNlVadVJzV21OUDB6UXg2QWZkakFyCkJnTlZIUkVFSkRBaWdpQmtZWFJ5WldVdGQyVmlhRzl2YXkxelpYSjJaWEl1WkdGMGNtVmxMbk4yWXpBTkJna3EKaGtpRzl3MEJBUXNGQUFPQ0FRRUFDSThNc1hMejcyRThwOUE2MGdqVzZWdkcrdlN3ZzczQys3TDgxYUIvd0IzTwpxVnVYaVRHQmgrZmI2UlJLbzQxS25Pa0RkRlNCM0lWTHFJaHQvOU5uVHl0eWlVSFRmcEhvVGUwak1meFRXeGQ0CndVMnhLeWNRVmVBcFkzSlFpMWU4MEhZU3NFQ3NMVDBlRloxNmcyVmE0bUhvUnpHOWswTk5LL2tVc0xRL3lUMlcKUk5vWjcwV0NNdEV0MlE0eUU3NXBDdXBnc3B5b1J2SUNScjczTiszOVBVVDA2SndZSnZnaTBVQ3RjMlhVK2I5SQpVYU5TK3JSQzRFVm5qM003VVgrZVZXKzJFNXdpT3NKU1g0Z0M3S0kxcTNTcnlveXNlbVJJYXNaZjBkaENmNjI0CkE2ZmtZdGRVMmN3SzBrMVhnVC9oaHVZS2FpRDc1TjlTSWYxTGZtZklrUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cluster-scan-job-role
rules:
- apiGroups:
- "*"
resources:
- "*"
verbs:
- "get"
- "list"
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-webhook-server-read
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- "nodes"
- "namespaces"
verbs:
- "get"
- "list"
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-namespaces-update
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- update
- patch
resourceNames:
- kube-system
- datree
---
# Source: datree-admission-webhook/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: datree-validationwebhook-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- "admissionregistration.k8s.io"
resources:
- validatingwebhookconfigurations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
resourceNames:
- datree-webhook
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-scan-job-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-scan-job-role
subjects:
- kind: ServiceAccount
name: cluster-scan-job-service-account
namespace: datree
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-webhook-server-read
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-webhook-server-read # datree-webhook-server-read
subjects:
- kind: ServiceAccount
name: datree-webhook-server # datree-webhook-server
namespace: datree
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-namespaces-update
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-namespaces-update
subjects:
- kind: ServiceAccount
name: "datree-label-namespaces-hook-post-install"
namespace: "datree"
- kind: ServiceAccount
name: "datree-cleanup-namespaces-hook-pre-delete"
namespace: "datree"
---
# Source: datree-admission-webhook/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: datree-validationwebhook-delete
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: datree-validationwebhook-delete
subjects:
- kind: ServiceAccount
name: "datree-cleanup-namespaces-hook-pre-delete"
namespace: "datree"
---
# Source: datree-admission-webhook/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: datree-pods-reader
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
rules:
- apiGroups:
- ""
resources:
- "pods"
- "jobs"
verbs:
- "get"
- "list"
- "watch"
---
# Source: datree-admission-webhook/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: datree-pods-reader
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: datree-pods-reader
subjects:
- kind: ServiceAccount
name: datree-wait-server-ready-hook-post-install
namespace: "datree"
---
# Source: datree-admission-webhook/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
selector:
app: "datree-webhook-server"
ports:
- port: 443
targetPort: webhook-api
---
# Source: datree-admission-webhook/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: datree-webhook-server
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
owner: datree
app: "datree-webhook-server"
spec:
replicas: 2
selector:
matchLabels:
app: "datree-webhook-server"
template:
metadata:
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
app: "datree-webhook-server"
spec:
serviceAccountName: datree-webhook-server
containers:
- name: server
# caution: don't change the order of the environment variables
# changing the order will harm resource patching
env:
- name: DATREE_TOKEN
value: "ef7088eb-3096-4533-97d8-f16fb3a5b0c1"
- name: DATREE_POLICY
value: Starter
- name: DATREE_VERBOSE
value: ""
- name: DATREE_OUTPUT
value: ""
- name: DATREE_NO_RECORD
value: ""
- name: DATREE_ENFORCE
value: ""
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 25000
livenessProbe:
httpGet:
path: /health
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
resources:
{}
image: "datree/admission-webhook:0.1.41"
imagePullPolicy: Always
ports:
- containerPort: 8443
name: webhook-api
volumeMounts:
- name: webhook-tls-certs
mountPath: /run/secrets/tls
readOnly: true
- name: webhook-config
mountPath: /config
readOnly: true
volumes:
- name: webhook-tls-certs
secret:
secretName: webhook-server-tls
- name: webhook-config
configMap:
name: webhook-scanning-filters
optional: true
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
# One-shot cluster scan Job; same pod spec as the hourly scan-cronjob.
apiVersion: batch/v1
kind: Job
metadata:
  name: scan-job
  namespace: datree
spec:
  backoffLimit: 4
  template:
    spec:
      serviceAccountName: cluster-scan-job-service-account
      restartPolicy: Never
      containers:
        - name: scan-job
          env:
            # NOTE(review): plaintext API token committed to the manifest —
            # rotate it and reference a Kubernetes Secret (secretKeyRef)
            # instead of an inline value.
            - name: DATREE_TOKEN
              value: ef7088eb-3096-4533-97d8-f16fb3a5b0c1
            - name: DATREE_POLICY
              value: Starter
            - name: CLUSTER_NAME
              value: kind-datree
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 25000
            seccompProfile:
              type: RuntimeDefault
          image: "datree/scan-job:0.0.13"
          imagePullPolicy: Always
          resources: {}
          volumeMounts:
            - name: webhook-config
              mountPath: /config
              readOnly: true
      volumes:
        - name: webhook-config
          configMap:
            name: webhook-scanning-filters
            optional: true
---
# Source: datree-admission-webhook/templates/cluster-scan-cronjob.yaml
# NOTE(review): batch/v1beta1 CronJob was removed in Kubernetes 1.25; the GA
# batch/v1 API (available since 1.21) is required on current clusters — the
# chart's own hook containers already target kubectl v1.25.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: scan-cronjob
  namespace: datree
spec:
  # The chart injects the install-time minute (minus 5) into the cron
  # expression so the scan runs once per hour, offset from install time.
  schedule: "06 * * * *"  # hourly, at 6 minutes past the hour
  jobTemplate:
    spec:
      backoffLimit: 4
      template:
        spec:
          serviceAccountName: cluster-scan-job-service-account
          restartPolicy: Never
          containers:
            - name: scan-job
              env:
                # NOTE(review): plaintext API token committed to the manifest —
                # rotate it and reference a Kubernetes Secret (secretKeyRef)
                # instead of an inline value.
                - name: DATREE_TOKEN
                  value: ef7088eb-3096-4533-97d8-f16fb3a5b0c1
                - name: DATREE_POLICY
                  value: Starter
                - name: CLUSTER_NAME
                  value: kind-datree
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsNonRoot: true
                runAsUser: 25000
                seccompProfile:
                  type: RuntimeDefault
              image: "datree/scan-job:0.0.13"
              imagePullPolicy: Always
              resources: {}
              volumeMounts:
                - name: webhook-config
                  mountPath: /config
                  readOnly: true
          volumes:
            - name: webhook-config
              configMap:
                name: webhook-scanning-filters
                optional: true
---
# Source: datree-admission-webhook/templates/namespace-post-delete.yaml
# Helm hook Job: tears down the webhook configuration and namespace labels
# before the release is deleted or upgraded.
apiVersion: batch/v1
kind: Job
metadata:
  name: datree-cleanup-namespaces-hook-pre-delete
  namespace: datree
  labels:
    app.kubernetes.io/name: datree-admission-webhook
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "datree-webhook"
    app.kubernetes.io/version: 0.1.41
    app.kubernetes.io/part-of: "datree"
    meta.helm.sh/release-name: "datree-admission-webhook"
    meta.helm.sh/release-namespace: "datree"
    helm.sh/chart: datree-admission-webhook-0.3.22
  annotations:
    # Helm runs this Job before delete/upgrade and removes it afterwards.
    "helm.sh/hook": pre-delete, pre-upgrade
    "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: datree-admission-webhook
        app.kubernetes.io/managed-by: "Helm"
        app.kubernetes.io/instance: "datree-webhook"
        app.kubernetes.io/version: 0.1.41
        app.kubernetes.io/part-of: "datree"
        meta.helm.sh/release-name: "datree-admission-webhook"
        meta.helm.sh/release-namespace: "datree"
        helm.sh/chart: datree-admission-webhook-0.3.22
    spec:
      restartPolicy: OnFailure
      # NOTE(review): `serviceAccount` is a deprecated alias; prefer
      # `serviceAccountName` (used elsewhere in this manifest).
      serviceAccount: datree-cleanup-namespaces-hook-pre-delete
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: kubectl-label
          image: "clastix/kubectl:v1.25"
          imagePullPolicy: IfNotPresent
          # ValidatingWebhookConfigurations are cluster-scoped, so the `-n`
          # flag below is ignored by kubectl. The trailing `datree.io/skip-`
          # removes that label from the kube-system and datree namespaces.
          # NOTE(review): the post-install hook sets the label
          # `admission.datree/validate=skip`, but this cleanup removes
          # `datree.io/skip` — label keys look inconsistent; verify upstream.
          command:
            - sh
            - "-c"
            - >-
              kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io datree-webhook -n datree;
              kubectl label ns kube-system datree datree.io/skip-;
---
# Source: datree-admission-webhook/templates/namespace-post-install.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: datree-label-namespaces-hook-post-install
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
template:
metadata:
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
serviceAccount: datree-label-namespaces-hook-post-install
restartPolicy: OnFailure
nodeSelector:
kubernetes.io/os: linux
containers:
- name: kubectl-label
image: "clastix/kubectl:v1.25"
imagePullPolicy: IfNotPresent
args:
- label
- ns
- kube-system
- datree
- admission.datree/validate=skip
- --overwrite
---
# Source: datree-admission-webhook/templates/wait-server-ready-post-install.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: datree-wait-server-ready-hook-post-install
namespace: datree
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded, hook-failed
spec:
template:
metadata:
name: datree-wait-server-ready-hook-post-install
labels:
app.kubernetes.io/name: datree-admission-webhook
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "datree-webhook"
app.kubernetes.io/version: 0.1.41
app.kubernetes.io/part-of: "datree"
meta.helm.sh/release-name: "datree-admission-webhook"
meta.helm.sh/release-namespace: "datree"
helm.sh/chart: datree-admission-webhook-0.3.22
spec:
serviceAccountName: datree-wait-server-ready-hook-post-install
restartPolicy: Never
containers:
- name: kubectl-client
image: "clastix/kubectl:v1.25"
imagePullPolicy: IfNotPresent
command:
- sh
- "-c"
- >-
kubectl wait --for=condition=ready pod -l app=datree-webhook-server --timeout="180s"
---
# Source: datree-admission-webhook/templates/webhook-with-cert-secrets.yaml
# Registers the datree server as a validating admission webhook for all
# CREATE/UPDATE requests, except in namespaces labelled to be skipped.
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: datree-webhook
  annotations:
    "helm.sh/hook": post-install, post-upgrade
    "helm.sh/hook-weight": "-5"
webhooks:
  - name: webhook-server.datree.svc
    sideEffects: None
    # 30s is the maximum timeout the API server permits for admission webhooks.
    timeoutSeconds: 30
    # Fail-open: if the webhook server is unreachable, admission is ALLOWED.
    # Change to `Fail` to enforce policy even when the server is down.
    failurePolicy: Ignore
    admissionReviewVersions:
      - v1
      - v1beta1
    clientConfig:
      service:
        name: datree-webhook-server
        namespace: datree
        path: "/validate"
      # Base64 PEM CA used to verify the webhook server's TLS certificate;
      # must correspond to the self-signed CA stored in the datree-ca-tls Secret.
      caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lSQUxKTmg1YnVYN1A0V1ZkcndXWWQzRG93RFFZSktvWklodmNOQVFFTEJRQXcKTXpFeE1DOEdBMVVFQXhNb0wwTk9QVUZrYldsemMybHZiaUJEYjI1MGNtOXNiR1Z5SUZkbFltaHZiMnNnUkdWdApieUJEUVRBZUZ3MHlNekF4TVRnd05URXhNVGxhRncweU9EQXhNVGt3TlRFeE1UbGFNRE14TVRBdkJnTlZCQU1UCktDOURUajFCWkcxcGMzTnBiMjRnUTI5dWRISnZiR3hsY2lCWFpXSm9iMjlySUVSbGJXOGdRMEV3Z2dFaU1BMEcKQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUNmUU00ZHFzaEJJL3NrYmx4WU41cDhscFFlenBzZwpzc0NiUURFSk1TWm1mQ2dCRDBab1BjeWZvaVBDa1dKVEU5c3VrcU13cEIrZHhya3E2RjdHKy8vMHp4ekV0UUg0Ck1WL0Y1c1dpR0NGM0VTT2pIcW9vRitwU09JTnB5VXJVRVZjcUFHOFovVld6YldaK24vVG9Hdm1OTkVzRmk1V1IKbHJUcm9qNGFRa2xRSmJuU09tbGNNYWE1ZE9FWFFCY3c0eEFEOHlnbzgwK1JvL2xBWE9ZcXM5alhQRFVJdXhUMwp2SzJ6NkRIc3hqZGFWTzBKbTVUclhxMXJneU1sd3NTcTk1SlNkZVZBSVRYUWh1dUxuM0xFcGc3aG8reEJGa0NJCmExaUZ4aTVyakJXN01qNHRKOWJ1YnJqSXoxeUx0WEZGUnpOeU1WTDNsM01aY2xrNVJRL0JvYWZiQWdNQkFBR2oKWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSApBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVeG82MXp0eEUrbEdia2JGcGpUOU0wTWVnCkgzWXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2lWSVhqREJPcXU5elR0d1FUMkFpZkJ2eFlXTWM4bXJoVnUKcWMzMnJUT0VRQ05vUkpQYkxZM01KeUFwZjJtOUxJNEN2SU1SMTIwc0ttYzRQTXE5ZzRCb291Yng0aWNsOFl1OAp1bmRuVWhmODAwSUp5YUthMittZjgzZjJmcmZXSlF1NzVMMnRrYys4WWtFWFZnR2cyazdxVXZkeThzdzRUTEZICmlPMktvVm5Xeit4R2FQb25BK09OK01lSUxDOGgrNlVNdjM5a2pTb29TV1M3amFHVDZXS2Z3aFExa1JJM2JIZS8KL05ZZHpjVkJibXJ0eFg1K1RvcmxNOSswcnoybnBwNkN5MlFSZHpuM3hKWHNGVk4wTml6V3pVZWErLzVEVndwSQpBeE1uSXBJNmpzME02cVJ4VUdZVHFOdTk1YkJSanVwQTFwVDJDZGFhYnp5NU0xK2VTaTg9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    # Skip namespaces that carry the admission.datree/validate label
    # (applied to kube-system and datree by the post-install hook).
    namespaceSelector:
      matchExpressions:
        - key: admission.datree/validate
          operator: DoesNotExist
    rules:
      - operations: ["CREATE", "UPDATE"]
        apiGroups: ["*"]
        apiVersions: ["*"]
        resources: ["*"]

View File

@ -7,7 +7,7 @@
Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
``` ```
kind create cluster --name helm --image kindest/node:v1.19.1 kind create cluster --name helm --image kindest/node:v1.26.0
``` ```
# Getting Started with Helm # Getting Started with Helm
@ -32,7 +32,7 @@ export KUBE_EDITOR="nano"
# test cluster access: # test cluster access:
/work # kubectl get nodes /work # kubectl get nodes
NAME STATUS ROLES AGE VERSION NAME STATUS ROLES AGE VERSION
helm-control-plane Ready master 26m v1.19.1 helm-control-plane Ready master 26m v1.26.0
``` ```

View File

@ -0,0 +1,403 @@
# Introduction to NGINX Ingress Controller
## Create a kubernetes cluster
In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/) </br>
```
kind create cluster --name nginx-ingress --image kindest/node:v1.23.5
```
See cluster up and running:
```
kubectl get nodes
NAME STATUS ROLES AGE VERSION
nginx-ingress-control-plane Ready control-plane,master 2m12s v1.23.5
```
## Run a container to work in
### run Alpine Linux:
```
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh
```
### install some tools
```
# install curl
apk add --no-cache curl
# install kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
# install helm
curl -o /tmp/helm.tar.gz -LO https://get.helm.sh/helm-v3.10.1-linux-amd64.tar.gz
tar -C /tmp/ -zxvf /tmp/helm.tar.gz
mv /tmp/linux-amd64/helm /usr/local/bin/helm
chmod +x /usr/local/bin/helm
```
### test cluster access:
```
/work # kubectl get nodes
NAME STATUS ROLES AGE VERSION
nginx-ingress-control-plane Ready control-plane,master 3m26s v1.23.5
```
## NGINX Ingress Controller
We'll start with the documentation as always </br>
You can find the [Kubernetes NGINX documentation here](https://kubernetes.github.io/ingress-nginx/) </br>
First thing we do is check the compatibility matrix to ensure we are deploying a compatible version of NGINX Ingress on our Kubernetes cluster </br>
The Documentation also has a link to the [GitHub Repo](https://github.com/kubernetes/ingress-nginx/) which has a compatibility matrix </br>
### Get the installation YAML
The controller ships as a `helm` chart, so we can grab version `v1.5.1` as per the compatibility
matrix. </br>
From our container we can do this:
```
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm search repo ingress-nginx --versions
```
From the app version we select the version that matches the compatibility matrix. </br>
```
NAME CHART VERSION APP VERSION DESCRIPTION
ingress-nginx/ingress-nginx 4.4.0 1.5.1 Ingress controller for Kubernetes using NGINX a...
```
Now we can use `helm` to install the chart directly if we want. </br>
Or we can use `helm` to grab the manifest and explore its content. </br>
We can also add that manifest to our git repo if we are using a GitOps workflow to deploy it. </br>
```
CHART_VERSION="4.4.0"
APP_VERSION="1.5.1"
mkdir ./kubernetes/ingress/controller/nginx/manifests/
helm template ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--version ${CHART_VERSION} \
--namespace ingress-nginx \
> ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
```
### Deploy the Ingress controller
```
kubectl create namespace ingress-nginx
kubectl apply -f ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
```
### Check the installation
```
kubectl -n ingress-nginx get pods
```
The traffic for our cluster will come in over the Ingress service </br>
Note that we don't have load balancer capability in `kind` by default, so our `LoadBalancer` is pending:
```
kubectl -n ingress-nginx get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx-controller LoadBalancer 10.96.130.21 <pending> 80:31011/TCP,443:31772/TCP 26m
ingress-nginx-controller-admission ClusterIP 10.96.125.210 <none> 443/TCP 26m
```
For testing purposes, we will simply setup `port-forward`ing </br>
If you are running in the cloud, you will get a real IP address. </br>
```
kubectl -n ingress-nginx port-forward svc/ingress-nginx-controller 443
```
We can reach our controller on [https://localhost/](https://localhost/) </br>
It's important to understand that Ingress runs on two ports `80` and `443` </br>
NGINX Ingress creates a fake certificate which is served for default `HTTPS` traffic on port `443`. </br>
If you look in the browser you will notice the name of the certificate `Common Name (CN) Kubernetes Ingress Controller Fake Certificate`
## Features
Now before we take a look at the features we'll need two web applications that we can use as our test harness, `service-a` and `service-b` </br>
In this demo, i have a deployment that runs a pod and a service that exposes the pod on port 80. </br>
This is a typical scenario where you have a microservice you want to expose publicly. </br>
### Deploy Service A & B
We'll deploy these two apps to the default namespace:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/service-a.yaml
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/service-b.yaml
```
Test our service : `kubectl port-forward svc/service-a 80`
Our services accept traffic on:
* `http://localhost/` which goes to the root `/`
* `http://localhost/path-a.html` which goes to the root `/path-a.html`
* `http://localhost/path-b.html` which goes to the root `/path-b.html`
* `http://localhost/<any-other-path>.html` which goes to the root `404`
### Routing by Domain
The most common way to route traffic with ingress is by domain:
* https://public.service-a.com/ --> Ingress --> k8s service --> http://service-a/
* https://public.service-b.com/ --> Ingress --> k8s service --> http://service-b/
To showcase this, let's deploy an ingress for service-a and service-b that routes by domain. </br>
<i>Note: we don't own public domain `public.service-a.com` so we're using a `/etc/hosts` file</i>
Example Ingress:
```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: service-a
spec:
ingressClassName: nginx
rules:
- host: public.service-a.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: service-a
port:
number: 80
```
<i>Note: we don't own public domain `public.my-services.com` so we're using a `/etc/hosts` file</i>
Deploy our ingresses:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-domain.yaml
```
Now we can access service-a and service-b on:
* https://public.service-a.com/
* https://public.service-b.com/
### Routing by Path
Another popular routing strategy is to use a shared domain and route based on the HTTP path. For example: </br>
* https://public.my-services.com/path-a --> Ingress --> k8s service --> http://service-a/path-a
* https://public.my-services.com/path-b --> Ingress --> k8s service --> http://service-b/path-b
This way public path `/path-a` will hit our application on `/path-a` </br>
Example Ingress:
```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: service-a
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
rules:
- host: public.my-services.com
http:
paths:
- path: /path-a
pathType: Prefix
backend:
service:
name: service-a
port:
number: 80
```
Deploy our ingresses:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-path.yaml
```
Now notice the following routing:
* https://public.my-services.com/ --> Ingress (404)
* https://public.my-services.com/path-a --> Ingress --> k8s service --> http://service-a/
* https://public.my-services.com/path-b --> Ingress --> k8s service --> http://service-b/
No matter what path you place on the front end, as long as the path matches `/path-a` or `/path-b`
it will be routed to the correct service on `/` </br>
It's important to note that extra paths and querystrings will NOT be passed to the upstream </br>
We can see this by looking at our NGINX Ingress controller logs as the controller will write the path it sees as well as the upstream service where it sent the request
```
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
```
### App Root
Sometimes applications have different root paths and don't simply serve traffic on `/` </br>
For example, the base path may be `http://localhost/home` </br>
To tell the Ingress controller that our application root path is `/home`, we can set the annotation `nginx.ingress.kubernetes.io/app-root: /home` </br>
This means the controller will be aware that all traffic that matches `path-a` should go to `/home` on service-a. </br>
### URL Rewrite
We saw earlier when we routed by path, that we could pass `/path-a` to service-a and `/path-b` to service-b. </br>
However, the traffic would always go to `/` so we lost any trailing URL, parameters and querystring. </br>
Not very useful. </br>
To allow the Ingress controller to pass paths to the upstream you need to look into [Rewrite Configuration](https://kubernetes.github.io/ingress-nginx/examples/rewrite/)
Example Ingress:
```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: service-a
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
ingressClassName: nginx
rules:
- host: public.my-services.com
http:
paths:
- path: /path-a(/|$)(.*)
pathType: Prefix
backend:
service:
name: service-a
port:
number: 80
```
Deploy our ingresses:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-path-rewrite.yaml
```
Now notice the following routing:
* https://public.my-services.com/ --> Ingress (404)
* https://public.my-services.com/path-a* --> Ingress --> k8s service --> http://service-a/*
* https://public.my-services.com/path-b* --> Ingress --> k8s service --> http://service-b/*
```
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
```
It's important to study the logs of the Ingress Controller to learn what path it saw, where it routed to
```
127.0.0.1 - - [13/Nov/2022:02:17:47 +0000] "GET /path-a/path.html HTTP/2.0" 404 19 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" 485 0.000 [default-service-a-80] [] 10.244.0.8:80 19 0.000 404 206ed4b88b712564fc073c3adb845dff
```
In the above case, the controller saw ` /path-a/path.html` , routed to service-a and we can see what our service-a saw, by looking at its logs:
```
kubectl logs -l app=service-a
10.244.0.7 - - [13/Nov/2022:02:28:36 +0000] "GET /path-a.html HTTP/1.1" 200 28 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
```
### SSL terminating & passthrough
As we noticed by logs, its default for the Ingress controller to offload SSL. </br>
We can see this because when it routes to upstreams, it routes to our service on port 80 </br>
Ingress offloads the TLS connection and creates a new connection with its upstream. </br>
This is a common approach to offload TLS on the edge as internal traffic is generally unencrypted in private
networks especially in large microservice environments where security is tightened in other manners so TLS is not needed all the way through. </br>
We can enable SSL pass through with the annotation: `nginx.ingress.kubernetes.io/ssl-passthrough`. </br>
SSL Passthrough is disabled by default and requires starting the controller with the --enable-ssl-passthrough flag. </br>
### IP Whitelist
We can add a layer of protection to our services that are exposed by an ingress. </br>
One popular way is IP whitelisting. </br>
This can be done with a [whitelist source range annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#whitelist-source-range) for example: </br>
`nginx.ingress.kubernetes.io/whitelist-source-range: <ip,ip,ip>`</br>
You can set this globally if you want using the [Customization ConfigMap](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#whitelist-source-range). </br>
We'll take a look at this customization in a bit. </br>
### Authentication
You can add a layer of protection to services exposed by ingress by several [Authentication methods](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#authentication). </br>
A simple example is basic authentication, where the client supplies a `username/password` to access our service. </br>
This is controlled by annotations:
* `nginx.ingress.kubernetes.io/auth-type: basic`
* `nginx.ingress.kubernetes.io/auth-secret: server-a-secret`
* `nginx.ingress.kubernetes.io/auth-secret-type: auth-file`
Create a username and password:
```
apk add apache2-utils
htpasswd -c auth service-a-user
kubectl create secret generic server-a-secret --from-file=auth
```
Deploy our ingresses:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/basic-auth.yaml
```
### Server snippet
Every ingress is technically an NGINX server block with a NGINX proxy pass. </br>
We can even customise this server block with a [Server Snippet annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#server-snippet)
### Customization
As mentioned before, the NGINX Ingress controller can be customized quite heavily with the [ConfigMap](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/)
We can customize log format to JSON as well for example:
```
log-format-escape-json: "true"
log-format-upstream: '{"time":"$time_iso8601","remote_addr":"$remote_addr","proxy_protocol_addr":"$proxy_protocol_addr","proxy_protocol_port":"$proxy_protocol_port","x_forward_for":"$proxy_add_x_forwarded_for","remote_user":"$remote_user","host":"$host","request_method":"$request_method","request_uri":"$request_uri","server_protocol":"$server_protocol","status":$status,"request_time":$request_time,"request_length":$request_length,"bytes_sent":$bytes_sent,"upstream_name":"$proxy_upstream_name","upstream_addr":"$upstream_addr","upstream_uri":"$uri","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status,"http_referrer":"$http_referer","http_user_agent":"$http_user_agent","http_cookie":"$http_cookie","http_device_id":"$http_x_device_id","http_customer_id":"$http_x_customer_id"}'
```
Apply the changes and restart Ingress:
```
kubectl apply -f ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
```

View File

@ -0,0 +1,43 @@
# Ingress for service-a: HTTP Basic Auth + path rewrite.
# Requests to public.my-services.com/path-a/* must present credentials
# from the `server-a-secret` Secret (htpasswd data stored under the `auth`
# key, as required by auth-secret-type: auth-file); the captured remainder
# of the path ($2) is forwarded to service-a.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: server-a-secret
    nginx.ingress.kubernetes.io/auth-secret-type: auth-file
    # $2 = second capture group of `path` below, i.e. everything after
    # /path-a/ — so /path-a/x.html is proxied upstream as /x.html.
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-a(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
# Ingress for service-b: same rewrite scheme, no authentication.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-b(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---

View File

@ -0,0 +1,36 @@
# Domain-based routing: each public hostname maps to its own backend.
# public.service-a.com -> service-a, public.service-b.com -> service-b.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
spec:
  ingressClassName: nginx
  rules:
  # All traffic for this host, any path, goes to service-a on port 80.
  - host: public.service-a.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
spec:
  ingressClassName: nginx
  rules:
  - host: public.service-b.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---

View File

@ -0,0 +1,40 @@
# Shared-domain routing with URL rewrite: /path-a/* -> service-a/*,
# /path-b/* -> service-b/*. Unlike the plain routing-by-path variant,
# the trailing path ($2) is preserved and passed to the upstream.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    # $2 = second capture group of `path` below; /path-a/x.html -> /x.html.
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-a(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-b(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---

View File

@ -0,0 +1,40 @@
# Shared-domain routing by path: /path-a -> service-a, /path-b -> service-b.
# rewrite-target "/" sends ALL matched traffic to the upstream root — any
# trailing path or querystring is dropped (see the rewrite variant for
# preserving it).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-a
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-b
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---

View File

@ -0,0 +1,92 @@
# Demo app "service-a": an NGINX pod serving static pages that identify
# which service (and which path) handled the request. HTML pages and the
# nginx.conf are both supplied via ConfigMaps mounted into the container.
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-a
data:
  path-a.html: |
    "/path-a.html" on service-a
  path-b.html: |
    "/path-b.html" on service-a
  index.html: |
    "/" on service-a
  404.html: |
    service-a 404 page
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-a-nginx.conf
data:
  nginx.conf: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      sendfile on;
      server {
        listen 80;
        server_name localhost;
        location / {
          root /usr/share/nginx/html;
          index index.html index.htm;
        }
        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-a
  labels:
    app: service-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service-a
  template:
    metadata:
      labels:
        app: service-a
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
        volumeMounts:
        # Static pages from the `service-a` ConfigMap.
        - name: html
          mountPath: "/usr/share/nginx/html/"
        # NOTE(review): mounting over /etc/nginx/ replaces the whole
        # directory (mime.types etc.) with only nginx.conf — intentional
        # for this demo config, which doesn't include other files.
        - name: config
          mountPath: "/etc/nginx/"
      volumes:
      - name: html
        configMap:
          name: service-a
      - name: config
        configMap:
          name: service-a-nginx.conf
---
# ClusterIP service exposing the pod on port 80.
apiVersion: v1
kind: Service
metadata:
  name: service-a
spec:
  selector:
    app: service-a
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
View File

@ -0,0 +1,92 @@
# Demo app "service-b": mirror of service-a (same NGINX setup), used to
# demonstrate ingress routing between two distinct backends.
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-b
data:
  path-a.html: |
    "/path-a.html" on service-b
  path-b.html: |
    "/path-b.html" on service-b
  index.html: |
    "/" on service-b
  404.html: |
    service-b 404 page
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-b-nginx.conf
data:
  nginx.conf: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      sendfile on;
      server {
        listen 80;
        server_name localhost;
        location / {
          root /usr/share/nginx/html;
          index index.html index.htm;
        }
        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-b
  labels:
    app: service-b
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service-b
  template:
    metadata:
      labels:
        app: service-b
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
        volumeMounts:
        # Static pages from the `service-b` ConfigMap.
        - name: html
          mountPath: "/usr/share/nginx/html/"
        # NOTE(review): mounting over /etc/nginx/ replaces the whole
        # directory with only nginx.conf — intentional for this demo.
        - name: config
          mountPath: "/etc/nginx/"
      volumes:
      - name: html
        configMap:
          name: service-b
      - name: config
        configMap:
          name: service-b-nginx.conf
---
# ClusterIP service exposing the pod on port 80.
apiVersion: v1
kind: Service
metadata:
  name: service-b
spec:
  selector:
    app: service-b
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
View File

@ -0,0 +1,742 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
# ServiceAccount the controller Deployment runs as; granted permissions
# via the ingress-nginx ClusterRoleBinding/RoleBinding in this manifest.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
# Controller tuning ConfigMap (referenced via --configmap on the
# controller args). Customized here to emit JSON-escaped access logs.
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  log-format-escape-json: "true"
  log-format-upstream: '{"time":"$time_iso8601","remote_addr":"$remote_addr","proxy_protocol_addr":"$proxy_protocol_addr","proxy_protocol_port":"$proxy_protocol_port","x_forward_for":"$proxy_add_x_forwarded_for","remote_user":"$remote_user","host":"$host","request_method":"$request_method","request_uri":"$request_uri","server_protocol":"$server_protocol","status":$status,"request_time":$request_time,"request_length":$request_length,"bytes_sent":$bytes_sent,"upstream_name":"$proxy_upstream_name","upstream_addr":"$upstream_addr","upstream_uri":"$uri","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status,"http_referrer":"$http_referer","http_user_agent":"$http_user_agent","http_cookie":"$http_cookie","http_device_id":"$http_x_device_id","http_customer_id":"$http_x_customer_id"}'
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for selection
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
replicas: 1
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
# Ingresses select this class via spec.ingressClassName: nginx.
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
# Validates Ingress objects on CREATE/UPDATE against the controller's
# admission endpoint (the controller's --validating-webhook args).
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  name: ingress-nginx-admission
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    matchPolicy: Equivalent
    rules:
      - apiGroups:
          - networking.k8s.io
        apiVersions:
          - v1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    # Fail-closed: invalid Ingresses are rejected if the webhook is down.
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions:
      - v1
    clientConfig:
      service:
        namespace: "ingress-nginx"
        name: ingress-nginx-controller-admission
        path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
# Pre-install/pre-upgrade hook Job: runs kube-webhook-certgen in "create"
# mode to generate TLS material for the admission webhook service and
# store it in the "ingress-nginx-admission" Secret.
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    # Runs BEFORE the controller is (re)installed so the cert Secret
    # exists when the webhook configuration is applied; the Job object
    # is deleted once the hook succeeds.
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.5.1"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          # Image pinned by digest for supply-chain safety.
          image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
          imagePullPolicy: IfNotPresent
          args:
            - create
            # SANs cover both the short and namespaced service DNS names
            # of the controller's admission Service.
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            # Namespace injected via the downward API so the rendered
            # manifest stays namespace-agnostic at the container level.
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
      # Retry on failure; hooks block the release until the Job succeeds.
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      # Run unprivileged as a fixed non-root UID/GID.
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
# Post-install/post-upgrade hook Job: runs kube-webhook-certgen in "patch"
# mode to update the "ingress-nginx-admission" webhook configuration with
# the CA bundle from the Secret created by the Job above.
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    # Runs AFTER install/upgrade, once the webhook configuration object
    # exists and can be patched; deleted when the hook succeeds.
    "helm.sh/hook": post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.5.1"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          # Same digest-pinned certgen image as the create Job.
          image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            # Only the validating webhook is patched here.
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            # Fail closed: admission requests are rejected if the
            # webhook cannot be reached after patching.
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      # Run unprivileged as a fixed non-root UID/GID.
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000