mirror of https://github.com/marcel-dempers/docker-development-youtube-series.git
synced 2025-06-04 16:56:56 +00:00

Merge branch 'master' into postgresql-prototyping
This commit is contained in: commit 00dd2257f0

20 .github/workflows/self-hosted-runner._yaml vendored Normal file
@ -0,0 +1,20 @@
###########################################################
# IMPORTANT -> Rename the file extension to ".yaml" (remove "_") to enable this
###########################################################

name: Self-Hosted Runner Test

on:
  push:
    branches:
      - <branch-name>

jobs:
  build:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v2

      - name: docker build python
        run: |
          docker build ./python/introduction/ -t python:1.0.0
19 .github/workflows/stale.yaml vendored
@ -1,19 +0,0 @@
name: Close inactive issues
on:
  schedule:
    - cron: "30 1 * * *"

jobs:
  close-issues:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v3
        with:
          days-before-issue-stale: 30
          days-before-issue-close: 10
          stale-issue-label: "stale"
          stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
          close-issue-message: "This issue was closed because it has been inactive for 10 days since being marked as stale."
          days-before-pr-stale: -1
          days-before-pr-close: -1
          repo-token: ${{ secrets.ISSUES_TOKEN }}
2 .gitignore vendored
@ -15,9 +15,9 @@ kubernetes/portainer/volume*
kubernetes/rancher/volume/*
kubernetes/portainer/business/volume*

#ignore postgres data for sample and database tutorials
pgdata
storage/databases/postgresql/docker/backup/
storage/databases/postgresql/docker/archive/
storage/databases/postgresql/3-replication/postgres-1/archive/*
storage/databases/postgresql/3-replication/postgres-2/archive/*
151 ai/openai/introduction/README.md Normal file
@ -0,0 +1,151 @@
# Introduction to Open AI

## Overview

What is [Open AI](https://openai.com/)?

* A research company focused on AI development
* Builds and provides models
* Builds and provides a standard protocol for using AI

What is a model?

I see a model as a language super database. </br>
Instead of writing a query, which is slow against a traditional database like SQL, you can throw a question at a model and it gives you an answer really fast. </br>

Model examples:
* GPT 3.5
* GPT 4

## Getting started

The best way to get started and to understand OpenAI is to learn hands-on:

* Create an OpenAI account [here](https://openai.com/)

## Chat GPT

Here you can find the link to [ChatGPT](https://chat.openai.com/)

## Open AI Playground

Here you can find the link to the [OpenAI Playground](https://platform.openai.com/playground)

## Build an AI powered app

We can start with a `main.py` that reads a message from the command line:

```
import sys

# note: sys.argv[0] is the script name, so the message is the first argument
message = sys.argv[1]
```

Then we will need the code from the Open AI playground and add it to our `main.py`. </br>
Move the `import` statements to the top. </br>
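The code the playground exports looks roughly like this (a sketch only; the full example further down is the complete, working version):

```
import openai

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[...],  # the system + user messages from the playground
    temperature=1,
    max_tokens=256
)
```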
Once you have tidied up everything, you can get the response message from the AI:

```
responseMessage = response.choices[0].message.content
```

Let's build our app:

```
cd ai\openai\introduction
docker build . -t ai-app
```

Set my OpenAI API key:

```
$ENV:OPENAI_API_KEY=""
```

Run our AI App:

```
docker run -it -e OPENAI_API_KEY=$ENV:OPENAI_API_KEY ai-app
```

When we run the app, notice it has no concept of memory. </br>
The playground works because it keeps track of all the user and AI messages and keeps appending new messages to the list, </br>
so it can track the conversation.
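For illustration, here is a sketch of the kind of list our app will cache in `messages.json` after a couple of runs (the full example below is what actually produces it):

```
[
  {"role": "user", "content": "can you help me with my deployment?"},
  {"role": "user", "content": "my pod is pod-123"}
]
```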
Let's keep track of messages by writing them to a local file. </br>
We will also take the system message out and keep it as a constant in our code. </br>

Full example:

```
import sys
import os
import json
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

# read the incoming message
message = sys.argv[1]
user_message = {
    "role" : "user",
    "content" : message
}

systemMessage = {
    "role": "system",
    "content": "You are a Kubernetes expert that can assist developers with troubleshooting deployments\n\nTo help the developer you will need to know the namespaces as well as the pod name. Ask for missing information\n\nGenerate a command to help the developer surface logs or information\n"
}

# read the cached user messages if there are any
userMessages = []
if os.path.isfile("messages.json"):
    with open('messages.json', newline='') as messagesFile:
        data = messagesFile.read()
        userMessages = json.loads(data)

# add the new message to it and update the cached messages
userMessages.append(user_message)
with open('messages.json', 'w', newline='') as messagesFile:
    msgJSON = json.dumps(userMessages)
    messagesFile.write(msgJSON)
    print(msgJSON)

messages = []
messages.append(systemMessage)
messages.extend(userMessages)

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    temperature=1,
    max_tokens=256,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0
)

responseMessage = response.choices[0].message.content
print(responseMessage)
```

Now we can mount our volume so we persist the cache of messages:

```
docker run -it -e OPENAI_API_KEY=$ENV:OPENAI_API_KEY -v ${PWD}:/app ai-app "can you help me with my deployment?"
Of course! I'd be happy to help with your deployment. Could you please provide me with the namespace and the name of the pod you're encountering issues with?

docker run -it -e OPENAI_API_KEY=$ENV:OPENAI_API_KEY -v ${PWD}:/app ai-app "my pod is pod-123"
Sure, I can help you with your deployment. Can you please provide me with the namespace in which the pod is running?

docker run -it -e OPENAI_API_KEY=$ENV:OPENAI_API_KEY -v ${PWD}:/app ai-app "its in the products namespace"
Great! To surface the logs for the pod "pod-123" in the "products" namespace, you can use the following command:

```shell
kubectl logs -n products pod-123
```

This command will retrieve the logs for the specified pod in the given namespace. Make sure you have the necessary permissions to access the namespace.
```
11 ai/openai/introduction/dockerfile Normal file
@ -0,0 +1,11 @@
FROM python:3.11-alpine

RUN mkdir /app
WORKDIR /app

COPY requirements.txt /app/requirements.txt
RUN pip install -r requirements.txt

COPY main.py /app/

ENTRYPOINT ["python3", "main.py"]
49 ai/openai/introduction/main.py Normal file
@ -0,0 +1,49 @@
import sys
import os
import json
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

# read the incoming message
message = sys.argv[1]
user_message = {
    "role" : "user",
    "content" : message
}

systemMessage = {
    "role": "system",
    "content": "You are a Kubernetes expert that can assist developers with troubleshooting deployments\n\nTo help the developer you will need to know the namespaces as well as the pod name. Ask for missing information\n\nGenerate a command to help the developer surface logs or information\n"
}

# read the cached user messages if there are any
userMessages = []
if os.path.isfile("messages.json"):
    with open('messages.json', newline='') as messagesFile:
        data = messagesFile.read()
        userMessages = json.loads(data)

# add the new message to it and update the cached messages
userMessages.append(user_message)
with open('messages.json', 'w', newline='') as messagesFile:
    msgJSON = json.dumps(userMessages)
    messagesFile.write(msgJSON)
    print(msgJSON)

messages = []
messages.append(systemMessage)
messages.extend(userMessages)

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    temperature=1,
    max_tokens=256,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0
)

responseMessage = response.choices[0].message.content
print(responseMessage)

1 ai/openai/introduction/requirements.txt Normal file
@ -0,0 +1 @@
openai==0.28.0
3 argo/argo-cd/README.md Normal file
@ -0,0 +1,3 @@
# Introduction to Argo CD

<a href="https://youtu.be/2WSJF7d8dUg" title="argo"><img src="https://i.ytimg.com/vi/2WSJF7d8dUg/hqdefault.jpg" width="20%" alt="introduction to argo cd" /></a>
@ -22,7 +22,7 @@ spec:
    spec:
      containers:
      - name: example-app
        image: aimvector/python:1.0.1
        image: aimvector/python:1.0.0
        imagePullPolicy: Always
        ports:
        - containerPort: 5000
@ -44,4 +44,4 @@ spec:
          secretName: mysecret
      - name: config-volume
        configMap:
          name: example-config #name of our configmap object
          name: example-config #name of our configmap object
3 deno/README.md Normal file
@ -0,0 +1,3 @@
# Introduction to Deno with Docker

<a href="https://youtu.be/4EfnECkCx8E" title="Kubernetes"><img src="https://i.ytimg.com/vi/4EfnECkCx8E/hqdefault.jpg" width="20%" alt="introduction to deno" /></a>

3 drone-ci/README.md Normal file
@ -0,0 +1,3 @@
# Introduction to Drone CI

<a href="https://youtu.be/myCcJJ_Fk10" title="drone ci"><img src="https://i.ytimg.com/vi/myCcJJ_Fk10/hqdefault.jpg" width="20%" alt="introduction to drone ci" /></a>
84 github/actions/self-hosted-runner/README.md Normal file
@ -0,0 +1,84 @@
<a href="https://youtu.be/RcHGqCBofvw" title="githubactions"><img src="https://i.ytimg.com/vi/RcHGqCBofvw/hqdefault.jpg" width="20%" alt="introduction to github actions runners" /></a>

# Introduction to GitHub Actions: Self hosted runners

## Create a kubernetes cluster

In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/) </br>

```
kind create cluster --name githubactions --image kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31
```

Let's test our cluster:
```
kubectl get nodes
NAME                          STATUS   ROLES           AGE     VERSION
githubactions-control-plane   Ready    control-plane   2m53s   v1.28.0
```

## Running the Runner in Docker

We could simply install this directly onto virtual machines, but for this demo, I'd like to run it in Kubernetes inside a container. </br>

### Security notes

* Running in Docker needs high privileges.
* I would not recommend using these on public repositories.
* I would recommend always running your CI systems in separate Kubernetes clusters.

### Creating a Dockerfile

* Installing Docker CLI

For this to work we need a `dockerfile` and follow the instructions to [Install Docker](https://docs.docker.com/engine/install/debian/).
I would grab the content and create statements for my `dockerfile` </br>

Now notice that we only install the `docker` CLI. </br>
This is because we want our runner to be able to run docker commands, but the actual docker server runs elsewhere. </br>
This gives you flexibility to tighten security by running docker on the host itself and potentially run the container runtime in a non-root environment </br>
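As a rough sketch of that split, the `docker` CLI finds its server via the `DOCKER_HOST` environment variable; `tcp://localhost:2375` below is the dind sidecar address used in our Kubernetes deployment later in this guide:

```
# the CLI runs here; the docker daemon answers from elsewhere (e.g. a dind sidecar)
export DOCKER_HOST=tcp://localhost:2375
docker version
```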
* Installing Github Actions Runner

Next up we will need to install the [GitHub actions runner](https://github.com/actions/runner) in our `dockerfile`.
Now to give you a "behind the scenes" of how I usually build my `dockerfile`s, I run a container to test my installs:

```
docker build . -t github-runner:latest
docker run -it github-runner bash
```

Next steps:

* Now we can see `docker` is installed
* To see how a runner is installed, let's go to our repo | runner and click "New self-hosted runner"
* Try these steps in the container
* We will need a few dependencies
* We download the runner
* TODO

Finally let's test the runner in `docker`:

```
docker run -it -e GITHUB_PERSONAL_TOKEN="" -e GITHUB_OWNER=marcel-dempers -e GITHUB_REPOSITORY=docker-development-youtube-series github-runner
```

## Deploy to Kubernetes

Load our github runner image so we don't need to push it to a registry:

```
kind load docker-image github-runner:latest --name githubactions
```

Create a kubernetes secret with our github details:

```
kubectl create ns github
kubectl -n github create secret generic github-secret `
  --from-literal GITHUB_OWNER=marcel-dempers `
  --from-literal GITHUB_REPOSITORY=docker-development-youtube-series `
  --from-literal GITHUB_PERSONAL_TOKEN=""

kubectl -n github apply -f kubernetes.yaml
```
@ -1,40 +1,46 @@
FROM debian:buster
FROM debian:bookworm-slim

ARG RUNNER_VERSION="2.169.1"
ARG RUNNER_VERSION="2.302.1"

ENV GITHUB_PERSONAL_TOKEN ""
ENV GITHUB_OWNER ""
ENV GITHUB_REPOSITORY ""

RUN apt-get update \
    && apt-get install -y \
    curl \
    sudo \
    git \
    jq \
    tar \
    gnupg2 \
    apt-transport-https \
    ca-certificates \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install Docker -> https://docs.docker.com/engine/install/debian/

# Add Docker's official GPG key:
RUN apt-get update && \
    apt-get install -y ca-certificates curl gnupg
RUN install -m 0755 -d /etc/apt/keyrings
RUN curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
RUN chmod a+r /etc/apt/keyrings/docker.gpg

# Add the repository to Apt sources:
RUN echo \
  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
  "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
  tee /etc/apt/sources.list.d/docker.list > /dev/null
RUN apt-get update

# I only install the CLI, we will run docker in another container!
RUN apt-get install -y docker-ce-cli

# Install the GitHub Actions Runner
RUN apt-get update && apt-get install -y sudo jq

RUN useradd -m github && \
    usermod -aG sudo github && \
    echo "%sudo ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

#setup docker runner
RUN curl -sSL https://get.docker.com/ | sh
RUN usermod -aG docker github
    usermod -aG sudo github && \
    echo "%sudo ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

USER github
WORKDIR /home/github
WORKDIR /actions-runner
RUN curl -Ls https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz | tar xz \
    && sudo ./bin/installdependencies.sh

RUN curl -O -L https://github.com/actions/runner/releases/download/v$RUNNER_VERSION/actions-runner-linux-x64-$RUNNER_VERSION.tar.gz
RUN tar xzf ./actions-runner-linux-x64-$RUNNER_VERSION.tar.gz
RUN sudo ./bin/installdependencies.sh
COPY --chown=github:github entrypoint.sh /actions-runner/entrypoint.sh
RUN sudo chmod u+x /actions-runner/entrypoint.sh

COPY --chown=github:github entrypoint.sh ./entrypoint.sh
RUN sudo chmod u+x ./entrypoint.sh
#working folder for the runner
RUN sudo mkdir /work

ENTRYPOINT ["/home/github/entrypoint.sh"]
ENTRYPOINT ["/actions-runner/entrypoint.sh"]
@ -2,14 +2,15 @@
registration_url="https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPOSITORY}/actions/runners/registration-token"
echo "Requesting registration URL at '${registration_url}'"

payload=$(curl -sX POST -H "Authorization: token ${GITHUB_PAT}" ${registration_url})
payload=$(curl -sX POST -H "Authorization: token ${GITHUB_PERSONAL_TOKEN}" ${registration_url})
export RUNNER_TOKEN=$(echo $payload | jq .token --raw-output)

./config.sh \
    --name $(hostname) \
    --token ${RUNNER_TOKEN} \
    --labels my-runner \
    --url https://github.com/${GITHUB_OWNER}/${GITHUB_REPOSITORY} \
    --work ${RUNNER_WORKDIR} \
    --work "/work" \
    --unattended \
    --replace
@ -1,37 +1,64 @@
apiVersion: v1
kind: Secret
metadata:
  name: github-secret
type: Opaque
data:
  GITHUB_PERSONAL_TOKEN: XXXXXXXXXXXXXXXXXXXXXXXXX
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: github-runner
  labels:
    app: github-runner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: github-runner
  template:
    metadata:
      labels:
        app: github-runner
    spec:
      containers:
      - name: github-runner
        image: aimvector/github-runner:latest
        env:
        - name: GITHUB_OWNER
          value: marcel-dempers
        - name: GITHUB_REPOSITORY
          value: docker-development-youtube-series
        - name: GITHUB_PERSONAL_TOKEN
          valueFrom:
            secretKeyRef:
              name: github-secret
              key: GITHUB_PERSONAL_TOKEN
apiVersion: apps/v1
kind: Deployment
metadata:
  name: github-runner
  labels:
    app: github-runner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: github-runner
  template:
    metadata:
      labels:
        app: github-runner
    spec:
      containers:
      - name: github-runner
        imagePullPolicy: Never #use local kind image
        image: github-runner:latest
        env:
        - name: GITHUB_OWNER
          valueFrom:
            secretKeyRef:
              name: github-secret
              key: GITHUB_OWNER
        - name: GITHUB_REPOSITORY
          valueFrom:
            secretKeyRef:
              name: github-secret
              key: GITHUB_REPOSITORY
        - name: GITHUB_PERSONAL_TOKEN
          valueFrom:
            secretKeyRef:
              name: github-secret
              key: GITHUB_PERSONAL_TOKEN
        - name: DOCKER_HOST
          value: tcp://localhost:2375
        volumeMounts:
        - name: data
          mountPath: /work/
      - name: dind
        image: docker:24.0.6-dind
        env:
        - name: DOCKER_TLS_CERTDIR
          value: ""
        resources:
          requests:
            cpu: 20m
            memory: 512Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: docker-graph-storage
          mountPath: /var/lib/docker
        - name: data
          mountPath: /work/
      volumes:
      - name: docker-graph-storage
        emptyDir: {}
      - name: data
        emptyDir: {}
@ -1,5 +1,7 @@
# Introduction to Go: JSON

<a href="https://youtu.be/_ok29xwZ11k" title="golang-part-2"><img src="https://i.ytimg.com/vi/_ok29xwZ11k/hqdefault.jpg" width="20%" alt="introduction to Go part 2" /></a>

In programming languages, you will very often deal with data structures internally. <br/>
Sometimes, you need to pass data outside of your application or read data from another application, or even a file. <br/>

@ -1,5 +1,7 @@
# Introduction to Go: HTTP

<a href="https://youtu.be/MKkokYpGyTU" title="golang-part-3"><img src="https://i.ytimg.com/vi/MKkokYpGyTU/hqdefault.jpg" width="20%" alt="introduction to Go part 3" /></a>

HTTP is a fundamental part of microservices and distributed web systems <br/>
Go has a built-in HTTP web server package. The package can be found [here](https://golang.org/pkg/net/http/) <br/>

@ -1,5 +1,7 @@
# Introduction to Go: Command Line

<a href="https://youtu.be/CODqM_rzwtk" title="golang-part-4"><img src="https://i.ytimg.com/vi/CODqM_rzwtk/hqdefault.jpg" width="20%" alt="introduction to Go part 4" /></a>

Command line apps are a fundamental part of software development <br/>
Go has a built-in command-line parser package. The package can be found [here](https://golang.org/pkg/flag/) <br/>

@ -1,5 +1,7 @@
# Introduction to Go: Storing data in Redis Database

<a href="https://youtu.be/6lJCyKwoQaQ" title="golang-part-5"><img src="https://i.ytimg.com/vi/6lJCyKwoQaQ/hqdefault.jpg" width="20%" alt="introduction to Go part 5" /></a>

Up until now, we've learned the fundamentals of Go and built a small web microservice that handles our video data.
Our service has a `/` `GET` endpoint for returning all videos, as well as a simple `/update` endpoint for updating our list of videos.

@ -1,5 +1,7 @@
# Introduction to Learning Go

<a href="https://youtu.be/jpKysZwllVw" title="golang-part-1"><img src="https://i.ytimg.com/vi/jpKysZwllVw/hqdefault.jpg" width="20%" alt="introduction to Go part 1" /></a>

Go can be downloaded from [golang.org](https://golang.org/doc/install) <br/>

Test your `go` installation:
@ -1,12 +1,14 @@
# Hashicorp Vault Guide

<a href="https://youtu.be/2Owo4Ioo9tQ" title="hashicorp-vault"><img src="https://i.ytimg.com/vi/2Owo4Ioo9tQ/hqdefault.jpg" width="20%" alt="introduction hashicorp vault" /></a>

Requirements:

* Kubernetes 1.21
* Kind or Minikube

For this tutorial, I will be using Kubernetes 1.21.
If you are watching the old guide for Kuberentes 1.17, go [here](..\vault\readme.md)
If you are watching the old guide for Kubernetes 1.17, go [here](..\vault\readme.md)

Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)

@ -158,7 +160,7 @@ Let's checkout the web UI:
kubectl -n vault get svc
kubectl -n vault port-forward svc/vault-ui 443:8200
```
Now we can access the web UI [here]("https://localhost/")
Now we can access the web UI [here](https://localhost/)

## Enable Kubernetes Authentication
@ -1,8 +1,10 @@
# Hashicorp Vault Guide - Deprecated

<a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVtq-NRX3yd1ziA_wJSBu3Oj" title="vault"><img src="https://i.ytimg.com/vi/L_o_CG_AGKA/hqdefault.jpg" width="20%" alt="introduction to vault" /></a>

# Vault

For this tutorial, I use Kuberentes 1.17
For this tutorial, I use Kubernetes 1.17
It's critical because we'll need certain [admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) enabled.

To get 1.17 for Linux\Windows, just use `kind` since you can create a 1.17 cluster with admissions all set up.

@ -4,8 +4,8 @@ cd ./hashicorp/vault/tls/

docker run -it --rm -v ${PWD}:/work -w /work debian:buster bash
apt-get update && apt-get install -y curl &&
curl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl && \
curl https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson && \
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl && \
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson && \
chmod +x /usr/local/bin/cfssl && \
chmod +x /usr/local/bin/cfssljson
@ -1,5 +1,7 @@
# Jenkins on Amazon Kubernetes

<a href="https://youtu.be/eqOCdNO2Nmk" title="jenkins eks"><img src="https://i.ytimg.com/vi/eqOCdNO2Nmk/hqdefault.jpg" width="20%" alt="jenkins eks" /></a>

## Create a cluster

Follow my Introduction to Amazon EKS for beginners guide, to create a cluster <br/>

@ -5,6 +5,8 @@ For running Jenkins on AMAZON, start [here](./amazon-eks/readme.md)

# Jenkins on Local (Docker Windows \ Minikube \ etc)

<a href="https://youtu.be/eRWIJGF3Y2g" title="jenkins"><img src="https://i.ytimg.com/vi/eRWIJGF3Y2g/hqdefault.jpg" width="20%" alt="jenkins" /></a>

For running Jenkins on Local Docker for Windows or Minikube <br/>
Watch the [video](https://youtu.be/eRWIJGF3Y2g)

@ -2,6 +2,8 @@

[Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks)

<a href="https://youtu.be/1mNYSn2KMZk" title="Kubernetes"><img src="https://i.ytimg.com/vi/1mNYSn2KMZk/hqdefault.jpg" width="20%" alt="Kubernetes Admission Controllers" /></a>

<hr/>

## Installation (local)
115 kubernetes/affinity/README.md Normal file
@ -0,0 +1,115 @@
# Kubernetes Concept: Affinity \ Anti-Affinity

## Create a kubernetes cluster

In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/) </br>

```
cd kubernetes/affinity
kind create cluster --name demo --image kindest/node:v1.28.0 --config kind.yaml
```

Test the cluster:
```
kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
demo-control-plane   Ready    control-plane   59s   v1.28.0
demo-worker          Ready    <none>          36s   v1.28.0
demo-worker2         Ready    <none>          35s   v1.28.0
demo-worker3         Ready    <none>          35s   v1.28.0
```

## Node Affinity

[Node Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) is similar to `nodeSelector`, however you can define more complex expressions, like "my pods must run on SSD nodes" or "my pods prefer SSD nodes".

For example:
* A node selector is a hard-and-fast rule: a pod will not be scheduled if the selection is not satisfied
* For example, when using the `os` selector with the value `linux`, a pod can only be scheduled if there is a node available where the `os` label is `linux`

Node Affinity allows an expression, as shown in the excerpt below.
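This excerpt from the `node-affinity.yaml` we apply next combines a hard requirement on the `zone` label with a soft preference for `type: ssd`:

```
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:   # hard rule: us-east only
      nodeSelectorTerms:
      - matchExpressions:
        - key: zone
          operator: In
          values:
          - us-east
    preferredDuringSchedulingIgnoredDuringExecution:  # soft rule: prefer ssd
    - weight: 1
      preference:
        matchExpressions:
        - key: type
          operator: In
          values:
          - ssd
```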
```
kubectl apply -f node-affinity.yaml
```

We can see our pods are preferring SSD and are always going to `us-east`:

```
kubectl get pods -owide

#introduce more pods
kubectl scale deploy app-disk --replicas 10

#observe all pods on demo-worker
```

If there is some trouble with our `ssd` disk, `kubectl taint nodes demo-worker type=ssd:NoSchedule`, we can see pods going to the non-SSD nodes in `us-east`. </br>

This is because our pods prefer SSD; when no SSD is available, they will still go to non-SSD nodes as long as there are nodes available in `us-east`. </br>

If something goes wrong in our last `us-east` node, `kubectl taint nodes demo-worker3 type=ssd:NoSchedule`, and we roll out more pods, `kubectl scale deploy app-disk --replicas 20`,
notice that our new pods are now in `Pending` status because no nodes satisfy our node affinity rules. </br>

Fix our nodes:
```
kubectl taint nodes demo-worker type=ssd:NoSchedule-
kubectl taint nodes demo-worker3 type=ssd:NoSchedule-
```
Scale back down to 0:
```
kubectl scale deploy app-disk --replicas 0
kubectl scale deploy app-disk --replicas 1

# pod should go back to demo-worker , node 1
kubectl get pods -owide
```

## Pod Affinity

Now [Pod Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) is an expression that allows us to state that pods should gravitate towards other pods, as in the excerpt below.
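This excerpt from the `pod-affinity.yaml` we apply next makes the `web-disk` pods schedule only onto nodes (grouped by hostname) that already run an `app-disk` pod:

```
affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions:
        - key: app
          operator: In
          values:
          - app-disk
      topologyKey: "kubernetes.io/hostname"   # co-locate per node
```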
```
kubectl apply -f pod-affinity.yaml

# observe where pods get deployed
kubectl get pods -owide

kubectl scale deploy app-disk --replicas 3
kubectl scale deploy web-disk --replicas 3
```

## Pod Anti-Affinity

Let's say we observe that our `app-disk` application's disk usage is quite intense, and we would like to prevent `app-disk` pods from running together. </br>
This is where anti-affinity comes in:

```
podAntiAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
  - labelSelector:
      matchExpressions:
      - key: app
        operator: In
        values:
        - app-disk
    topologyKey: "kubernetes.io/hostname"
```

After applying the above, we can roll it out and observe scheduling:

```
kubectl scale deploy app-disk --replicas 0
kubectl scale deploy web-disk --replicas 0
kubectl apply -f node-affinity.yaml
kubectl get pods -owide

kubectl scale deploy app-disk --replicas 2 #notice pending pods when scaling to 3
kubectl get pods -owide
kubectl scale deploy web-disk --replicas 2
kubectl get pods -owide
```
18 kubernetes/affinity/kind.yaml Normal file
@ -0,0 +1,18 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker #demo-worker
  labels:
    zone: us-east
    type: ssd
- role: worker #demo-worker2
  labels:
    zone: us-west
    type: ssd
- role: worker #demo-worker3
  labels:
    zone: us-east
- role: worker #demo-worker4
  labels:
    zone: us-west
kubernetes/affinity/node-affinity.yaml
Normal file
46
kubernetes/affinity/node-affinity.yaml
Normal file
@ -0,0 +1,46 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: app-disk
|
||||
labels:
|
||||
app: app-disk
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: app-disk
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: app-disk
|
||||
spec:
|
||||
containers:
|
||||
- name: app-disk
|
||||
image: nginx:latest
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- app-disk
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: zone
|
||||
operator: In
|
||||
values:
|
||||
- us-east
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 1
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: type
|
||||
operator: In
|
||||
values:
|
||||
- ssd
|
30 kubernetes/affinity/pod-affinity.yaml Normal file
@ -0,0 +1,30 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-disk
  labels:
    app: web-disk
spec:
  selector:
    matchLabels:
      app: web-disk
  replicas: 1
  template:
    metadata:
      labels:
        app: web-disk
    spec:
      containers:
      - name: web-disk
        image: nginx:latest
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - app-disk
            topologyKey: "kubernetes.io/hostname"
@ -6,10 +6,14 @@ Cluster autoscaler allows us to scale cluster nodes when they become full <br/>
I would recommend learning about scaling your cluster nodes before scaling pods. <br/>
Video [here](https://youtu.be/jM36M39MA3I)

<a href="https://youtu.be/jM36M39MA3I" title="Kubernetes"><img src="https://i.ytimg.com/vi/jM36M39MA3I/hqdefault.jpg" width="20%" alt="Kubernetes cluster auto scaling" /></a>

## Horizontal Pod Autoscaling

HPA allows us to scale pods when their resource utilisation goes over a threshold <br/>

<a href="https://youtu.be/FfDI08sgrYY" title="Kubernetes"><img src="https://i.ytimg.com/vi/FfDI08sgrYY/hqdefault.jpg" width="20%" alt="Pod auto scaling" /></a>

## Requirements

### A Cluster

@ -1,5 +1,7 @@
# Vertical Pod Autoscaling

<a href="https://youtu.be/jcHQ5SKKTLM" title="Kubernetes"><img src="https://i.ytimg.com/vi/jcHQ5SKKTLM/hqdefault.jpg" width="20%" alt="vertical auto scaling" /></a>

## We need a Kubernetes cluster

Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)

@ -1,5 +1,7 @@
# Introduction to cert-manager for Kubernetes

<a href="https://youtu.be/hoLUigg4V18" title="certmanager"><img src="https://i.ytimg.com/vi/hoLUigg4V18/hqdefault.jpg" width="20%" alt="introduction to certmanager" /></a>

## We need a Kubernetes cluster

Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)

@ -1,5 +1,7 @@
# Getting Started with EKS

<a href="https://youtu.be/QThadS3Soig" title="k8s-eks"><img src="https://i.ytimg.com/vi/QThadS3Soig/hqdefault.jpg" width="20%" alt="k8s-eks" /></a>

## Amazon CLI

```

@ -1,5 +1,7 @@
# Getting Started with AKS

<a href="https://youtu.be/eyvLwK5C2dw" title="k8s-aks"><img src="https://i.ytimg.com/vi/eyvLwK5C2dw/hqdefault.jpg" width="20%" alt="k8s-aks" /></a>

## Azure CLI

```

@ -1,5 +1,7 @@
# Getting Started with DGO

<a href="https://youtu.be/PvfBCE-xgBY" title="k8s-do"><img src="https://i.ytimg.com/vi/PvfBCE-xgBY/hqdefault.jpg" width="20%" alt="k8s-do" /></a>

## Trial Account

Coupon Link to get $100 credit for 60 days: <br/>

@ -1,5 +1,7 @@
# Getting Started with GKE

<a href="https://youtu.be/-fbH5Qs3QXU" title="k8s-gke"><img src="https://i.ytimg.com/vi/-fbH5Qs3QXU/hqdefault.jpg" width="20%" alt="k8s-gke" /></a>

## Google Cloud CLI

https://hub.docker.com/r/google/cloud-sdk/

@ -1,5 +1,7 @@
# Getting Started with Linode

<a href="https://youtu.be/VSPUWEtqtnY" title="k8s-linode"><img src="https://i.ytimg.com/vi/VSPUWEtqtnY/hqdefault.jpg" width="20%" alt="k8s-linode" /></a>

## Trial Account

Promo Link to get $20 credit to try out Linode: <br/>
3 kubernetes/configmaps/README.md Normal file
@ -0,0 +1,3 @@
# Introduction to Kubernetes: Configmaps

<a href="https://youtu.be/o-gXx7r7Rz4" title="k8s-cm"><img src="https://i.ytimg.com/vi/o-gXx7r7Rz4/hqdefault.jpg" width="20%" alt="k8s-cm" /></a>

@ -1,5 +1,7 @@
# Kubernetes Daemonsets

<a href="https://youtu.be/RGSeeN-o-kQ" title="k8s-daemonset"><img src="https://i.ytimg.com/vi/RGSeeN-o-kQ/hqdefault.jpg" width="20%" alt="k8s-daemonset" /></a>

## We need a Kubernetes cluster

Lets create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) </br>
254 kubernetes/datree/README-2023.md Normal file
@ -0,0 +1,254 @@
# What's new 👉🏽 Datree in 2023

<a href="https://youtu.be/iwoIjzS33qE" title="Kubernetes"><img src="https://i.ytimg.com/vi/iwoIjzS33qE/hqdefault.jpg" width="20%" alt="Kubernetes Guide" /></a>

## Create a Kubernetes cluster

Let's start by creating a local `kind` [cluster](https://kind.sigs.k8s.io/)

Note that we create a Kubernetes 1.23 cluster. </br>
So we want to use `datree` to validate and ensure our manifests comply with that version of Kubernetes. <br/>

```
kind create cluster --name datree --image kindest/node:v1.23.6
```

## Installation

The best place to start is the [documentation](https://hub.datree.io/)

I like to start all my work inside a docker container. </br>
Let's run a small Alpine Linux container:

```
docker run -it -v ${PWD}:/work -v ${HOME}/.kube/:/root/.kube/ -w /work --net host alpine sh
```
### Install Kubectl

Let's install `kubectl` in our container </br>

```
apk add curl jq
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
```

### Install Helm

Let's install `helm` in our container </br>

```
curl -L https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz -o /tmp/helm.tar.gz && \
tar -xzf /tmp/helm.tar.gz -C /tmp && \
chmod +x /tmp/linux-amd64/helm && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm
```

## Install Datree on our cluster

Add the Helm repo:
```
helm repo add datree-webhook https://datreeio.github.io/admission-webhook-datree
helm search repo datree-webhook --versions
```

Install the Helm chart:

```
CHART_VERSION="0.3.22"
DATREE_TOKEN=""

helm install datree-webhook datree-webhook/datree-admission-webhook \
  --create-namespace \
  --set datree.token=${DATREE_TOKEN} \
  --set datree.policy="Default" \
  --set datree.clusterName=$(kubectl config current-context) \
  --version ${CHART_VERSION} \
  --namespace datree
```

Check the install:

```
kubectl -n datree get pods
```

## View our Cluster Score

Now with Datree installed in our cluster, we can review its current scoring in the Datree [Dashboard](https://app.datree.io/overview) </br>
As we are running a test cluster, or if you run in the cloud, there may be some cloud components in namespaces that you may want to ignore. </br>

We can do this by labeling a namespace, which is [documented here](https://hub.datree.io/configuration/behavior#ignore-a-namespace), </br>
OR </br>
we can do this by using the [configuration file](https://hub.datree.io/configuration/behavior#ignore-a-namespace) for datree:

```
# skip namespace using label
kubectl label namespaces local-path-storage "admission.datree/validate=skip"

# skip namespace using configmap
kubectl -n datree apply -f kubernetes/datree/configuration/config.yaml
kubectl rollout restart deployment -n datree
```

According to the dashboard, we still have a `D` score, so let's rerun the scan:

```
kubectl get job "scan-job" -n datree -o json | jq 'del(.spec.selector)' | jq 'del(.spec.template.metadata.labels)' | kubectl replace --force -f -
```

Now we can see that we have an `A` score. </br>

## Deploy some workloads to our cluster

For most companies and larger teams, it's extremely difficult to fix policy issues. </br>
Let's walk through what this may look like. </br>

Deploy some sample workloads:

```
kubectl create namespace cms
kubectl -n cms create configmap mysql \
  --from-literal MYSQL_RANDOM_ROOT_PASSWORD=1

kubectl -n cms create secret generic wordpress \
  --from-literal WORDPRESS_DB_HOST=mysql \
  --from-literal WORDPRESS_DB_USER=exampleuser \
  --from-literal WORDPRESS_DB_PASSWORD=examplepassword \
  --from-literal WORDPRESS_DB_NAME=exampledb

kubectl -n cms create secret generic mysql \
  --from-literal MYSQL_USER=exampleuser \
  --from-literal MYSQL_PASSWORD=examplepassword \
  --from-literal MYSQL_DATABASE=exampledb

kubectl -n cms apply -f kubernetes/datree/example/cms/
```

Check our workloads:

```
kubectl -n cms get all
```

Rerun our scan:

```
kubectl delete jobs/scan-job -n datree; kubectl create job --from=cronjob/scan-cronjob scan-job -n datree
```

Now we can follow the dashboard to check our `namespace` for policy issues and start fixing them. </br>

Summary of our fixes:

```
spec:
  containers:
  - name: wordpress
    image: wordpress:5.9-apache

kind: Deployment
spec:
  template:
    spec:
      containers:
      - name: wordpress
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        resources:
          limits:
            memory: "500Mi"
          requests:
            memory: "500Mi"

spec:
  containers:
  - name: wordpress
    livenessProbe:
      httpGet:
        path: /
        port: 80
    readinessProbe:
      httpGet:
        path: /
        port: 80

kind: Deployment
spec:
  template:
    spec:
      containers:
      - name: wordpress
        volumeMounts:
        - mountPath: /tmp
          name: temp
        - mountPath: /var/run/apache2/
          name: apache
      volumes:
      - emptyDir: {}
        name: temp
      - emptyDir: {}
        name: apache

kubectl -n cms apply -f kubernetes/datree/example/cms/
```
## Datree CLI : Testing our YAML locally

We can install the latest version of Datree with the command advertised:

```
apk add unzip
curl https://get.datree.io | /bin/sh
```

### Policy check

Let's test my example manifests under our datree folder `kubernetes\datree\example`

```
datree test ./kubernetes/datree/example/cms/*.yaml
```

# CI/CD examples

The tools as well as the dashboards help us solve these policy issues locally. </br>
Once we have sorted out our policy issues, we can add Datree to our CI/CD pipeline. </br>

Checkout the [CI/CD integrations](https://hub.datree.io/cicd-examples) page. </br>

# Enforcing Policies

Configure Datree to enforce policies. </br>
We can use `helm upgrade` with the `--set` flag and set enforce to true like:

```
--set datree.enforce=true
```

Let's apply it and deploy a new manifest to our cluster:

```
helm upgrade datree-webhook datree-webhook/datree-admission-webhook \
  --create-namespace \
  --set datree.enforce=true \
  --set datree.policy="Default" \
  --set datree.token=${DATREE_TOKEN} \
  --set datree.clusterName=$(kubectl config current-context) \
  --version ${CHART_VERSION} \
  --namespace datree
```

Try to apply our Wordpress MySQL manifest, which violates policies:

```
kubectl -n cms apply -f kubernetes/datree/example/cms/statefulset.yaml
```
388
kubernetes/datree/README.md
Normal file
388
kubernetes/datree/README.md
Normal file
@ -0,0 +1,388 @@
|
||||
|
||||
# Introduction to Datree
|
||||
|
||||
<a href="https://youtu.be/aqiOyXPPadk" title="Kubernetes"><img src="https://i.ytimg.com/vi/aqiOyXPPadk/hqdefault.jpg" width="20%" alt="Kubernetes Guide" /></a>
|
||||
|
||||
## Installation
|
||||
|
||||
Best place to start is the [documentation](https://hub.datree.io/)
|
||||
|
||||
I like to start all my work inside a docker container. </br>
|
||||
Let's run a small Alpine linux container
|
||||
|
||||
```
|
||||
docker run -it -v ${PWD}:/work -v ${HOME}/.kube/:/root/.kube/ -w /work --net host alpine sh
|
||||
```
|
||||
|
||||
### Install some dependancies
|
||||
|
||||
Let's install `curl` and `unzip` because the installation script uses those. <br/>
|
||||
We will also install `sudo` since we are running in a container as root and install scripts have `sudo` commands in them.
|
||||
|
||||
```
|
||||
apk add curl unzip bash sudo
|
||||
```
|
||||
|
||||
### Automatic Installation
|
||||
|
||||
We can install the latest version of Datree with the command advertised:
|
||||
|
||||
```
|
||||
curl https://get.datree.io | /bin/bash
|
||||
```
|
||||
|
||||
### Manual Installation
|
||||
|
||||
Or we can grab a specific version of `datree` on the GitHub releases page. </br>
|
||||
For example: [1.5.20](https://github.com/datreeio/datree/releases/tag/1.5.20) binary
|
||||
|
||||
```
|
||||
curl -L https://github.com/datreeio/datree/releases/download/1.5.20/datree-cli_1.5.20_Linux_x86_64.zip -o /tmp/datree.zip
|
||||
|
||||
unzip /tmp/datree.zip -d /tmp && \
|
||||
chmod +x /tmp/datree && \
|
||||
mv /tmp/datree /usr/local/bin/datree
|
||||
|
||||
```
|
||||
|
||||
Now we can run the `datree` command:
|
||||
|
||||
```
|
||||
datree
|
||||
Datree is a static code analysis tool for kubernetes files. Full code can be found at https://github.com/datreeio/datree
|
||||
|
||||
Usage:
|
||||
datree [command]
|
||||
|
||||
Available Commands:
|
||||
completion Generate completion script for bash,zsh,fish,powershell
|
||||
config Configuration management
|
||||
help Help about any command
|
||||
kustomize Render resources defined in a kustomization.yaml file and run a policy check against them
|
||||
publish Publish policies configuration for given <fileName>.
|
||||
test Execute static analysis for given <pattern>
|
||||
version Print the version number
|
||||
|
||||
Flags:
|
||||
-h, --help help for datree
|
||||
|
||||
Use "datree [command] --help" for more information about a command.
|
||||
|
||||
```
|
||||
|
||||
## Testing Kubernetes Manifests
|
||||
|
||||
We have a number of Kubernetes manifests in this repo. </br>
|
||||
Datree does a few things for us: </br>
|
||||
* YAML validation ( Is this YAML well formatted ? )
|
||||
* Schema validation. ( Is this a Kubernetes YAML file ? For the right version ? )
|
||||
* Policy checks ( Checks YAML to ensure good practises are followed )
|
||||
|
||||
</br>
|
||||
|
||||
Let's test my example manifests under our datree folder `kubernetes\datree\example`
|
||||
|
||||
### YAML validation
|
||||
|
||||
If we break the YAML file format, we can detect that with the YAML validation feature
|
||||
|
||||
```
|
||||
datree test ./kubernetes/datree/example/deployment.yaml
|
||||
```
|
||||
|
||||
### Policy checks
|
||||
|
||||
When we fix our YAML file, notice if we run `datree test` again, we get some policy checks failing
|
||||
|
||||
```
|
||||
datree test ./kubernetes/datree/example/deployment.yaml
|
||||
|
||||
```
|
||||
|
||||
Let's test some other types of Kubernetes objects
|
||||
|
||||
```
|
||||
datree test ./kubernetes/services/service.yaml
|
||||
datree test ./kubernetes/configmaps/configmap.yaml
|
||||
datree test ./kubernetes/statefulsets/statefulset.yaml
|
||||
datree test ./kubernetes/ingress/ingress.yaml
|
||||
```
|
||||
|
||||
### Schema validation
|
||||
|
||||
Datree can also check if our YAML matches the target Kubernetes version schema.
|
||||
For example, our Ingress YAML is a newer version of Kubernetes
|
||||
|
||||
```
|
||||
datree test --schema-version 1.14.0 ./kubernetes/ingress/ingress-nginx-example.yaml
|
||||
datree test --schema-version 1.19.0 ./kubernetes/ingress/ingress-nginx-example.yaml
|
||||
|
||||
```
|
||||
|
||||
We can also test a directory of YAML files and include `*` wildcard in your scans. </br>
|
||||
Let's test my latest Kubernetes tutorial that contains a Wordpress + MySQL + Ingress setup:
|
||||
|
||||
```
|
||||
datree test kubernetes/tutorials/basics/yaml/*.y*ml
|
||||
```
|
||||
|
||||
# Policies
|
||||
|
||||
Now if we take a look at the CLI output of `datree` we notice a link in the Summary output. </br>
|
||||
The URL is in the form of `https://app.datree.io/login?t=<token>` </br>
|
||||
|
||||
```
|
||||
(Summary)
|
||||
|
||||
- Passing YAML validation: 4/4
|
||||
|
||||
- Passing Kubernetes (1.20.0) schema validation: 4/4
|
||||
|
||||
- Passing policy check: 2/4
|
||||
|
||||
+-----------------------------------+------------------------------------------------------+
|
||||
| Enabled rules in policy "Default" | 21 |
|
||||
| Configs tested against policy | 5 |
|
||||
| Total rules evaluated | 84 |
|
||||
| Total rules skipped | 0 |
|
||||
| Total rules failed | 14 |
|
||||
| Total rules passed | 70 |
|
||||
| See all rules in policy | https://app.datree.io/login?t=xxxxxxxxxxxxxxxxxxxxxx |
|
||||
+-----------------------------------+------------------------------------------------------+
|
||||
```
|
||||
|
||||
We can use this URL to access the Datree UI to get a view of the policy management screens </br>
|
||||
Checkout the link to access the UI which helps us manage our policies. </br>
|
||||
|
||||
## Policy examples
|
||||
|
||||
One of the key features about policies is that we can apply rule sets for specific environments. </br>
|
||||
Perhaps you have a development environment where policies are a little loose and a staging server that has tighter restrictions to match production, or even a regulated environment that has very tight controls. </br>
|
||||
|
||||
We can use the Datree UI to create policies with different sets of rules. </br>
|
||||
We can then tell `datree` about the policy we want it to test against:
|
||||
|
||||
```
|
||||
datree test kubernetes/datree/example/deployment.yaml -p production
|
||||
```
|
||||
|
||||
For a new policy, we notice that 0 rules are enabled, so now we have the flexibility to set up the rules we want to protect this environment. </br>
|
||||
|
||||
## Helm
|
||||
|
||||
What if I don't use `kubectl` and use `helm` instead ? </br>
|
||||
Let's install `helm` in our container </br>
|
||||
|
||||
```
|
||||
apk add tar git
|
||||
curl -L https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz -o /tmp/helm.tar.gz && \
|
||||
tar -xzf /tmp/helm.tar.gz -C /tmp && \
|
||||
chmod +x /tmp/linux-amd64/helm && \
|
||||
mv /tmp/linux-amd64/helm /usr/local/bin/helm
|
||||
|
||||
```
|
||||
|
||||
Let's install the `helm` plugin for `datree` <br/>
|
||||
|
||||
```
|
||||
helm plugin install https://github.com/datreeio/helm-datree
|
||||
|
||||
```
|
||||
|
||||
Now we can test a `helm` chart we have in our repo from my `helm` tutorial </br>
|
||||
|
||||
```
|
||||
|
||||
cd kubernetes/helm
|
||||
|
||||
helm datree test example-app \
|
||||
-- --values ./example-app/example-app-01.values.yaml
|
||||
```
|
||||
|
||||
## Kustomize
|
||||
|
||||
What if I don't use `helm` and use `kustomize` instead ? <br/>
|
||||
Datree has out the box built-in `kustomize` support <br/>
|
||||
Let's test our `kustomize` template from a video I did on `kustomize`
|
||||
|
||||
```
|
||||
datree kustomize test .\kubernetes\kustomize\application\
|
||||
```
|
||||
|
||||
# CI/CD examples
|
||||
|
||||
We can even run datree in GitHub Actions and various [CI/CD integrations](https://hub.datree.io/cicd-examples). </br>
|
||||
|
||||
|
||||
# Admission Controller
|
||||
|
||||
So far, `datree` helps us detect misconfigurations on our local machine as well as at our CI level. </br>
|
||||
But what about the things that don't flow via our CI ? </br>
|
||||
|
||||
When folks deploy stuff directly to our clusters via `kubectl` or `helm`. </br>
|
||||
Datree now allows us to not only detect but prevent misconfigurations being applied using a new admission controller feature. </br>
|
||||
|
||||
The admission controller is available [here](https://github.com/datreeio/admission-webhook-datree)
|
||||
|
||||
## Create a Kubernetes cluster
|
||||
|
||||
Let's start by creating a local `kind` [cluster](https://kind.sigs.k8s.io/)
|
||||
|
||||
Note that we create a Kubernetes 1.23 cluster. </br>
|
||||
So we want to use `datree` to validate and ensure our manifests comply with that version of Kubernetes. <br/>
|
||||
|
||||
```
|
||||
kind create cluster --name datree --image kindest/node:v1.23.6
|
||||
```
|
||||
|
||||
Let's also grab `kubectl`:
|
||||
|
||||
```
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
```
|
||||
|
||||
We'll need a `datree` token so our admission controller can read our policies
|
||||
|
||||
```
|
||||
export DATREE_TOKEN=[your-token]
|
||||
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
I will need some dependencies since I am running in a lightweight `alpine` container. </br>
|
||||
OpenSSL is needed by the webhook install to generate certificates. </br>
|
||||
|
||||
```
|
||||
apk add openssl
|
||||
```
|
||||
|
||||
Let's grab the `datree` manifests
|
||||
```
|
||||
curl -L https://get.datree.io/admission-webhook -o datree.sh
|
||||
chmod +x datree.sh
|
||||
bash datree.sh
|
||||
```
|
||||
|
||||
With the admission controller now deployed, `datree` will validate things coming into the cluster. <br/>
|
||||
For example, if we bypass our CI/CD, `datree` will catch our deployment and run our policy checks
|
||||
|
||||
I have a separate example deployment in our datree folder that we can play with:
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/datree/example/deployment.yaml
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/deployments/deployment.yaml
|
||||
Error from server: error when creating "kubernetes/deployments/deployment.yaml": admission webhook "webhook-server.datree.svc" denied the request:
|
||||
---
|
||||
webhook-example-deploy-Deployment.tmp.yaml
|
||||
|
||||
[V] YAML validation
|
||||
[V] Kubernetes schema validation
|
||||
|
||||
[X] Policy check
|
||||
|
||||
❌ Ensure each container has a configured liveness probe [1 occurrence]
|
||||
- metadata.name: example-deploy (kind: Deployment)
|
||||
💡 Missing property object `livenessProbe` - add a properly configured livenessProbe to catch possible deadlocks
|
||||
|
||||
❌ Ensure each container has a configured readiness probe [1 occurrence]
|
||||
- metadata.name: example-deploy (kind: Deployment)
|
||||
💡 Missing property object `readinessProbe` - add a properly configured readinessProbe to notify kubelet your Pods are ready for traffic
|
||||
|
||||
❌ Prevent workload from using the default namespace [1 occurrence]
|
||||
- metadata.name: example-deploy (kind: Deployment)
|
||||
💡 Incorrect value for key `namespace` - use an explicit namespace instead of the default one (`default`)
|
||||
|
||||
|
||||
(Summary)
|
||||
|
||||
- Passing YAML validation: 1/1
|
||||
|
||||
- Passing Kubernetes (v1.23.6) schema validation: 1/1
|
||||
|
||||
- Passing policy check: 0/1
|
||||
|
||||
+-----------------------------------+-----------------------+
|
||||
| Enabled rules in policy "Default" | 21 |
|
||||
| Configs tested against policy | 1 |
|
||||
| Total rules evaluated | 21 |
|
||||
| Total rules skipped | 0 |
|
||||
| Total rules failed | 3 |
|
||||
| Total rules passed | 18 |
|
||||
| See all rules in policy | https://app.datree.io |
|
||||
+-----------------------------------+-----------------------+
|
||||
```
|
||||
|
||||
Now to get this deployment fixed up, let's go ahead and comply with some of the policies. </br>

Under the `deployment.yaml` I have included a `livenessProbe` as well as a `readinessProbe`, commented out. </br>

Let's add those in. </br>

Finally, we also need to add CPU and memory requests and limits; the sketch below shows what those additions look like. </br>
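
Here is what those additions look like on the container spec; the values below mirror the commented-out section of the example `deployment.yaml` (the `/status` path and port `5000` are what the example app exposes):

```
livenessProbe:
  httpGet:
    path: /status
    port: 5000
  initialDelaySeconds: 3
  periodSeconds: 3
readinessProbe:
  httpGet:
    path: /status
    port: 5000
  initialDelaySeconds: 3
  periodSeconds: 3
resources:
  requests:
    memory: "64Mi"
    cpu: "50m"
  limits:
    memory: "256Mi"
    cpu: "500m"
```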
|
||||
|
||||
The namespace rule is the simple one: we should avoid using the default namespace, so I will create an `examples` namespace where I will keep all example apps.
|
||||
|
||||
```
|
||||
kubectl create ns examples
|
||||
```
|
||||
|
||||
And finally we can deploy our resource, specifying the namespace:
|
||||
|
||||
```
|
||||
kubectl apply -n examples -f kubernetes/datree/example/deployment.yaml
|
||||
deployment.apps/example-deploy created
|
||||
|
||||
```
|
||||
|
||||
## Kubectl
|
||||
|
||||
But what about resources already in your cluster ? </br>
|
||||
Datree covers this with their `kubectl` plugin.
|
||||
|
||||
We can grab the install script right off the [GitHub Release](https://github.com/datreeio/kubectl-datree/releases) page. </br>
|
||||
For this demo I'll grab the `v0.1.1` version. </br>
|
||||
|
||||
Installation:
|
||||
|
||||
```
|
||||
curl -L https://github.com/datreeio/kubectl-datree/releases/download/v0.1.1/manual_install.sh -o /tmp/kubectl-plugin.sh
|
||||
chmod +x /tmp/kubectl-plugin.sh
|
||||
bash /tmp/kubectl-plugin.sh
|
||||
|
||||
```
|
||||
|
||||
Now we have datree inside `kubectl` and can perform checks in our cluster. </br>
|
||||
We can check our entire namespace now, which should be pretty clean:
|
||||
|
||||
```
|
||||
kubectl datree test -- --namespace examples
|
||||
Fetching resources, this may take some time depending on the amount of resources in your cluster...
|
||||
|
||||
(Summary)
|
||||
|
||||
- Passing YAML validation: 1/1
|
||||
|
||||
- Passing Kubernetes (1.24.2) schema validation: 1/1
|
||||
|
||||
- Passing policy check: 1/1
|
||||
|
||||
+-----------------------------------+------------------------------------------------------+
|
||||
| Enabled rules in policy "Default" | 21 |
|
||||
| Configs tested against policy | 1 |
|
||||
| Total rules evaluated | 21 |
|
||||
| Total rules skipped | 0 |
|
||||
| Total rules failed | 0 |
|
||||
| Total rules passed | 21 |
|
||||
| See all rules in policy | https://app.datree.io/login?t=xxxxxxxxxxxxxxxxxxxxxx |
|
||||
+-----------------------------------+------------------------------------------------------+
|
||||
|
||||
The following cluster resources in namespace 'examples' were checked:
|
||||
|
||||
deployment.apps/example-deploy
|
||||
|
||||
```
|
8
kubernetes/datree/configuration/config.yaml
Normal file
@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: webhook-scanning-filters
|
||||
namespace: datree
|
||||
data:
|
||||
skiplist: |
|
||||
- local-path-storage;(.*);(.*)
|
174
kubernetes/datree/datree.sh
Executable file
@ -0,0 +1,174 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Sets up the environment for the admission controller webhook in the active cluster.
|
||||
# check that the user has kubectl and openssl installed
|
||||
# generate TLS keys
|
||||
generate_keys () {
|
||||
printf "🔑 Generating TLS keys...\n"
|
||||
|
||||
chmod 0700 "${keydir}"
|
||||
cd "${keydir}"
|
||||
|
||||
cat >server.conf <<EOF
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
prompt = no
|
||||
[req_distinguished_name]
|
||||
CN = webhook-server.datree.svc
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
extendedKeyUsage = clientAuth, serverAuth
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = webhook-server.datree.svc
|
||||
EOF
|
||||
|
||||
# Generate the CA cert and private key that is valid for 5 years
|
||||
openssl req -nodes -new -x509 -days 1827 -keyout ca.key -out ca.crt -subj "/CN=Admission Controller Webhook Demo CA"
|
||||
# Generate the private key for the webhook server
|
||||
openssl genrsa -out webhook-server-tls.key 2048
|
||||
# Generate a Certificate Signing Request (CSR) for the private key, and sign it with the private key of the CA.
|
||||
openssl req -new -key webhook-server-tls.key -subj "/CN=webhook-server.datree.svc" -config server.conf \
|
||||
| openssl x509 -req -CA ca.crt -CAkey ca.key -CAcreateserial -out webhook-server-tls.crt -extensions v3_req -extfile server.conf
|
||||
|
||||
cd -
|
||||
}
|
||||
|
||||
verify_prerequisites () {
|
||||
if ! command -v openssl &> /dev/null;then
|
||||
printf '%s\n' "openssl doesn't exist, please install openssl"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v kubectl &> /dev/null;then
|
||||
printf '%s\n' "kubectl doesn't exist, please install kubectl"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
verify_datree_namespace_not_existing () {
|
||||
local namespace_exists
|
||||
namespace_exists="$(kubectl get namespace/datree --ignore-not-found)"
|
||||
|
||||
if [ -n "${namespace_exists}" ] ;
|
||||
then
|
||||
printf '%s\n' "datree namespace already exists"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
verify_webhook_resources_not_existing () {
|
||||
local validating_webhook_exists
|
||||
validating_webhook_exists="$(kubectl get validatingwebhookconfiguration.admissionregistration.k8s.io/webhook-datree --ignore-not-found)"
|
||||
|
||||
if [ -n "${validating_webhook_exists}" ] ;
|
||||
then
|
||||
printf '%s\n' "datree validating webhook already exists"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
are_you_sure () {
|
||||
read -p "Are you sure you want to run as anonymous user? (y/n) " -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo true
|
||||
else
|
||||
echo false
|
||||
fi
|
||||
}
|
||||
|
||||
verify_correct_token_regex () {
|
||||
if ! [[ $datree_token =~ ^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$
|
||||
|| $datree_token =~ ^[0-9a-zA-Z]{22}$
|
||||
|| $datree_token =~ ^[0-9a-zA-Z]{20}$ ]] ; then
|
||||
echo "🚫 Invalid token format"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
verify_datree_namespace_not_existing
|
||||
|
||||
verify_webhook_resources_not_existing
|
||||
|
||||
verify_prerequisites
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
# Create Temporary directory for TLS keys
|
||||
keydir="$(mktemp -d)"
|
||||
|
||||
# Generate keys into a temporary directory.
|
||||
generate_keys
|
||||
|
||||
basedir="$(pwd)/deployment"
|
||||
|
||||
# Create the `datree` namespace. This cannot be part of the YAML file as we first need to create the TLS secret,
|
||||
# which would fail otherwise.
|
||||
printf "\n🏠 Creating datree namespace...\n"
|
||||
kubectl create namespace datree
|
||||
|
||||
# Label datree namespace to avoid deadlocks in self hosted webhooks
|
||||
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#avoiding-deadlocks-in-self-hosted-webhooks
|
||||
kubectl label namespaces datree admission.datree/validate=skip
|
||||
|
||||
# label kube-system namespace to avoid operating on the kube-system namespace
|
||||
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#avoiding-operating-on-the-kube-system-namespace
|
||||
kubectl label namespaces kube-system admission.datree/validate=skip
|
||||
|
||||
# Override DATREE_TOKEN env
|
||||
if [ -z "$DATREE_TOKEN" ] ;
|
||||
then
|
||||
echo
|
||||
echo =====================================
|
||||
echo === Finish setting up the webhook ===
|
||||
echo =====================================
|
||||
|
||||
token_set=false
|
||||
while [ "$token_set" = false ]; do
|
||||
echo "👉 Insert token (available at https://app.datree.io/settings/token-management)"
|
||||
echo "ℹ️ The token is used to connect the webhook with your account."
|
||||
read datree_token
|
||||
token_set=true
|
||||
|
||||
if [ -z "$datree_token" ]; then
|
||||
is_sure=$(are_you_sure)
|
||||
if [ $is_sure = false ]; then
|
||||
token_set=false
|
||||
fi
|
||||
fi
|
||||
done
|
||||
else
|
||||
datree_token=$DATREE_TOKEN
|
||||
fi
|
||||
|
||||
verify_correct_token_regex
|
||||
|
||||
# Create the TLS secret for the generated keys.
|
||||
kubectl -n datree create secret tls webhook-server-tls \
|
||||
--cert "${keydir}/webhook-server-tls.crt" \
|
||||
--key "${keydir}/webhook-server-tls.key"
|
||||
|
||||
printf "\n🔗 Creating webhook resources...\n"
|
||||
|
||||
# Read the PEM-encoded CA certificate, base64 encode it, and replace the `${CA_PEM_B64}` placeholder in the YAML
|
||||
# template with it. Then, create the Kubernetes resources.
|
||||
ca_pem_b64="$(openssl base64 -A <"${keydir}/ca.crt")"
|
||||
curl "https://raw.githubusercontent.com/datreeio/admission-webhook-datree/main/deployment/admission-webhook-datree.yaml" | sed -e 's@${CA_PEM_B64}@'"$ca_pem_b64"'@g' \
|
||||
| sed 's@${DATREE_TOKEN}@'"$datree_token"'@g' \
|
||||
| kubectl create -f -
|
||||
|
||||
# Delete the key directory to prevent abuse (DO NOT USE THESE KEYS ANYWHERE ELSE).
|
||||
rm -rf "${keydir}"
|
||||
|
||||
# Wait for deployment rollout
|
||||
rolloutExitCode=0
|
||||
(kubectl rollout status deployment webhook-server -n datree --timeout=180s) || rolloutExitCode=$?
|
||||
|
||||
if [ "$rolloutExitCode" != "0" ]; then
|
||||
printf "\n❌ datree webhook rollout failed, please try again. If this keeps happening please contact us: https://github.com/datreeio/admission-webhook-datree/issues\n"
|
||||
else
|
||||
printf "\n🎉 DONE! The webhook server is now deployed and configured\n"
|
||||
fi
|
42
kubernetes/datree/example/cms/deploy.yaml
Normal file
@ -0,0 +1,42 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: wordpress-deployment
|
||||
labels:
|
||||
app: wordpress
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: wordpress
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: wordpress
|
||||
spec:
|
||||
containers:
|
||||
- name: wordpress
|
||||
image: wordpress
|
||||
ports:
|
||||
- containerPort: 80
|
||||
env:
|
||||
- name: WORDPRESS_DB_HOST
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: wordpress
|
||||
key: WORDPRESS_DB_HOST
|
||||
- name: WORDPRESS_DB_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: wordpress
|
||||
key: WORDPRESS_DB_USER
|
||||
- name: WORDPRESS_DB_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: wordpress
|
||||
key: WORDPRESS_DB_PASSWORD
|
||||
- name: WORDPRESS_DB_NAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: wordpress
|
||||
key: WORDPRESS_DB_NAME
|
18
kubernetes/datree/example/cms/ingress.yaml
Normal file
@ -0,0 +1,18 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: wordpress
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: wordpress
|
||||
port:
|
||||
number: 80
|
14
kubernetes/datree/example/cms/service.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: wordpress
|
||||
labels:
|
||||
app: wordpress
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
name: wordpress
|
||||
targetPort: 80
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: wordpress
|
69
kubernetes/datree/example/cms/statefulset.yaml
Normal file
@ -0,0 +1,69 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mysql
|
||||
labels:
|
||||
app: mysql
|
||||
spec:
|
||||
ports:
|
||||
- port: 3306
|
||||
name: db
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: mysql
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: mysql
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mysql # has to match .spec.template.metadata.labels
|
||||
serviceName: "mysql"
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mysql # has to match .spec.selector.matchLabels
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: mysql
|
||||
image: aimvector/mysql-example
|
||||
ports:
|
||||
- containerPort: 3306
|
||||
name: db
|
||||
env:
|
||||
- name: MYSQL_DATABASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: mysql
|
||||
key: MYSQL_DATABASE
|
||||
- name: MYSQL_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: mysql
|
||||
key: MYSQL_USER
|
||||
- name: MYSQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: mysql
|
||||
key: MYSQL_PASSWORD
|
||||
- name: MYSQL_RANDOM_ROOT_PASSWORD
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: mysql
|
||||
key: MYSQL_RANDOM_ROOT_PASSWORD
|
||||
volumeMounts:
|
||||
- name: db
|
||||
mountPath: /var/lib/mysql
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: db
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: "standard"
|
||||
resources:
|
||||
requests:
|
||||
storage: 500Mi
|
47
kubernetes/datree/example/deployment.yaml
Normal file
@ -0,0 +1,47 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: aimvector/python:1.0.4
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /status
|
||||
# port: 5000
|
||||
# initialDelaySeconds: 3
|
||||
# periodSeconds: 3
|
||||
# readinessProbe:
|
||||
# httpGet:
|
||||
# path: /status
|
||||
# port: 5000
|
||||
# initialDelaySeconds: 3
|
||||
# periodSeconds: 3
|
||||
# resources:
|
||||
# requests:
|
||||
# memory: "64Mi"
|
||||
# cpu: "50m"
|
||||
# limits:
|
||||
# memory: "256Mi"
|
||||
# cpu: "500m"
|
33
kubernetes/datree/github-actions/datree.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches: [ datree-scoring ]
|
||||
env:
|
||||
DATREE_TOKEN: ${{ secrets.DATREE_TOKEN }}
|
||||
jobs:
|
||||
k8sPolicyCheck:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v2
|
||||
- name: run datree policy check
|
||||
uses: datreeio/action-datree@main
|
||||
with:
|
||||
path: 'kubernetes/datree/example/deployment.yaml'
|
||||
cliArguments: '--only-k8s-files'
|
||||
- name: docker login
|
||||
env:
|
||||
DOCKER_USER: ${{ secrets.DOCKER_USER }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
run: |
|
||||
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
|
||||
- name: build
|
||||
run: |
|
||||
docker build ./c# -t aimvector/csharp:1.0.0
|
||||
- name: push
|
||||
run: |
|
||||
docker push aimvector/csharp:1.0.0
|
||||
- name: deploy
|
||||
run: |
|
||||
echo 'deploying...'
|
@ -1,4 +1,6 @@
|
||||
# Deployments
|
||||
# Introduction to Kubernetes: Deployments
|
||||
|
||||
<a href="https://youtu.be/DMpEZEakYVc" title="k8s-deployments"><img src="https://i.ytimg.com/vi/DMpEZEakYVc/hqdefault.jpg" width="20%" alt="k8s-deployments" /></a>
|
||||
|
||||
Build an example app:
|
||||
|
||||
|
305
kubernetes/fluxcd/README.md
Normal file
@ -0,0 +1,305 @@
|
||||
# Introduction to Flux CD v2
|
||||
|
||||
## Create a kubernetes cluster
|
||||
|
||||
In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/). </br>
|
||||
|
||||
```
|
||||
kind create cluster --name fluxcd --image kindest/node:v1.26.3
|
||||
```
|
||||
|
||||
## Run a container to work in
|
||||
|
||||
### run Alpine Linux:
|
||||
```
|
||||
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh
|
||||
```
|
||||
|
||||
### install some tools
|
||||
|
||||
```
|
||||
# install curl
|
||||
apk add --no-cache curl
|
||||
|
||||
# install kubectl
|
||||
curl -sLO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
```
|
||||
|
||||
### test cluster access:
|
||||
```
|
||||
/work # kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
fluxcd-control-plane Ready control-plane 54s v1.26.3
|
||||
```
|
||||
|
||||
## Get the Flux CLI
|
||||
|
||||
Let's download the `flux` command-line utility. </br>
|
||||
We can get this utility from the GitHub [Releases page](https://github.com/fluxcd/flux2/releases) </br>
|
||||
|
||||
It's also worth noting that you should ensure you get a version of flux that supports your version of Kubernetes; check out the [prerequisites](https://fluxcd.io/flux/installation/#prerequisites) page. </br>
|
||||
|
||||
```
|
||||
curl -o /tmp/flux.tar.gz -sLO https://github.com/fluxcd/flux2/releases/download/v2.1.1/flux_2.1.1_linux_amd64.tar.gz
|
||||
tar -C /tmp/ -zxvf /tmp/flux.tar.gz
|
||||
mv /tmp/flux /usr/local/bin/flux
|
||||
chmod +x /usr/local/bin/flux
|
||||
```
|
||||
|
||||
Now we can run `flux --help` to see that it's installed.
|
||||
|
||||
## Check our cluster
|
||||
|
||||
```
|
||||
flux check --pre
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
As with every guide, we start with the documentation. </br>

The [Core Concepts](https://fluxcd.io/flux/concepts/) page is a good place to start. </br>
|
||||
|
||||
We begin by following the steps under the [bootstrap](https://fluxcd.io/flux/installation/#bootstrap) section for GitHub </br>
|
||||
|
||||
We'll need to generate a [personal access token (PAT)](https://github.com/settings/tokens/new) that can create repositories by checking all permissions under `repo`. </br>
|
||||
|
||||
Once we have a token, we can set it:
|
||||
|
||||
```
|
||||
export GITHUB_TOKEN=<your-token>
|
||||
```
|
||||
|
||||
Then we can bootstrap it using the GitHub bootstrap method
|
||||
|
||||
```
|
||||
flux bootstrap github \
|
||||
--token-auth \
|
||||
--owner=marcel-dempers \
|
||||
--repository=docker-development-youtube-series \
|
||||
--path=kubernetes/fluxcd/repositories/infra-repo/clusters/dev-cluster \
|
||||
--personal \
|
||||
--branch fluxcd-2022
|
||||
|
||||
flux check
|
||||
|
||||
# flux manages itself using GitOps objects:
|
||||
kubectl -n flux-system get GitRepository
|
||||
kubectl -n flux-system get Kustomization
|
||||
```
|
||||
|
||||
Check the source code that `flux bootstrap` created
|
||||
|
||||
```
|
||||
git pull origin <branch-name>
|
||||
```
|
||||
|
||||
# Understanding GitOps Repository structures
|
||||
|
||||
Generally, in GitOps you have a dedicated repo for infrastructure templates. </br>
|
||||
Your infrastructure will "sync" from this repo. </br>
|
||||
|
||||
```
|
||||
|
||||
developer +-----------+ +----------+
|
||||
| | | CI |
|
||||
----------> | REPO(code)|---> | PIPELINE |
|
||||
+-----------+ +----------+
|
||||
| commit
|
||||
v
|
||||
+----------+ sync +----------+
|
||||
| INFRA |-------> |INFRA |
|
||||
| (k8s) | |REPO(yaml)|
|
||||
+----------+ +----------+
|
||||
|
||||
```
|
||||
|
||||
Flux repository structure [documentation](https://fluxcd.io/flux/guides/repository-structure/)
|
||||
|
||||
* Mono Repo (all k8s YAML in same "infra repo")
|
||||
* Repo per team
|
||||
* Repo per app
|
||||
|
||||
Take note in this guide the folders under `kubernetes/fluxcd/repositories` represent different GIT repos
|
||||
|
||||
```
|
||||
- repositories
|
||||
- infra-repo
|
||||
- example-app-1
|
||||
- example-app-2
|
||||
```
|
||||
|
||||
## build our example apps
|
||||
|
||||
Let's say we have a microservice called `example-app-1` and it has its own GitHub repo somewhere. </br>
|
||||
For the demo, its code is under `kubernetes/fluxcd/repositories/example-app-1/`
|
||||
|
||||
```
|
||||
# go to our "git repo"
|
||||
cd kubernetes/fluxcd/repositories/example-app-1
|
||||
# check the files
|
||||
ls
|
||||
|
||||
cd src
|
||||
docker build . -t example-app-1:0.0.1
|
||||
|
||||
# load the image into our test cluster so we don't need to push to a registry
|
||||
kind load docker-image example-app-1:0.0.1 --name fluxcd
|
||||
```
|
||||
|
||||
## setup our gitops pipeline
|
||||
|
||||
Now we will also have an "infra-repo" GitHub repo where the infrastructure configuration files for GitOps live.
|
||||
|
||||
```
|
||||
cd kubernetes/fluxcd
|
||||
|
||||
# tell flux where our Git repo is and where the YAML is
|
||||
# this is a once-off step
# flux will monitor the example-app-1 Git repo and sync whenever the infrastructure changes
|
||||
kubectl -n default apply -f repositories/infra-repo/apps/example-app-1/gitrepository.yaml
|
||||
kubectl -n default apply -f repositories/infra-repo/apps/example-app-1/kustomization.yaml
|
||||
|
||||
# check our flux resources
|
||||
kubectl -n default describe gitrepository example-app-1
|
||||
kubectl -n default describe kustomization example-app-1
|
||||
|
||||
# check deployed resources
|
||||
kubectl get all
|
||||
|
||||
kubectl port-forward svc/example-app-1 80:80
|
||||
|
||||
```
|
||||
|
||||
Now that we have set up CD, let's take a look at CI. </br>
|
||||
|
||||
## changes to our example apps
|
||||
|
||||
Once we make changes to our `app.py`, we can build a new image with a new tag. </br>
|
||||
|
||||
```
|
||||
docker build . -t example-app-1:0.0.2
|
||||
|
||||
# load the image into our test cluster so we don't need to push to a registry
|
||||
kind load docker-image example-app-1:0.0.2 --name fluxcd
|
||||
|
||||
# update our kubernetes deployment YAML image tag
|
||||
# git commit with [skip ci] as the prefix of the commit message & git push to the branch!
|
||||
```
|
||||
|
||||
If we wait a minute or so, we can `kubectl port-forward svc/example-app-1 80:80` again and see the changes.
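
We can also ask `flux` for the status of its objects instead of waiting blindly; a quick sketch using the standard `flux` CLI subcommands:

```
# check the source and the kustomization we applied earlier
flux get sources git -n default
flux get kustomizations -n default

# or trigger a sync immediately instead of waiting for the interval
flux reconcile kustomization example-app-1 -n default --with-source
```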
|
||||
|
||||
## automate deploy by updating manifest
|
||||
|
||||
So all we did to update our app was build a new image, push it to our registry and update the image tag in our Kubernetes deployment YAML file; `flux` will sync it. </br>
This is generally the role of CI, whereas `flux`'s concern is mainly CD. </br>
|
||||
|
||||
Here is an example on [how to automate that](https://fluxcd.io/flux/use-cases/gh-actions-manifest-generation/)
|
||||
|
||||
## automate deploy by image scanning
|
||||
|
||||
```
|
||||
docker push
|
||||
|
||||
developer +-----------+ +----------+ +-------------+
|
||||
| | | CI | |IMAGE |
|
||||
----------> | REPO(code)|---> | PIPELINE | ----->|REGISTRY |
|
||||
+-----------+ +----------+ +-------------+
|
||||
^
|
||||
|sync
|
||||
|
|
||||
+----------+ commit +----------+
|
||||
|INFRA | <-------- | INFRA |
|
||||
|REPO(yaml)| | (k8s) |
|
||||
+----------+ +----------+
|
||||
|
||||
```
|
||||
|
||||
An alternative method is to use your CI to build and push a newly tagged image to your registry (same as first option) and use [Flux image scanner](https://fluxcd.io/flux/guides/image-update/#configure-image-updates) to trigger the rollout instead of automating a commit to your config repo. </br>
|
||||
|
||||
We first need to enable image scanning, as it's not enabled by default. </br>
To do this we just need to re-bootstrap `flux` with an additional flag:
|
||||
|
||||
```
|
||||
flux bootstrap github \
|
||||
--token-auth \
|
||||
--owner=marcel-dempers \
|
||||
--repository=docker-development-youtube-series \
|
||||
--path=kubernetes/fluxcd/repositories/infra-repo/clusters/dev-cluster \
|
||||
--components-extra=image-reflector-controller,image-automation-controller \
|
||||
--personal \
|
||||
--branch fluxcd-2022
|
||||
```
|
||||
We need to create an image registry credential for the registry where we will push our image:
|
||||
|
||||
```
|
||||
kubectl -n default create secret docker-registry dockerhub-credential --docker-username '' --docker-password '' --docker-email 'test@test.com'
|
||||
|
||||
```
|
||||
|
||||
# build and push example-app-2
|
||||
|
||||
```
|
||||
cd kubernetes/fluxcd/repositories/example-app-2/
|
||||
ls
|
||||
cd src
|
||||
ls
|
||||
docker build . -t aimvector/example-app-2:0.0.1
|
||||
docker push aimvector/example-app-2:0.0.1
|
||||
|
||||
```
|
||||
We will need to tell Flux how to manage our image deployment. </br>
Note that this time our Kubernetes YAML is in the `infra-repo` (our configs repo). </br>
This is because our application repo triggers its CI, which will build and push a new image to our registry. </br>
Flux will then detect the new image tag and update our Kubernetes YAML in our configs repo. </br>
If Flux pushed the update to our application repo, it would cause a CI/CD loop.
|
||||
|
||||
## add image policy and repository
|
||||
|
||||
```
|
||||
|
||||
kubectl -n default apply -f kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/gitrepository.yaml
|
||||
kubectl -n default apply -f kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/kustomization.yaml
|
||||
|
||||
# see our application
|
||||
kubectl get deploy
|
||||
kubectl get pods
|
||||
|
||||
# tell flux about our image update policy
|
||||
kubectl -n default apply -f kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/imagerepository.yaml
|
||||
kubectl -n default apply -f kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/imagepolicy.yaml
|
||||
kubectl -n default apply -f kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/imageupdateautomation.yaml
|
||||
|
||||
# we will also need to provide authentication for our git repo
|
||||
flux create secret git example-app-2-github --url https://github.com/marcel-dempers/docker-development-youtube-series --username '' --password '' --namespace default
|
||||
```
|
||||
|
||||
There are a number of ways to authenticate with [GitRepositories](https://fluxcd.io/flux/components/source/gitrepositories/#secret-reference)
|
||||
|
||||
```
|
||||
kubectl describe imagepolicy example-app-2
|
||||
kubectl describe imagerepository example-app-2
|
||||
kubectl describe imageupdateautomation example-app-2
|
||||
```
|
||||
|
||||
## Build and push our example-app-2
|
||||
|
||||
```
|
||||
#make application changes and rebuild + push
|
||||
|
||||
docker build . -t aimvector/example-app-2:0.0.2
|
||||
docker push aimvector/example-app-2:0.0.2
|
||||
|
||||
|
||||
# see the new tags
kubectl describe imagerepository

# see the image being updated
kubectl describe imagepolicy example-app-2

# see flux committing back to the repo
kubectl describe imageupdateautomation example-app-2
|
||||
|
||||
```
|
@ -1,4 +1,6 @@
|
||||
# Flux Getting Started Guide
|
||||
# Flux Getting Started Guide (old v1)
|
||||
|
||||
<a href="https://youtu.be/OFgziggbCOg" title="flux cd"><img src="https://i.ytimg.com/vi/OFgziggbCOg/hqdefault.jpg" width="20%" alt="introduction to flux cd" /></a>
|
||||
|
||||
# 1 - Kubernetes
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: example-app-1
|
||||
namespace: default
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"environment" : "dev"
|
||||
}
|
@ -0,0 +1,34 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-app-1
|
||||
labels:
|
||||
app: example-app-1
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app-1
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app-1
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app-1
|
||||
image: example-app-1:0.0.2
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /configs/
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: example-app-1
|
@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
namespace: default
|
||||
name: example-app-1
|
||||
labels:
|
||||
app: example-app-1
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: example-app-1
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 5000
|
6
kubernetes/fluxcd/repositories/example-app-1/src/app.py
Normal file
@ -0,0 +1,6 @@
|
||||
from flask import Flask
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route("/")
|
||||
def hello():
|
||||
return "Hello World! v1.0.0.2"
|
@ -0,0 +1,8 @@
|
||||
FROM python:3.7.3-alpine3.9 as base
|
||||
|
||||
RUN pip install Flask==2.0.3
|
||||
|
||||
WORKDIR /app
|
||||
COPY app.py /app/
|
||||
ENV FLASK_APP=app.py
|
||||
CMD flask run -h 0.0.0.0 -p 5000
|
6
kubernetes/fluxcd/repositories/example-app-2/src/app.py
Normal file
@ -0,0 +1,6 @@
|
||||
from flask import Flask
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route("/")
|
||||
def hello():
|
||||
return "Hello World! v1.0.0.0"
|
@ -0,0 +1,8 @@
|
||||
FROM python:3.7.3-alpine3.9 as base
|
||||
|
||||
RUN pip install Flask==2.0.3
|
||||
|
||||
WORKDIR /app
|
||||
COPY app.py /app/
|
||||
ENV FLASK_APP=app.py
|
||||
CMD flask run -h 0.0.0.0 -p 5000
|
@ -0,0 +1,10 @@
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: GitRepository
|
||||
metadata:
|
||||
name: example-app-1
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 1m0s
|
||||
ref:
|
||||
branch: fluxcd-2022
|
||||
url: https://github.com/marcel-dempers/docker-development-youtube-series
|
@ -0,0 +1,12 @@
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: example-app-1
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 15m
|
||||
path: "./kubernetes/fluxcd/repositories/example-app-1/deploy"
|
||||
prune: true
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: example-app-1
|
@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"environment" : "dev"
|
||||
}
|
@ -0,0 +1,34 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-app-2
|
||||
labels:
|
||||
app: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app-2
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app-2
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app-2
|
||||
image: docker.io/aimvector/example-app-2:0.0.2 # {"$imagepolicy": "default:example-app-2"}
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /configs/
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: example-app-2
|
@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
namespace: default
|
||||
name: example-app-2
|
||||
labels:
|
||||
app: example-app-2
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: example-app-2
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 5000
|
@ -0,0 +1,12 @@
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: GitRepository
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 1m0s
|
||||
ref:
|
||||
branch: fluxcd-2022
|
||||
url: https://github.com/marcel-dempers/docker-development-youtube-series
|
||||
secretRef:
|
||||
name: example-app-2-github
|
@ -0,0 +1,11 @@
|
||||
apiVersion: image.toolkit.fluxcd.io/v1beta2
|
||||
kind: ImagePolicy
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
imageRepositoryRef:
|
||||
name: example-app-2
|
||||
policy:
|
||||
semver:
|
||||
range: 0.0.x
|
@ -0,0 +1,10 @@
|
||||
apiVersion: image.toolkit.fluxcd.io/v1beta2
|
||||
kind: ImageRepository
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
image: docker.io/aimvector/example-app-2
|
||||
interval: 1m0s
|
||||
secretRef:
|
||||
name: dockerhub-credential
|
@ -0,0 +1,24 @@
|
||||
apiVersion: image.toolkit.fluxcd.io/v1beta1
|
||||
kind: ImageUpdateAutomation
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 1m0s
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: example-app-2
|
||||
git:
|
||||
checkout:
|
||||
ref:
|
||||
branch: fluxcd-2022
|
||||
commit:
|
||||
author:
|
||||
email: fluxcdbot@users.noreply.github.com
|
||||
name: fluxcdbot
|
||||
messageTemplate: '{{range .Updated.Images}}{{println .}}{{end}}'
|
||||
push:
|
||||
branch: fluxcd-2022
|
||||
update:
|
||||
path: ./kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/deploy/deployment.yaml
|
||||
strategy: Setters
|
@ -0,0 +1,12 @@
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: example-app-2
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 15m
|
||||
path: "./kubernetes/fluxcd/repositories/infra-repo/apps/example-app-2/deploy"
|
||||
prune: true
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: example-app-2
|
File diff suppressed because it is too large
@ -0,0 +1,27 @@
|
||||
# This manifest was generated by flux. DO NOT EDIT.
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: GitRepository
|
||||
metadata:
|
||||
name: flux-system
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 1m0s
|
||||
ref:
|
||||
branch: fluxcd-2022
|
||||
secretRef:
|
||||
name: flux-system
|
||||
url: https://github.com/marcel-dempers/docker-development-youtube-series.git
|
||||
---
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: flux-system
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 10m0s
|
||||
path: ./kubernetes/fluxcd/repositories/infra-repo/clusters/dev-cluster
|
||||
prune: true
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
@ -0,0 +1,5 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- gotk-components.yaml
|
||||
- gotk-sync.yaml
|
@ -1,11 +1,13 @@
|
||||
# Introduction to Helm
|
||||
|
||||
<a href="https://youtu.be/5_J7RWLLVeQ" title="k8s-helm"><img src="https://i.ytimg.com/vi/5_J7RWLLVeQ/hqdefault.jpg" width="20%" alt="k8s-helm" /></a>
|
||||
|
||||
## We need a Kubernetes cluster
|
||||
|
||||
Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
|
||||
|
||||
```
|
||||
kind create cluster --name helm --image kindest/node:v1.19.1
|
||||
kind create cluster --name helm --image kindest/node:v1.26.0
|
||||
```
|
||||
|
||||
# Getting Started with Helm
|
||||
@ -30,7 +32,7 @@ export KUBE_EDITOR="nano"
|
||||
# test cluster access:
|
||||
/work # kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
helm-control-plane Ready master 26m v1.19.1
|
||||
helm-control-plane Ready master 26m v1.26.0
|
||||
|
||||
```
|
||||
|
||||
@ -202,4 +204,4 @@ This may help you keep the `values.yaml` file small <br/>
|
||||
# rollout the change
|
||||
|
||||
helm upgrade example-app example-app --values ./example-app/example-app-01.values.yaml
|
||||
```
|
||||
```
|
||||
|
403
kubernetes/ingress/controller/nginx/README.md
Normal file
@ -0,0 +1,403 @@
|
||||
# Introduction to NGINX Ingress Controller
|
||||
|
||||
## Create a kubernetes cluster
|
||||
|
||||
In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/). </br>
|
||||
|
||||
```
|
||||
kind create cluster --name nginx-ingress --image kindest/node:v1.23.5
|
||||
```
|
||||
|
||||
See cluster up and running:
|
||||
|
||||
```
|
||||
kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
nginx-ingress-control-plane Ready control-plane,master 2m12s v1.23.5
|
||||
```
|
||||
|
||||
## Run a container to work in
|
||||
|
||||
### run Alpine Linux:
|
||||
```
|
||||
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh
|
||||
```
|
||||
|
||||
### install some tools
|
||||
|
||||
```
|
||||
# install curl
|
||||
apk add --no-cache curl
|
||||
|
||||
# install kubectl
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
# install helm
|
||||
|
||||
curl -o /tmp/helm.tar.gz -LO https://get.helm.sh/helm-v3.10.1-linux-amd64.tar.gz
|
||||
tar -C /tmp/ -zxvf /tmp/helm.tar.gz
|
||||
mv /tmp/linux-amd64/helm /usr/local/bin/helm
|
||||
chmod +x /usr/local/bin/helm
|
||||
|
||||
```
|
||||
|
||||
### test cluster access:
|
||||
```
|
||||
/work # kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
nginx-ingress-control-plane Ready control-plane,master 3m26s v1.23.5
|
||||
```
|
||||
|
||||
## NGINX Ingress Controller
|
||||
|
||||
We'll start with the documentation as always </br>
|
||||
You can find the [Kubernetes NGINX documentation here](https://kubernetes.github.io/ingress-nginx/) </br>
|
||||
|
||||
The first thing we do is check the compatibility matrix to ensure we are deploying a compatible version of NGINX Ingress on our Kubernetes cluster. </br>
|
||||
|
||||
The documentation also has a link to the [GitHub Repo](https://github.com/kubernetes/ingress-nginx/), which has a compatibility matrix. </br>
|
||||
|
||||
### Get the installation YAML
|
||||
|
||||
The controller ships as a `helm` chart, so we can grab version `v1.5.1` as per the compatibility matrix. </br>
|
||||
|
||||
From our container we can do this:
|
||||
|
||||
```
|
||||
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
|
||||
helm search repo ingress-nginx --versions
|
||||
```
|
||||
|
||||
From the app version we select the version that matches the compatibility matrix. </br>
|
||||
|
||||
```
|
||||
NAME CHART VERSION APP VERSION DESCRIPTION
|
||||
ingress-nginx/ingress-nginx 4.4.0 1.5.1 Ingress controller for Kubernetes using NGINX a...
|
||||
```
|
||||
|
||||
Now we can use `helm` to install the chart directly if we want. </br>
|
||||
Or we can use `helm` to grab the manifest and explore its content. </br>
|
||||
We can also add that manifest to our git repo if we are using a GitOps workflow to deploy it. </br>
|
||||
|
||||
```
|
||||
CHART_VERSION="4.4.0"
|
||||
APP_VERSION="1.5.1"
|
||||
|
||||
mkdir ./kubernetes/ingress/controller/nginx/manifests/
|
||||
|
||||
helm template ingress-nginx ingress-nginx \
|
||||
--repo https://kubernetes.github.io/ingress-nginx \
|
||||
--version ${CHART_VERSION} \
|
||||
--namespace ingress-nginx \
|
||||
> ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
|
||||
```
|
||||
|
||||
### Deploy the Ingress controller
|
||||
|
||||
```
|
||||
kubectl create namespace ingress-nginx
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
|
||||
```
|
||||
|
||||
|
||||
### Check the installation
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx get pods
|
||||
```
|
||||
The traffic for our cluster will come in over the Ingress service. </br>
Note that we don't have load balancer capability in `kind` by default, so our `LoadBalancer` is pending:
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx get svc
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
ingress-nginx-controller LoadBalancer 10.96.130.21 <pending> 80:31011/TCP,443:31772/TCP 26m
|
||||
ingress-nginx-controller-admission ClusterIP 10.96.125.210 <none> 443/TCP 26m
|
||||
```
|
||||
|
||||
For testing purposes, we will simply set up `port-forward`ing. </br>
|
||||
If you are running in the cloud, you will get a real IP address. </br>
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx port-forward svc/ingress-nginx-controller 443
|
||||
```
|
||||
|
||||
We can reach our controller on [https://localhost/](https://localhost/) </br>
|
||||
|
||||
It's important to understand that Ingress runs on two ports, `80` and `443`. </br>
|
||||
NGINX Ingress creates a fake certificate which is served for default `HTTPS` traffic on port `443`. </br>
|
||||
If you look in the browser you will notice the name of the certificate `Common Name (CN) Kubernetes Ingress Controller Fake Certificate`
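
We can also inspect that certificate from the command line (assuming the `port-forward` above is still running); `curl`'s verbose TLS output includes the certificate subject:

```
curl -vk https://localhost/ 2>&1 | grep subject
```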
|
||||
|
||||
## Features
|
||||
|
||||
Now before we take a look at the features, we'll need two web applications that we can use as our test harness: `service-a` and `service-b`. </br>

In this demo, I have a deployment that runs a pod, and a service that exposes the pod on port 80. </br>
This is a typical scenario where you have a microservice you want to expose publicly. </br>
|
||||
|
||||
### Deploy Service A & B
|
||||
|
||||
We'll deploy these two apps to the default namespace:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/service-a.yaml
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/service-b.yaml
|
||||
```
|
||||
|
||||
Test our service: `kubectl port-forward svc/service-a 80`
|
||||
|
||||
Our services accept traffic on:
|
||||
|
||||
* `http://localhost/` which goes to the root `/`
* `http://localhost/path-a.html` which goes to `/path-a.html`
* `http://localhost/path-b.html` which goes to `/path-b.html`
* `http://localhost/<any-other-path>.html` which returns a `404`
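
With the `port-forward` above still running, a quick smoke test of those paths could look like this (the response bodies come from the ConfigMap that backs each service):

```
curl http://localhost/
curl http://localhost/path-a.html
curl http://localhost/path-b.html
curl http://localhost/other.html   # served by the service's 404 page
```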
|
||||
|
||||
### Routing by Domain
|
||||
|
||||
The most common way to route traffic with ingress is by domain:
|
||||
|
||||
* https://public.service-a.com/ --> Ingress --> k8s service --> http://service-a/
|
||||
* https://public.service-b.com/ --> Ingress --> k8s service --> http://service-b/
|
||||
|
||||
To showcase this, let's deploy an ingress for service-a and service-b that routes by domain. </br>
|
||||
|
||||
<i>Note: we don't own public domain `public.service-a.com` so we're using a `/etc/hosts` file</i>
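
A minimal `/etc/hosts` sketch for this demo, pointing the test domains at our local machine (`public.my-services.com` is used by the path-based examples further down):

```
127.0.0.1 public.service-a.com
127.0.0.1 public.service-b.com
127.0.0.1 public.my-services.com
```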
|
||||
|
||||
Example Ingress:
|
||||
|
||||
```
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-a
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.service-a.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-a
|
||||
port:
|
||||
number: 80
|
||||
```
|
||||
|
||||
<i>Note: we don't own public domain `public.my-services.com` so we're using a `/etc/hosts` file</i>
|
||||
|
||||
Deploy our ingresses:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-domain.yaml
|
||||
```
|
||||
|
||||
Now we can access service-a and service-b on:
|
||||
|
||||
* https://public.service-a.com/
|
||||
* https://public.service-b.com/
|
||||
|
||||
|
||||
### Routing by Path
|
||||
|
||||
Another popular routing strategy is to use a shared domain and route based on the HTTP path. For example: </br>
|
||||
|
||||
* https://public.my-services.com/path-a --> Ingress --> k8s service --> http://service-a/path-a
|
||||
* https://public.my-services.com/path-b --> Ingress --> k8s service --> http://service-b/path-b
|
||||
|
||||
This way the public path `/path-a` will hit our application on `/path-a`. </br>
|
||||
|
||||
Example Ingress:
|
||||
|
||||
```
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-a
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.my-services.com
|
||||
http:
|
||||
paths:
|
||||
- path: /path-a
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-a
|
||||
port:
|
||||
number: 80
|
||||
```
|
||||
Deploy our ingresses:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-path.yaml
|
||||
```
|
||||
Now notice the following routing:
|
||||
|
||||
* https://public.my-services.com/ --> Ingress (404)
|
||||
* https://public.my-services.com/path-a --> Ingress --> k8s service --> http://service-a/
|
||||
* https://public.my-services.com/path-b --> Ingress --> k8s service --> http://service-b/
|
||||
|
||||
No matter what path you place on the front end, as long as the path matches `/path-a` or `/path-b`
it will be routed to the correct service on `/`. </br>
It's important to note that extra paths and querystrings will NOT be passed to the upstream. </br>
|
||||
|
||||
We can see this by looking at our NGINX Ingress controller logs, as the controller will write the path it saw as well as the upstream service where it sent the request:
|
||||
```
|
||||
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
|
||||
```
|
||||
|
||||
### App Root
|
||||
|
||||
Sometimes applications have different root paths and don't simply serve traffic on `/`. </br>
|
||||
For example, the base path may be `http://localhost/home` </br>
|
||||
|
||||
To tell the Ingress controller that our application root path is `/home`, we can set the annotation `nginx.ingress.kubernetes.io/app-root: /home` </br>
|
||||
|
||||
This means the controller will be aware that all traffic that matches `path-a` should go to `/home` on service-a. </br>
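
A minimal sketch of that annotation in place, modeled on the ingress examples above (the `/home` root path is hypothetical):

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    nginx.ingress.kubernetes.io/app-root: /home
spec:
  ingressClassName: nginx
  rules:
  - host: public.service-a.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
```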
|
||||
|
||||
### URL Rewrite
|
||||
|
||||
We saw earlier, when we routed by path, that we could send `/path-a` to service-a and `/path-b` to service-b. </br>
However, the traffic would always go to `/`, so we lost any trailing URL path, parameters and querystring. </br>
|
||||
Not very useful. </br>
|
||||
|
||||
To allow the Ingress controller to pass paths to the upstream you need to look into [Rewrite Configuration](https://kubernetes.github.io/ingress-nginx/examples/rewrite/)
|
||||
|
||||
Example Ingress (the rewrite target `/$2` refers to the second capture group, `(.*)`, in the `path` below):
|
||||
|
||||
```
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-a
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.my-services.com
|
||||
http:
|
||||
paths:
|
||||
- path: /path-a(/|$)(.*)
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-a
|
||||
port:
|
||||
number: 80
|
||||
```
|
||||
Deploy our ingresses:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/routing-by-path-rewrite.yaml
|
||||
```
|
||||
Now notice the following routing:
|
||||
|
||||
* https://public.my-services.com/ --> Ingress (404)
|
||||
* https://public.my-services.com/path-a* --> Ingress --> k8s service --> http://service-a/*
|
||||
* https://public.my-services.com/path-b* --> Ingress --> k8s service --> http://service-b/*
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
|
||||
```
|
||||
It's important to study the logs of the Ingress Controller to learn what path it saw and where it routed the request:
|
||||
|
||||
```
|
||||
127.0.0.1 - - [13/Nov/2022:02:17:47 +0000] "GET /path-a/path.html HTTP/2.0" 404 19 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" 485 0.000 [default-service-a-80] [] 10.244.0.8:80 19 0.000 404 206ed4b88b712564fc073c3adb845dff
|
||||
```
|
||||
|
||||
In the above case, the controller saw `/path-a/path.html` and routed to service-a; we can see what service-a received by looking at its logs:
|
||||
|
||||
```
|
||||
kubectl logs -l app=service-a
|
||||
10.244.0.7 - - [13/Nov/2022:02:28:36 +0000] "GET /path-a.html HTTP/1.1" 200 28 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
|
||||
```
|
||||
|
||||
|
||||
### SSL terminating & passthrough
|
||||
|
||||
As we noticed in the logs, it's the default for the Ingress controller to offload SSL. </br>
We can see this because when it routes to upstreams, it routes to our service on port 80. </br>
Ingress offloads the TLS connection and creates a new connection with its upstream. </br>

This is a common approach to offload TLS at the edge, as internal traffic is generally unencrypted in private networks, especially in large microservice environments where security is tightened in other ways, so TLS is not needed all the way through. </br>
|
||||
|
||||
We can enable SSL passthrough with the annotation `nginx.ingress.kubernetes.io/ssl-passthrough`. </br>

SSL passthrough is disabled by default and requires starting the controller with the `--enable-ssl-passthrough` flag. </br>
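
A sketch of what enabling that looks like: add the flag to the controller's container args in the manifest we generated earlier (the surrounding args below are illustrative; check the real list in your generated `nginx-ingress.${APP_VERSION}.yaml`):

```
containers:
- name: controller
  args:
  - /nginx-ingress-controller
  - --enable-ssl-passthrough
```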
|
||||
|
||||
### IP Whitelist
|
||||
|
||||
We can add a layer of protection to our services that are exposed by an ingress. </br>
|
||||
One popular way is IP whitelisting. </br>
|
||||
|
||||
This can be done with a [whitelist source range annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#whitelist-source-range) for example: </br>
|
||||
|
||||
`nginx.ingress.kubernetes.io/whitelist-source-range: <ip,ip,ip>`</br>
|
||||
|
||||
You can set this globally if you want using the [Customization ConfigMap](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#whitelist-source-range). </br>
|
||||
We'll take a look at this customization in a bit. </br>
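
For example, on an ingress (the CIDR ranges below are placeholders for your own office or VPN ranges):

```
metadata:
  annotations:
    nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/24,172.16.0.0/16
```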
|
||||
|
||||
### Authentication
|
||||
|
||||
You can add a layer of protection to services exposed by ingress using several [Authentication methods](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#authentication). </br>

A simple example is basic authentication, where the client supplies a `username/password` to access our service. </br>
|
||||
|
||||
This is controlled by annotations:
|
||||
|
||||
* `nginx.ingress.kubernetes.io/auth-type: basic`
|
||||
* `nginx.ingress.kubernetes.io/auth-secret: server-a-secret`
|
||||
* `nginx.ingress.kubernetes.io/auth-secret-type: auth-file`
|
||||
|
||||
Create a username and password:
|
||||
|
||||
```
|
||||
apk add apache2-utils
|
||||
|
||||
htpasswd -c auth service-a-user
|
||||
|
||||
kubectl create secret generic server-a-secret --from-file=auth
|
||||
```
|
||||
|
||||
Deploy our ingresses:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/features/basic-auth.yaml
|
||||
```
|
||||
|
||||
### Server snippet
|
||||
|
||||
Every ingress is technically an NGINX server block with an NGINX proxy pass. </br>
|
||||
We can even customise this server block with a [Server Snippet annotation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#server-snippet)
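
A small sketch of the annotation (the `add_header` directive is just an illustrative NGINX directive; anything valid in an NGINX `server` block can go here):

```
metadata:
  annotations:
    nginx.ingress.kubernetes.io/server-snippet: |
      add_header X-Served-By "service-a" always;
```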
|
||||
|
||||
|
||||
### Customization
|
||||
|
||||
As mentioned before, the NGINX Ingress controller can be customized quite heavily with the [ConfigMap](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/)
|
||||
|
||||
We can also customize the log format to JSON, for example:
|
||||
|
||||
```
|
||||
log-format-escape-json: "true"
|
||||
log-format-upstream: '{"time":"$time_iso8601","remote_addr":"$remote_addr","proxy_protocol_addr":"$proxy_protocol_addr","proxy_protocol_port":"$proxy_protocol_port","x_forward_for":"$proxy_add_x_forwarded_for","remote_user":"$remote_user","host":"$host","request_method":"$request_method","request_uri":"$request_uri","server_protocol":"$server_protocol","status":$status,"request_time":$request_time,"request_length":$request_length,"bytes_sent":$bytes_sent,"upstream_name":"$proxy_upstream_name","upstream_addr":"$upstream_addr","upstream_uri":"$uri","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status,"http_referrer":"$http_referer","http_user_agent":"$http_user_agent","http_cookie":"$http_cookie","http_device_id":"$http_x_device_id","http_customer_id":"$http_x_customer_id"}'
|
||||
|
||||
```
|
||||
|
||||
Apply the changes and restart Ingress:
|
||||
|
||||
```
|
||||
kubectl apply -f ./kubernetes/ingress/controller/nginx/manifests/nginx-ingress.${APP_VERSION}.yaml
|
||||
```
|
||||
|
||||
Then check the logs to see the new format:

```
kubectl -n ingress-nginx logs -l app.kubernetes.io/instance=ingress-nginx
```
|
@ -1,4 +1,4 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: nginx-ingress-clusterrole
|
||||
@ -50,4 +50,4 @@ rules:
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- update
|
||||
|
43
kubernetes/ingress/controller/nginx/features/basic-auth.yaml
Normal file
@ -0,0 +1,43 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-a
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/auth-type: basic
|
||||
nginx.ingress.kubernetes.io/auth-secret: server-a-secret
|
||||
nginx.ingress.kubernetes.io/auth-secret-type: auth-file
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.my-services.com
|
||||
http:
|
||||
paths:
|
||||
- path: /path-a(/|$)(.*)
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-a
|
||||
port:
|
||||
number: 80
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-b
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.my-services.com
|
||||
http:
|
||||
paths:
|
||||
- path: /path-b(/|$)(.*)
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-b
|
||||
port:
|
||||
number: 80
|
||||
---
|
@ -0,0 +1,36 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-a
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.service-a.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-a
|
||||
port:
|
||||
number: 80
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: service-b
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: public.service-b.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: service-b
|
||||
port:
|
||||
number: 80
|
||||
---
|
@ -0,0 +1,40 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-a(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-b(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---
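With `rewrite-target: /$2`, the second capture group of `/path-a(/|$)(.*)` is what the upstream receives, so the `/path-a` prefix is stripped before the request reaches the backend. For example (Host header assumed as in the earlier test):

```
# /path-a/path-a.html -> service-a receives /path-a.html
curl -H "Host: public.my-services.com" http://localhost/path-a/path-a.html

# /path-b/index.html  -> service-b receives /index.html
curl -H "Host: public.my-services.com" http://localhost/path-b/index.html
```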
@ -0,0 +1,40 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-a
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-a
        pathType: Prefix
        backend:
          service:
            name: service-a
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: service-b
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: public.my-services.com
    http:
      paths:
      - path: /path-b
        pathType: Prefix
        backend:
          service:
            name: service-b
            port:
              number: 80
---
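Note the difference from the capture-group variant above: with a plain `rewrite-target: /` every matching request is rewritten to exactly `/`, so sub-paths are lost and both of these should return the index page of service-a:

```
curl -H "Host: public.my-services.com" http://localhost/path-a
curl -H "Host: public.my-services.com" http://localhost/path-a/path-a.html
```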
92
kubernetes/ingress/controller/nginx/features/service-a.yaml
Normal file
@ -0,0 +1,92 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-a
data:
  path-a.html: |
    "/path-a.html" on service-a
  path-b.html: |
    "/path-b.html" on service-a
  index.html: |
    "/" on service-a
  404.html: |
    service-a 404 page
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-a-nginx.conf
data:
  nginx.conf: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }

    http {
      sendfile on;
      server {
        listen 80;
        server_name localhost;

        location / {
          root /usr/share/nginx/html;
          index index.html index.htm;
        }

        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-a
  labels:
    app: service-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service-a
  template:
    metadata:
      labels:
        app: service-a
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
        volumeMounts:
        - name: html
          mountPath: "/usr/share/nginx/html/"
        - name: config
          mountPath: "/etc/nginx/"
      volumes:
      - name: html
        configMap:
          name: service-a
      - name: config
        configMap:
          name: service-a-nginx.conf
---
apiVersion: v1
kind: Service
metadata:
  name: service-a
spec:
  selector:
    app: service-a
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
92
kubernetes/ingress/controller/nginx/features/service-b.yaml
Normal file
@ -0,0 +1,92 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-b
data:
  path-a.html: |
    "/path-a.html" on service-b
  path-b.html: |
    "/path-b.html" on service-b
  index.html: |
    "/" on service-b
  404.html: |
    service-b 404 page
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: service-b-nginx.conf
data:
  nginx.conf: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }

    http {
      sendfile on;
      server {
        listen 80;
        server_name localhost;

        location / {
          root /usr/share/nginx/html;
          index index.html index.htm;
        }

        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
          root /usr/share/nginx/html;
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-b
  labels:
    app: service-b
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service-b
  template:
    metadata:
      labels:
        app: service-b
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
        volumeMounts:
        - name: html
          mountPath: "/usr/share/nginx/html/"
        - name: config
          mountPath: "/etc/nginx/"
      volumes:
      - name: html
        configMap:
          name: service-b
      - name: config
        configMap:
          name: service-b-nginx.conf
---
apiVersion: v1
kind: Service
metadata:
  name: service-b
spec:
  selector:
    app: service-b
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
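To exercise any of the Ingress variants above, both backend services need to be running first; the file paths come straight from this commit:

```
kubectl apply -f kubernetes/ingress/controller/nginx/features/service-a.yaml
kubectl apply -f kubernetes/ingress/controller/nginx/features/service-b.yaml
```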
@ -0,0 +1,742 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  log-format-escape-json: "true"
  log-format-upstream: '{"time":"$time_iso8601","remote_addr":"$remote_addr","proxy_protocol_addr":"$proxy_protocol_addr","proxy_protocol_port":"$proxy_protocol_port","x_forward_for":"$proxy_add_x_forwarded_for","remote_user":"$remote_user","host":"$host","request_method":"$request_method","request_uri":"$request_uri","server_protocol":"$server_protocol","status":$status,"request_time":$request_time,"request_length":$request_length,"bytes_sent":$bytes_sent,"upstream_name":"$proxy_upstream_name","upstream_addr":"$upstream_addr","upstream_uri":"$uri","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status,"http_referrer":"$http_referer","http_user_agent":"$http_user_agent","http_cookie":"$http_cookie","http_device_id":"$http_x_device_id","http_customer_id":"$http_x_customer_id"}'
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  # TODO(Jintao Zhang)
  # Once we release a new version of the controller,
  # we will be able to remove the configmap related permissions
  # We have used the Lease API for selection
  # ref: https://github.com/kubernetes/ingress-nginx/pull/8921
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      - ingress-nginx-leader
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    resourceNames:
      - ingress-nginx-leader
    verbs:
      - get
      - update
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
    - name: https-webhook
      port: 443
      targetPort: webhook
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: LoadBalancer
  ipFamilyPolicy: SingleStack
  ipFamilies:
    - IPv4
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  replicas: 1
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
            - --election-id=ingress-nginx-leader
            - --controller-class=k8s.io/ingress-nginx
            - --ingress-class=nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  name: ingress-nginx-admission
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    matchPolicy: Equivalent
    rules:
      - apiGroups:
          - networking.k8s.io
        apiVersions:
          - v1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions:
      - v1
    clientConfig:
      service:
        namespace: "ingress-nginx"
        name: ingress-nginx-controller-admission
        path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx-admission
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - validatingwebhookconfigurations
    verbs:
      - get
      - update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.5.1"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.4.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.5.1"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f"
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
@ -1,5 +1,7 @@

VIDEO : https://youtu.be/feLpGydQVio
Kubectl Basics:

<a href="https://youtu.be/feLpGydQVio" title="Kubernetes"><img src="https://i.ytimg.com/vi/feLpGydQVio/hqdefault.jpg" width="30%" alt="kubectl basics" /></a>

## Configs

190
kubernetes/kubectl/README.md
Normal file
@ -0,0 +1,190 @@
# Introduction to KUBECTL

<a href="https://youtu.be/1zcXudjSVUs" title="k8s-kubectl"><img src="https://i.ytimg.com/vi/1zcXudjSVUs/hqdefault.jpg" width="20%" alt="k8s-kubectl" /></a>

To start off this tutorial, we will be using [kind](https://kind.sigs.k8s.io/) to create our test cluster. </br>
You can use `minikube` or any Kubernetes cluster. </br>

Kind is an amazing tool for running test clusters locally, as it runs in a container, which makes it lightweight and easy to run throw-away clusters for testing purposes. </br>

## Download KUBECTL

We can download `kubectl` from the [Official Docs](https://kubernetes.io/docs/tasks/tools/) </br>

## Create a kubernetes cluster

In this guide we will run two clusters side by side so we can demonstrate cluster access. </br>
Create two clusters:

```
kind create cluster --name dev --image kindest/node:v1.23.5
kind create cluster --name prod --image kindest/node:v1.23.5
```

See the cluster up and running:

```
kubectl get nodes
NAME                 STATUS   ROLES                  AGE     VERSION
prod-control-plane   Ready    control-plane,master   2m12s   v1.23.5
```

## Understanding the KUBECONFIG

The default location of the `kubeconfig` file is `<users-directory>/.kube/config`

```
kind: Config
apiVersion: v1
clusters:
- list of clusters (addresses \ endpoints)
users:
- list of users (thing that identifies us when accessing a cluster [certificate])
contexts:
- list of contexts (which user and cluster to use when running commands)
```
Commands to interact with the `kubeconfig` file live under `kubectl config`. </br>
The key commands tell `kubectl` which context to use:

```
kubectl config current-context
kubectl config get-contexts
kubectl config use-context <name>
```
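With the two kind clusters created above, this looks as follows; kind prefixes context names with `kind-`, so the contexts are `kind-dev` and `kind-prod`:

```
kubectl config get-contexts
kubectl config use-context kind-dev
kubectl config current-context
```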
You can also tell `kubectl` to use different config files. </br>
This is useful to keep your production config separate from your development ones. </br>

Set the `$KUBECONFIG` environment variable to a path:
```
#linux
export KUBECONFIG=<path>

#windows
$ENV:KUBECONFIG="C:\Users\aimve\.kube\config"
```

We can export separate configs using `kind` </br>
This is possible with cloud based clusters as well:

```
kind --name dev export kubeconfig --kubeconfig C:\Users\aimve\.kube\dev-config

kind --name prod export kubeconfig --kubeconfig C:\Users\aimve\.kube\prod-config

#switch to prod
$ENV:KUBECONFIG="C:\Users\aimve\.kube\prod-config"
kubectl get nodes
```
## Working with Kubernetes resources

Now that we have cluster access, next we can read resources from the cluster
with the `kubectl get` command.

## Namespaces

Most kubernetes resources are namespace scoped:

```
kubectl get namespaces
```

By default, `kubectl` commands will run against the `default` namespace

## List resources in a namespace

```
kubectl get <resource>

kubectl get pods
kubectl get deployments
kubectl get services
kubectl get configmaps
kubectl get secrets
kubectl get ingress
```
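To read from another namespace, pass `-n <namespace>`; `-A` (short for `--all-namespaces`) lists across every namespace:

```
kubectl get pods -n kube-system
kubectl get pods -A
```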
## Create resources in a namespace

We can create a namespace with the `kubectl create` command:

```
kubectl create ns example-apps
```

Let's create a couple of resources:

```
kubectl -n example-apps create deployment webserver --image=nginx --port=80
kubectl -n example-apps get deploy
kubectl -n example-apps get pods

kubectl -n example-apps create service clusterip webserver --tcp 80:80
kubectl -n example-apps get service
kubectl -n example-apps port-forward svc/webserver 80
# we can access http://localhost/

kubectl -n example-apps create configmap webserver-config --from-file config.json=./kubernetes/kubectl/config.json
kubectl -n example-apps get cm

kubectl -n example-apps create secret generic webserver-secret --from-file secret.json=./kubernetes/kubectl/secret.json
kubectl -n example-apps get secret
```
## Working with YAML

As you can see, we can create resources with `kubectl`, but this is only for basic testing purposes.
Kubernetes is a declarative platform, meaning we should provide it with what to create instead
of running imperative line-by-line commands. </br>

We can also get the YAML of pre-existing objects in our cluster with the `-o yaml` flag on the `get` command. </br>

Let's output all our YAML to a `yaml` folder:

```
kubectl -n example-apps get cm webserver-config -o yaml > .\kubernetes\kubectl\yaml\config.yaml
kubectl -n example-apps get secret webserver-secret -o yaml > .\kubernetes\kubectl\yaml\secret.yaml
kubectl -n example-apps get deploy webserver -o yaml > .\kubernetes\kubectl\yaml\deployment.yaml
kubectl -n example-apps get svc webserver -o yaml > .\kubernetes\kubectl\yaml\service.yaml
```
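A related trick: if you want clean YAML without cluster-managed fields like `status` and `creationTimestamp`, generate it client-side instead of exporting a live object:

```
kubectl create deployment webserver --image=nginx --port=80 --dry-run=client -o yaml
```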
## Create resources from YAML files

The most common and recommended way to create resources in Kubernetes is with the `kubectl apply` command. </br>
This command takes in declarative `YAML` files.

To show you how powerful it is, instead of creating things line-by-line, we can deploy all our infrastructure
with a single command. </br>

Let's deploy a WordPress CMS site with a MySQL database as its back end. </br>
This is a snippet taken from my `How to learn Kubernetes` video:

```
kubectl create ns wordpress-site
kubectl -n wordpress-site apply -f ./kubernetes/tutorials/basics/yaml/
```

We can check out our site with the `port-forward` command:

```
kubectl -n wordpress-site get svc

NAME        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
mysql       ClusterIP   10.96.146.75   <none>        3306/TCP   17s
wordpress   ClusterIP   10.96.157.6    <none>        80/TCP     17s

kubectl -n wordpress-site port-forward svc/wordpress 80
```

## Clean up

```
kind delete cluster --name dev
kind delete cluster --name prod
```
3
kubernetes/kubectl/config.json
Normal file
@ -0,0 +1,3 @@
{
  "config": "some-value"
}
3
kubernetes/kubectl/secret.json
Normal file
@ -0,0 +1,3 @@
{
  "secret": "some-secret-value"
}
BIN
kubernetes/kubectl/yaml/config.yaml
Normal file
Binary file not shown.
BIN
kubernetes/kubectl/yaml/deployment.yaml
Normal file
Binary file not shown.
BIN
kubernetes/kubectl/yaml/secret.yaml
Normal file
Binary file not shown.
BIN
kubernetes/kubectl/yaml/service.yaml
Normal file
Binary file not shown.
@ -1,5 +1,8 @@

# The Basics

<a href="https://youtu.be/5gsHYdiD6v8" title="k8s-kustomize"><img src="https://i.ytimg.com/vi/5gsHYdiD6v8/hqdefault.jpg" width="20%" alt="k8s-kustomize" /></a>

```
kubectl apply -f kubernetes/kustomize/application/namespace.yaml
@ -1,5 +1,7 @@
# Persistent Volumes Demo

<a href="https://youtu.be/ZxC6FwEc9WQ" title="k8s-pv"><img src="https://i.ytimg.com/vi/ZxC6FwEc9WQ/hqdefault.jpg" width="20%" alt="k8s-pv" /></a>

## Container Storage

By default containers store their data on the file system like any other process.
@ -1,5 +1,7 @@
# Introduction to Portainer

<a href="https://youtu.be/FC8pABzxZVU" title="k8s-portainer"><img src="https://i.ytimg.com/vi/FC8pABzxZVU/hqdefault.jpg" width="20%" alt="k8s-portainer" /></a>

Start here 👉🏽[https://www.portainer.io/](https://www.portainer.io/) </br>
Documentation 👉🏽[https://docs.portainer.io/](https://docs.portainer.io/)
121
kubernetes/probes/README.md
Normal file
@ -0,0 +1,121 @@
# Introduction to Kubernetes Probes

## Create a kubernetes cluster

In this guide we'll need a Kubernetes cluster for testing. Let's create one using [kind](https://kind.sigs.k8s.io/) </br>

```
cd kubernetes/probes
kind create cluster --name demo --image kindest/node:v1.28.0
```

Test the cluster:
```
kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
demo-control-plane   Ready    control-plane   59s   v1.28.0
```
## Applications

The client app acts as a client that sends web requests:

```
kubectl apply -f client.yaml
```

The server app is the app that will receive web requests:

```
kubectl apply -f server.yaml
```

Test making web requests constantly:

```
while true; do curl http://server; sleep 1s; done
```
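The loop above is meant to run from inside the client pod, where the service name `server` resolves via cluster DNS. The alpine image doesn't ship curl, so install it first (a sketch):

```
kubectl exec -it deploy/client -- sh
apk add curl
while true; do curl http://server; sleep 1s; done
```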
Bump the server `version` label up and apply to force a new deployment. </br>
Notice the client throws an error, so traffic is interrupted. Not good! </br>

This is because our new pod is not yet ready to take traffic during the deployment!
## Readiness Probes

Let's add a readiness probe that tells Kubernetes when we are ready:

```
readinessProbe:
  httpGet:
    path: /
    port: 5000
  initialDelaySeconds: 3
  periodSeconds: 3
  failureThreshold: 3
```
### Automatic failover with Readiness probes

Let's pretend our application starts hanging and no longer returns responses </br>
This is common with some web servers, which may need to be manually restarted

```
kubectl exec -it podname -- sh -c "rm /data.txt"
```

Now we will notice our client app starts getting errors. </br>
A few things to notice:

* Our readiness probe detected an issue and removed traffic from the faulty pod.
* We should be running more than one replica of the application so we would be highly available

```
kubectl scale deploy server --replicas 2
```

* Notice traffic comes back as it's routed to the healthy pod

Fix our old pod: `kubectl exec -it podname -- sh -c "echo 'ok' > /data.txt"` </br>

* If we do this again with 2 pods, notice we still get an interruption but our app automatically stabilises after some time
* This is because the readinessProbe has a `failureThreshold`, so some failure is expected before recovery
* Do not set this `failureThreshold` too low as you may remove traffic too frequently. Tune accordingly!

Readiness probes help us automatically remove traffic when there are intermittent network issues </br>
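A handy way to watch this happen is to keep an eye on the `READY` column; when the readiness probe fails, the pod flips to `0/1` and its endpoint is removed from the service:

```
kubectl get pods -w
```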
## Liveness Probes

A liveness probe helps us when we cannot automatically recover. </br>
Let's use the same mechanism to create a faulty pod:

```
kubectl exec -it podname -- sh -c "rm /data.txt"
```

Our readiness probe has saved us from traffic issues. </br>
But we want the pod to recover automatically, so let's create a livenessProbe:

```
livenessProbe:
  httpGet:
    path: /
    port: 5000
  initialDelaySeconds: 3
  periodSeconds: 4
  failureThreshold: 8
```
Scale back up: `kubectl scale deploy server --replicas 2` </br>
Create a faulty pod: `kubectl exec -it podname -- sh -c "rm /data.txt"`

If we observe, we will notice the readinessProbe saves our traffic, and the livenessProbe will eventually replace the bad pod </br>

## Startup Probes

The [startup probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes) is for slow starting applications </br>
It's important to understand the difference between startup and readiness probes. </br>
In our examples here, the readiness probe acts as a startup probe too, since our app is fairly slow to start! </br>
This difference is explained in the video. </br>
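The README doesn't ship a startup probe manifest; a minimal sketch for our server (fields are assumptions: it probes the same endpoint, with a generous 30 x 5s budget before the other probes take over) could look like:

```
startupProbe:
  httpGet:
    path: /
    port: 5000
  periodSeconds: 5
  failureThreshold: 30
```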
22
kubernetes/probes/client.yaml
Normal file
@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: client
  labels:
    app: client
spec:
  selector:
    matchLabels:
      app: client
  replicas: 1
  template:
    metadata:
      labels:
        app: client
    spec:
      containers:
      - name: client
        image: alpine:latest
        command:
        - sleep
        - "9999"
83
kubernetes/probes/server.yaml
Normal file
@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: server
  labels:
    app: server
spec:
  selector:
    matchLabels:
      app: server
  replicas: 1
  template:
    metadata:
      labels:
        app: server
        version: "1"
    spec:
      containers:
      - name: server
        image: python:alpine
        workingDir: /app
        command: ["/bin/sh"]
        args:
        - -c
        - "pip3 install --disable-pip-version-check --root-user-action=ignore flask && echo 'ok' > /data.txt && flask run -h 0.0.0.0 -p 5000"
        ports:
        - containerPort: 5000
        volumeMounts:
        - name: app
          mountPath: "/app"
        readinessProbe:
          httpGet:
            path: /
            port: 5000
          initialDelaySeconds: 3
          periodSeconds: 3
          failureThreshold: 3
        livenessProbe:
          httpGet:
            path: /
            port: 5000
          initialDelaySeconds: 3
          periodSeconds: 4
          failureThreshold: 8
      volumes:
      - name: app
        configMap:
          name: server-code
---
apiVersion: v1
kind: Service
metadata:
  name: server
  labels:
    app: server
spec:
  type: ClusterIP
  selector:
    app: server
  ports:
  - protocol: TCP
    name: http
    port: 80
    targetPort: 5000
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: server-code
data:
  app.py: |
    import time
    import logging
    import os.path

    logging.basicConfig(level=logging.DEBUG)

    from flask import Flask
    app = Flask(__name__)
    @app.route("/")
    def hello():
      with open('/data.txt') as data:
        return data.read()
@ -1,5 +1,7 @@
# Introduction to Rancher: On-prem Kubernetes

<a href="https://youtu.be/1j5lhDzlFUM" title="k8s-rancher"><img src="https://i.ytimg.com/vi/1j5lhDzlFUM/hqdefault.jpg" width="20%" alt="k8s-rancher" /></a>

This guide follows the general instructions of running a [manual rancher install](https://rancher.com/docs/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/) and running our own infrastructure on Hyper-V

# Hyper-V : Prepare our infrastructure
@ -1,7 +1,8 @@
# Introduction to Kubernetes: RBAC

<a href="https://youtu.be/jvhKOAyD8S8" title="k8s-rbac"><img src="https://i.ytimg.com/vi/jvhKOAyD8S8/hqdefault.jpg" width="20%" alt="k8s-rbac" /></a>

## Create Kubernetes cluster

```
kind create cluster --name rbac --image kindest/node:v1.20.2
Some files were not shown because too many files have changed in this diff