mirror of
https://github.com/marcel-dempers/docker-development-youtube-series.git
synced 2025-06-06 17:01:30 +00:00
commit
f8ac773d6a
17
.github/stale.yaml
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
daysUntilStale: 30
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- pinned
|
||||
- security
|
||||
# Label to use when marking an issue as stale
|
||||
staleLabel: wontfix
|
||||
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||
markComment: >
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions.
|
||||
# Comment to post when closing a stale issue. Set to `false` to disable
|
||||
closeComment: false
|
37
.github/workflows/docker._yaml
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
###########################################################
|
||||
# Rename the file extension to ".yaml" (remove "_") to enable
|
||||
###########################################################
|
||||
|
||||
name: Docker Series Builds
|
||||
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: docker login
|
||||
env:
|
||||
DOCKER_USER: ${{ secrets.DOCKER_USER }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
run: |
|
||||
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
|
||||
- name: docker build csharp
|
||||
run: |
|
||||
docker build ./c# -t aimvector/csharp:1.0.0
|
||||
- name: docker build nodejs
|
||||
run: |
|
||||
docker build ./nodejs -t aimvector/nodejs:1.0.0
|
||||
- name: docker build python
|
||||
run: |
|
||||
docker build ./python -t aimvector/python:1.0.0
|
||||
- name: docker build golang
|
||||
run: |
|
||||
docker build ./golang -t aimvector/golang:1.0.0
|
||||
- name: docker push
|
||||
run: |
|
||||
docker push aimvector/csharp:1.0.0
|
||||
docker push aimvector/nodejs:1.0.0
|
||||
docker push aimvector/golang:1.0.0
|
||||
docker push aimvector/python:1.0.0
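The login step expects `DOCKER_USER` and `DOCKER_PASSWORD` to exist as repository secrets. One way to create them, assuming you use the GitHub CLI (the repository Settings UI works just as well):
```
# create the repository secrets the workflow reads (gh prompts for each value)
gh secret set DOCKER_USER
gh secret set DOCKER_PASSWORD
```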
|
34
.github/workflows/docker.yml
vendored
@ -1,34 +0,0 @@
|
||||
# name: Docker Series Builds
|
||||
|
||||
# #uncomment to enable push trigger
|
||||
# #on: [push]
|
||||
|
||||
# jobs:
|
||||
# build:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - uses: actions/checkout@v2
|
||||
# - name: docker login
|
||||
# env:
|
||||
# DOCKER_USER: ${{ secrets.DOCKER_USER }}
|
||||
# DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
# run: |
|
||||
# docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
|
||||
# - name: docker build csharp
|
||||
# run: |
|
||||
# docker build ./c# -t aimvector/csharp:1.0.0
|
||||
# - name: docker build nodejs
|
||||
# run: |
|
||||
# docker build ./nodejs -t aimvector/nodejs:1.0.0
|
||||
# - name: docker build python
|
||||
# run: |
|
||||
# docker build ./python -t aimvector/python:1.0.0
|
||||
# - name: docker build golang
|
||||
# run: |
|
||||
# docker build ./golang -t aimvector/golang:1.0.0
|
||||
# - name: docker push
|
||||
# run: |
|
||||
# docker push aimvector/csharp:1.0.0
|
||||
# docker push aimvector/nodejs:1.0.0
|
||||
# docker push aimvector/golang:1.0.0
|
||||
# docker push aimvector/python:1.0.0
|
5
.gitignore
vendored
@ -4,3 +4,8 @@ node_modules/
|
||||
__pycache__/
|
||||
*.pem
|
||||
*.csr
|
||||
# terraform
|
||||
.terraform
|
||||
*.tfstate
|
||||
*.tfstate.*
|
||||
security/letsencrypt/introduction/certs/**
|
88
README.md
@ -1,71 +1,37 @@
|
||||
# Docker Development Guide
|
||||
## a Youtube Series
|
||||
# The Ultimate Engineer Toolbox <img src="https://www.shareicon.net/data/128x128/2017/04/11/883708_media_512x512.png" alt="YouTube" width="5%" height="5%"> :hammer::wrench:
|
||||
|
||||
Hi!
|
||||
A Collection of tools, hands-on walkthroughs with source code. <br/>
|
||||
The Ultimate Swiss Army knife for DevOps, Developers and Platform Engineers
|
||||
|
||||
<br/>
|
||||
|
||||
|
||||
| Steps | Playlist :tv: | Source :octocat: |
|
||||
|---|---|---|
|
||||
| Learn Kubernetes :snowflake: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVvUEdqaBeMK2awVThNujwMd" title="Kubernetes"><img src="https://i.ytimg.com/vi/8h4FoWK7tIA/hqdefault.jpg" width="15%" height="15%" alt="Kubernetes Guide" /></a> | [source](./kubernetes/README.md) |
|
||||
| Learn about CI/CD tools :whale: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVsSsrnZimHEf7NJ1MlRhQUj" title="CI/CD"><img src="https://i.ytimg.com/vi/myCcJJ_Fk10/hqdefault.jpg" width="15%" height="15%" alt="CI/CD Guide" /></a> | |
|
||||
| Deploy Kubernetes to the cloud :partly_sunny: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVsUhzNBkn-rPzXtPNpJu1-k" title="Cloud K8s"><img src="https://i.ytimg.com/vi/3jA9EfkSAUU/hqdefault.jpg" width="15%" height="15%" alt="Cloud Guide" /></a> | [source](./kubernetes/cloud/README.md) |
|
||||
| Monitoring Kubernetes :mag: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVuEXCrRkPFWLXRKWNLOVUHn" title="Cloud K8s"><img src="https://i.ytimg.com/vi/5o37CGlNLr8/hqdefault.jpg" width="15%" height="15%" alt="Monitoring Guide" /></a> | [source](./monitoring/prometheus/kubernetes/README.md) |
|
||||
| Guide to Logging :page_with_curl: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVvfDxFW50Mdezk0xum-tyHT" title="Cloud K8s"><img src="https://i.ytimg.com/vi/MMVdkzeQ848/hqdefault.jpg" width="15%" height="15%" alt="Logging Guide" /></a> | [source](./monitoring/logging/README.md) |
|
||||
| Guide to ServiceMesh :globe_with_meridians: | <a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVsmxHpGsMjTOROn3i99lzTA" title="Cloud K8s"><img src="https://i.ytimg.com/vi/rVNPnHeGYBE/hqdefault.jpg" width="15%" height="15%" alt="ServiceMesh Guide" /></a> | [source](./kubernetes/servicemesh/README.md) |
|
||||
|
||||
This is the source code for the YouTube series covering docker-based development workflows.
|
||||
|
||||
## Docker Development Basics
|
||||
|
||||
Part #1: The Dockerfiles (.NET, Golang, Python, NodeJS) <br/>
|
||||
Video: https://youtu.be/wyjNpxLRmLg <br/>
|
||||
Source code for Part #1: https://github.com/marcel-dempers/docker-development-youtube-series/tree/part1
|
||||
|
||||
Part #2: The code (.NET, Golang, Python, NodeJS) <br/>
|
||||
Video: https://youtu.be/EdmKENqnQUw <br/>
|
||||
Source code for Part #2 https://github.com/marcel-dempers/docker-development-youtube-series/tree/part2
|
||||
| Step :heavy_check_mark: | Video :movie_camera: | Source Code :octocat: |
|
||||
|---|---|---|
|
||||
| Working with `Dockerfiles` <br/>(.NET, Golang, Python, NodeJS) | <a href="https://youtu.be/wyjNpxLRmLg" title="Docker 1"><img src="https://i.ytimg.com/vi/wyjNpxLRmLg/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/part1) |
|
||||
| Working with code <br/>(.NET, Golang, Python, NodeJS) | <a href="https://youtu.be/EdmKENqnQUw" title="Docker 1"><img src="https://i.ytimg.com/vi/EdmKENqnQUw/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/part2) |
|
||||
| Docker Multistage explained | <a href="https://youtu.be/2lQ7WrwpZfI" title="Docker 1"><img src="https://i.ytimg.com/vi/2lQ7WrwpZfI/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/part3) |
|
||||
| Debugging Go in Docker | <a href="https://youtu.be/kToyI16IFxs" title="Docker 1"><img src="https://i.ytimg.com/vi/kToyI16IFxs/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/master/golang) |
|
||||
| Debugging .NET in Docker | <a href="https://youtu.be/ds2bud0ZYTY" title="Docker 1"><img src="https://i.ytimg.com/vi/ds2bud0ZYTY/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/part5) |
|
||||
| Debugging Python in Docker | <a href="https://youtu.be/b78Tg-YmJZI" title="Docker 1"><img src="https://i.ytimg.com/vi/b78Tg-YmJZI/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/debugging-python) |
|
||||
| Debugging NodeJS in Docker | <a href="https://youtu.be/ktvgr9VZ4dc" title="Docker 1"><img src="https://i.ytimg.com/vi/ktvgr9VZ4dc/hqdefault.jpg" width="15%" height="15%" alt="Docker 1" /></a> | [source](https://github.com/marcel-dempers/docker-development-youtube-series/tree/master/nodejs) |
|
||||
|
||||
Part #3: Docker Multistage explained <br/>
|
||||
Video: https://youtu.be/2lQ7WrwpZfI <br/>
|
||||
Source code for Part #2 https://github.com/marcel-dempers/docker-development-youtube-series/tree/part3
|
||||
|
||||
Part #4: Debugging Golang code in Docker <br/>
|
||||
Video: https://youtu.be/kToyI16IFxs <br/>
|
||||
Source code for Part #4 https://github.com/marcel-dempers/docker-development-youtube-series/tree/part4
|
||||
|
||||
Part #5: Debugging .NET Core code in Docker <br/>
|
||||
Video: https://youtu.be/ds2bud0ZYTY <br/>
|
||||
Source code for Part #5 https://github.com/marcel-dempers/docker-development-youtube-series/tree/part5
|
||||
|
||||
Part #6: Debugging Python code in Docker using VSCode <br/>
|
||||
Video: https://youtu.be/b78Tg-YmJZI <br/>
|
||||
Source code for Part #6 https://github.com/marcel-dempers/docker-development-youtube-series/tree/debugging-python
|
||||
## Engineering Toolbox :hammer::wrench:
|
||||
|
||||
|
||||
## Prometheus Monitoring
|
||||
Checkout the toolbox [website](https://marceldempers.dev/toolbox)
|
||||
|
||||
Application and Server monitoring <br/>
|
||||
Let's take a look how to monitor application code using Prometheus.
|
||||
|
||||
See the [Prometheus Monitoring](./prometheus-monitoring/readme.md) readme guide for detailed steps
|
||||
|
||||
## Kubernetes Development Basics
|
||||
|
||||
See the [Kubernetes Guide](./kubernetes/readme.md) readme guide for detailed steps
|
||||
|
||||
Part #1 Kubernetes Getting Started on Windows <br/>
|
||||
Video: https://youtu.be/8h4FoWK7tIA <br/>
|
||||
|
||||
Part #2 Kubernetes kubectl | the basics <br/>
|
||||
Video: https://youtu.be/feLpGydQVio <br/>
|
||||
|
||||
Part #3 Kubernetes deployments | the basics <br/>
|
||||
Video: https://youtu.be/DMpEZEakYVc <br/>
|
||||
|
||||
Part #4 Kubernetes config management | the basics <br/>
|
||||
Video: https://youtu.be/o-gXx7r7Rz4 <br/>
|
||||
|
||||
Part #5 Kubernetes secrets | the basics <br/>
|
||||
Video: https://youtu.be/o36yTfGDmZ0 <br/>
|
||||
|
||||
Part #6 Kubernetes load balancing and services | the basics <br/>
|
||||
Video: https://youtu.be/xhva6DeKqVU <br/>
|
||||
|
||||
Part #7 Kubernetes ingress | the basics <br/>
|
||||
Video: https://youtu.be/izWCkcJAzBw <br/>
|
||||
|
||||
Kubernetes in the Cloud
|
||||
|
||||
Checkout my series on running Kubernetes in the Cloud [here](./kubernetes/cloud/readme.md) <br/>
|
||||
|
||||
More details coming soon!
|
||||
<a href="https://marceldempers.dev/toolbox" title="toolbox 1"><img src="./toolbox.png" alt="toolbox 1" /></a>
|
52
flux/readme.md
Normal file
@ -0,0 +1,52 @@
|
||||
# Flux Getting Started Guide
|
||||
|
||||
# 1 - Kubernetes
|
||||
|
||||
Get a Kubernetes Cluster. In this video, I use Docker for Windows.
|
||||
If you are new to Kubernetes, checkout my videos [here](https://marceldempers.dev/videos/guides/kubernetes-getting-started)
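If you are not on Docker for Windows, any local cluster will do; for example a throwaway kind cluster (an assumption on my part — the video itself uses Docker for Windows):
```
# a disposable local cluster for this demo (cluster name is illustrative)
kind create cluster --name flux
kubectl config current-context
```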
|
||||
|
||||
# 2 - Flux CTL
|
||||
|
||||
I used Flux 1.18 which I got from the GitHub [Release Page](https://github.com/fluxcd/flux/releases/tag/1.18.0)
|
||||
Rename it to `fluxctl.exe` & place it in a folder that is on your `$env:Path` environment variable.
|
||||
Open a new terminal and try
|
||||
```
|
||||
fluxctl
|
||||
```
|
||||
|
||||
# 3 - Installing Flux
|
||||
|
||||
Make sure you are pointing to the Kubernetes cluster you want to use:
|
||||
```
|
||||
kubectl config current-context
|
||||
kubectl get nodes
|
||||
```
|
||||
```
|
||||
kubectl create ns flux
|
||||
|
||||
$GHUSER = "marcel-dempers"
|
||||
fluxctl install `
|
||||
--git-user=${GHUSER} `
|
||||
--git-email=${GHUSER}@users.noreply.github.com `
|
||||
--git-url=git@github.com:${GHUSER}/docker-development-youtube-series `
|
||||
--git-path=kubernetes/configmaps,kubernetes/secrets,kubernetes/deployments `
|
||||
--git-branch=flux-test `
|
||||
--namespace=flux | kubectl apply -f -
|
||||
|
||||
kubectl -n flux rollout status deployment/flux
|
||||
|
||||
$env:FLUX_FORWARD_NAMESPACE = "flux"
|
||||
fluxctl list-workloads
|
||||
fluxctl identity
|
||||
|
||||
|
||||
https://github.com/marcel-dempers/docker-development-youtube-series/settings/keys/new
|
||||
|
||||
fluxctl sync
|
||||
|
||||
annotations:
|
||||
fluxcd.io/tag.example-app: semver:~1.0
|
||||
fluxcd.io/automated: 'true'
|
||||
|
||||
fluxctl policy -w default:deployment/example-deploy --tag "example-app=1.0.*"
|
||||
```
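The `fluxcd.io` annotations above belong in the workload's metadata. A minimal sketch of where they sit, reusing the `example-deploy` / `example-app` names from the `fluxctl policy` command (the image and the rest of the spec are illustrative assumptions):
```
cat <<'EOF' | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-deploy
  annotations:
    fluxcd.io/automated: 'true'
    fluxcd.io/tag.example-app: semver:~1.0
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
      - name: example-app   # the suffix of the tag.* annotation must match this container name
        image: aimvector/python:1.0.0
EOF
```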
|
@ -17,8 +17,8 @@ kind create cluster --name vault --kubeconfig ~/.kube/kind-vault --image kindest
|
||||
|
||||
## TLS End to End Encryption
|
||||
|
||||
VIDEO: <Coming-Soon>
|
||||
See steps in `hashicorp/vault/tls/ssl_generate_self_signed.txt`
|
||||
VIDEO: ```<Coming-Soon>```
|
||||
See steps in [./tls/ssl_generate_self_signed.txt](./tls/ssl_generate_self_signed.txt)
|
||||
You'll need to generate TLS certs (or bring your own)
|
||||
Create base64 strings from the files, place them in `server-tls-secret.yaml` and apply it.
|
||||
Remember not to check your TLS certs in to Git :)
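A rough sketch of producing those base64 strings (the cert and key file names are assumptions based on the self-signed certificate guide):
```
# base64-encode the cert and key for server-tls-secret.yaml
cat vault.crt | base64 | tr -d '\n'
cat vault.key | base64 | tr -d '\n'

# paste the output into server-tls-secret.yaml, then apply it
kubectl -n vault-example apply -f server-tls-secret.yaml
```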
|
||||
@ -38,21 +38,20 @@ kubectl -n vault-example get pvc
|
||||
```
|
||||
ensure vault-claim is bound, if not, `kubectl -n vault-example describe pvc vault-claim`
|
||||
ensure correct storage class is used for your cluster.
|
||||
if you need to change the storage class, deleve the pvc , edit YAML and re-apply
|
||||
if you need to change the storage class, delete the pvc, edit YAML and re-apply
|
||||
|
||||
## Initialising Vault
|
||||
|
||||
```
|
||||
kubectl -n vault-example exec -it vault-example-0 vault operator init
|
||||
#unseal 3 times
|
||||
# unseal 3 times
|
||||
kubectl -n vault-example exec -it vault-example-0 vault operator unseal
|
||||
kubectl -n vault-example get pods
|
||||
```
|
||||
|
||||
## Depploy the Injector
|
||||
|
||||
VIDEO: <Coming-Soon>
|
||||
## Deploy the Injector
|
||||
|
||||
VIDEO: ```<Coming-Soon>```
|
||||
The injector allows pods to automatically get secrets from the vault.
|
||||
|
||||
```
|
||||
@ -68,7 +67,6 @@ For the injector to be authorised to access vault, we need to enable K8s auth
|
||||
kubectl -n vault-example exec -it vault-example-0 vault login
|
||||
kubectl -n vault-example exec -it vault-example-0 vault auth enable kubernetes
|
||||
|
||||
|
||||
kubectl -n vault-example exec -it vault-example-0 sh
|
||||
vault write auth/kubernetes/config \
|
||||
token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
|
||||
@ -77,7 +75,6 @@ kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
exit
|
||||
|
||||
kubectl -n vault-example get pods
|
||||
|
||||
```
|
||||
|
||||
# Summary
|
||||
@ -108,7 +105,7 @@ Objective:
|
||||
* Let's delegate Vault to manage life cycles of our database credentials
|
||||
* Deploy an app that automatically gets its credentials from vault
|
||||
|
||||
[Try it](./example-apps/basic-secret/readme.md)
|
||||
[Try it](./example-apps/dynamic-postgresql/readme.md)
|
||||
|
||||
|
||||
|
||||
|
15
jenkins/amazon-eks/jenkins.pv.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: jenkins
|
||||
spec:
|
||||
capacity:
|
||||
storage: 5Gi
|
||||
volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: efs-sc
|
||||
csi:
|
||||
driver: efs.csi.aws.com
|
||||
volumeHandle: fs-92b853aa
|
11
jenkins/amazon-eks/jenkins.pvc.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: jenkins-claim
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
storageClassName: efs-sc
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
184
jenkins/amazon-eks/readme.md
Normal file
@ -0,0 +1,184 @@
|
||||
# Jenkins on Amazon Kubernetes
|
||||
|
||||
## Create a cluster
|
||||
|
||||
Follow my Introduction to Amazon EKS for beginners guide to create a cluster <br/>
|
||||
Video [here](https://youtu.be/QThadS3Soig)
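If you prefer a one-liner, the EKS guide later in this repo creates a similar cluster with `eksctl`; a trimmed sketch of that command (region and sizes mirror `kubernetes/cloud/amazon/readme.md` — adjust to taste):
```
eksctl create cluster --name getting-started-eks \
  --region ap-southeast-2 \
  --managed \
  --node-type t2.small \
  --nodes 1 \
  --node-volume-size 200
```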
|
||||
|
||||
## Setup our Cloud Storage
|
||||
|
||||
```
|
||||
# deploy EFS storage driver
|
||||
kubectl apply -k "github.com/kubernetes-sigs/aws-efs-csi-driver/deploy/kubernetes/overlays/stable/?ref=master"
|
||||
|
||||
# get VPC ID
|
||||
aws eks describe-cluster --name getting-started-eks --query "cluster.resourcesVpcConfig.vpcId" --output text
|
||||
# Get CIDR range
|
||||
aws ec2 describe-vpcs --vpc-ids vpc-id --query "Vpcs[].CidrBlock" --output text
|
||||
|
||||
# security for our instances to access file storage
|
||||
aws ec2 create-security-group --description efs-test-sg --group-name efs-sg --vpc-id VPC_ID
|
||||
aws ec2 authorize-security-group-ingress --group-id sg-xxx --protocol tcp --port 2049 --cidr VPC_CIDR
|
||||
|
||||
# create storage
|
||||
aws efs create-file-system --creation-token eks-efs
|
||||
|
||||
# create mount point
|
||||
aws efs create-mount-target --file-system-id FileSystemId --subnet-id SubnetID --security-group GroupID
|
||||
|
||||
# grab our volume handle to update our PV YAML
|
||||
aws efs describe-file-systems --query "FileSystems[*].FileSystemId" --output text
|
||||
```
|
||||
|
||||
More details about EKS storage [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-persistent-storage/)
|
||||
|
||||
### Setup a namespace
|
||||
```
|
||||
kubectl create ns jenkins
|
||||
```
|
||||
|
||||
### Setup our storage for Jenkins
|
||||
|
||||
```
|
||||
kubectl get storageclass
|
||||
|
||||
# create volume
|
||||
kubectl apply -f ./jenkins/amazon-eks/jenkins.pv.yaml
|
||||
kubectl get pv
|
||||
|
||||
# create volume claim
|
||||
kubectl apply -n jenkins -f ./jenkins/amazon-eks/jenkins.pvc.yaml
|
||||
kubectl -n jenkins get pvc
|
||||
```
|
||||
|
||||
### Deploy Jenkins
|
||||
|
||||
```
|
||||
# rbac
|
||||
kubectl apply -n jenkins -f ./jenkins/jenkins.rbac.yaml
|
||||
|
||||
kubectl apply -n jenkins -f ./jenkins/jenkins.deployment.yaml
|
||||
|
||||
kubectl -n jenkins get pods
|
||||
|
||||
```
|
||||
|
||||
### Expose a service for agents
|
||||
|
||||
```
|
||||
|
||||
kubectl apply -n jenkins -f ./jenkins/jenkins.service.yaml
|
||||
|
||||
```
|
||||
|
||||
## Jenkins Initial Setup
|
||||
|
||||
```
|
||||
kubectl -n jenkins exec -it <podname> cat /var/jenkins_home/secrets/initialAdminPassword
|
||||
kubectl port-forward -n jenkins <podname> 8080
|
||||
|
||||
# setup user and recommended basic plugins
|
||||
# let it continue while we move on!
|
||||
|
||||
```
|
||||
|
||||
## SSH to our node to get Docker user info
|
||||
|
||||
```
|
||||
eval $(ssh-agent)
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
ssh -i ~/.ssh/id_rsa ec2-user@ec2-13-239-41-67.ap-southeast-2.compute.amazonaws.com
|
||||
id -u docker
|
||||
cat /etc/group
|
||||
# Get user ID for docker
|
||||
# Get group ID for docker
|
||||
```
|
||||
## Docker Jenkins Agent
|
||||
|
||||
Docker file is [here](../dockerfiles/dockerfile) <br/>
|
||||
|
||||
```
|
||||
# you can build it
|
||||
|
||||
cd ./jenkins/dockerfiles/
|
||||
docker build . -t aimvector/jenkins-slave
|
||||
|
||||
```
|
||||
|
||||
## Continue Jenkins setup
|
||||
|
||||
|
||||
Install Kubernetes Plugin <br/>
|
||||
Configure Plugin: Values I used are [here](../readme.md) <br/>
|
||||
|
||||
|
||||
|
||||
## Try a pipeline
|
||||
|
||||
```
|
||||
pipeline {
|
||||
agent {
|
||||
kubernetes{
|
||||
label 'jenkins-slave'
|
||||
}
|
||||
|
||||
}
|
||||
environment{
|
||||
DOCKER_USERNAME = credentials('DOCKER_USERNAME')
|
||||
DOCKER_PASSWORD = credentials('DOCKER_PASSWORD')
|
||||
}
|
||||
stages {
|
||||
stage('docker login') {
|
||||
steps{
|
||||
sh(script: """
|
||||
docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
|
||||
""", returnStdout: true)
|
||||
}
|
||||
}
|
||||
|
||||
stage('git clone') {
|
||||
steps{
|
||||
sh(script: """
|
||||
git clone https://github.com/marcel-dempers/docker-development-youtube-series.git
|
||||
""", returnStdout: true)
|
||||
}
|
||||
}
|
||||
|
||||
stage('docker build') {
|
||||
steps{
|
||||
sh script: '''
|
||||
#!/bin/bash
|
||||
cd $WORKSPACE/docker-development-youtube-series/python
|
||||
docker build . --network host -t aimvector/python:${BUILD_NUMBER}
|
||||
'''
|
||||
}
|
||||
}
|
||||
|
||||
stage('docker push') {
|
||||
steps{
|
||||
sh(script: """
|
||||
docker push aimvector/python:${BUILD_NUMBER}
|
||||
""")
|
||||
}
|
||||
}
|
||||
|
||||
stage('deploy') {
|
||||
steps{
|
||||
sh script: '''
|
||||
#!/bin/bash
|
||||
cd $WORKSPACE/docker-development-youtube-series/
|
||||
#get kubectl for this demo
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
./kubectl apply -f ./kubernetes/configmaps/configmap.yaml
|
||||
./kubectl apply -f ./kubernetes/secrets/secret.yaml
|
||||
cat ./kubernetes/deployments/deployment.yaml | sed s/1.0.0/${BUILD_NUMBER}/g | ./kubectl apply -f -
|
||||
./kubectl apply -f ./kubernetes/services/service.yaml
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -1,94 +1,94 @@
|
||||
#!/usr/bin/env sh
|
||||
|
||||
# The MIT License
|
||||
#
|
||||
# Copyright (c) 2015, CloudBees, Inc.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
|
||||
# Usage jenkins-slave.sh [options] -url http://jenkins [SECRET] [AGENT_NAME]
|
||||
# Optional environment variables :
|
||||
# * JENKINS_TUNNEL : HOST:PORT for a tunnel to route TCP traffic to jenkins host, when jenkins can't be directly accessed over network
|
||||
# * JENKINS_URL : alternate jenkins URL
|
||||
# * JENKINS_SECRET : agent secret, if not set as an argument
|
||||
# * JENKINS_AGENT_NAME : agent name, if not set as an argument
|
||||
# * JENKINS_AGENT_WORKDIR : agent work directory, if not set by optional parameter -workDir
|
||||
|
||||
if [ $# -eq 1 ]; then
|
||||
|
||||
# if `docker run` only has one argument, we assume the user is running an alternate command like `bash` to inspect the image
|
||||
exec "$@"
|
||||
|
||||
else
|
||||
|
||||
# if -tunnel is not provided try env vars
|
||||
case "$@" in
|
||||
*"-tunnel "*) ;;
|
||||
*)
|
||||
if [ ! -z "$JENKINS_TUNNEL" ]; then
|
||||
TUNNEL="-tunnel $JENKINS_TUNNEL"
|
||||
fi ;;
|
||||
esac
|
||||
|
||||
# if -workDir is not provided try env vars
|
||||
if [ ! -z "$JENKINS_AGENT_WORKDIR" ]; then
|
||||
case "$@" in
|
||||
*"-workDir"*) echo "Warning: Work directory is defined twice in command-line arguments and the environment variable" ;;
|
||||
*)
|
||||
WORKDIR="-workDir $JENKINS_AGENT_WORKDIR" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ -n "$JENKINS_URL" ]; then
|
||||
URL="-url $JENKINS_URL"
|
||||
fi
|
||||
|
||||
if [ -n "$JENKINS_NAME" ]; then
|
||||
JENKINS_AGENT_NAME="$JENKINS_NAME"
|
||||
fi
|
||||
|
||||
if [ -z "$JNLP_PROTOCOL_OPTS" ]; then
|
||||
echo "Warning: JnlpProtocol3 is disabled by default, use JNLP_PROTOCOL_OPTS to alter the behavior"
|
||||
JNLP_PROTOCOL_OPTS="-Dorg.jenkinsci.remoting.engine.JnlpProtocol3.disabled=true"
|
||||
fi
|
||||
|
||||
# If both required options are defined, do not pass the parameters
|
||||
OPT_JENKINS_SECRET=""
|
||||
if [ -n "$JENKINS_SECRET" ]; then
|
||||
case "$@" in
|
||||
*"${JENKINS_SECRET}"*) echo "Warning: SECRET is defined twice in command-line arguments and the environment variable" ;;
|
||||
*)
|
||||
OPT_JENKINS_SECRET="${JENKINS_SECRET}" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
OPT_JENKINS_AGENT_NAME=""
|
||||
if [ -n "$JENKINS_AGENT_NAME" ]; then
|
||||
case "$@" in
|
||||
*"${JENKINS_AGENT_NAME}"*) echo "Warning: AGENT_NAME is defined twice in command-line arguments and the environment variable" ;;
|
||||
*)
|
||||
OPT_JENKINS_AGENT_NAME="${JENKINS_AGENT_NAME}" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
#TODO: Handle the case when the command-line and Environment variable contain different values.
|
||||
#It is fine it blows up for now since it should lead to an error anyway.
|
||||
|
||||
exec java $JAVA_OPTS $JNLP_PROTOCOL_OPTS -cp /usr/share/jenkins/slave.jar hudson.remoting.jnlp.Main -headless $TUNNEL $URL $WORKDIR $OPT_JENKINS_SECRET $OPT_JENKINS_AGENT_NAME "$@"
|
||||
fi
|
@ -1,48 +1,42 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jenkins
|
||||
labels:
|
||||
name: jenkins
|
||||
app: jenkins
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: jenkins
|
||||
app: jenkins
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: jenkins
|
||||
name: jenkins
|
||||
name: jenkins
|
||||
spec:
|
||||
serviceAccountName: jenkins
|
||||
containers:
|
||||
- env:
|
||||
- name: jenkins
|
||||
image: jenkins/jenkins:2.235.1-lts-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: JAVA_OPTS
|
||||
value: -Xmx2048m -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85
|
||||
image: jenkins/jenkins #:lts-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: jenkins
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
- containerPort: 50000
|
||||
protocol: TCP
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: "1"
|
||||
# memory: 1Gi
|
||||
# requests:
|
||||
# cpu: "1"
|
||||
# memory: 1Gi
|
||||
volumeMounts:
|
||||
- mountPath: /var/jenkins_home
|
||||
name: jenkins
|
||||
restartPolicy: Always
|
||||
securityContext:
|
||||
#fsGroup: 1000
|
||||
runAsUser: 0
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
|
@ -4,7 +4,7 @@ kind: ServiceAccount
|
||||
metadata:
|
||||
name: jenkins
|
||||
---
|
||||
kind: Role
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: jenkins
|
||||
@ -20,7 +20,19 @@ rules:
|
||||
verbs: ["get","list","watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
verbs: ["create","delete","get","list","patch","update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create","delete","get","list","patch","update"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["deployments"]
|
||||
verbs: ["create","delete","get","list","patch","update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["create","delete","get","list","patch","update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["create","delete","get","list","patch","update"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@ -29,7 +41,7 @@ metadata:
|
||||
name: jenkins
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
kind: ClusterRole
|
||||
name: jenkins
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
|
@ -1,3 +1,13 @@
|
||||
|
||||
# Jenkins on Amazon Kubernetes
|
||||
|
||||
For running Jenkins on AMAZON, start [here](./amazon-eks/readme.md)
|
||||
|
||||
# Jenkins on Local (Docker Windows \ Minikube \ etc)
|
||||
|
||||
For running Jenkins on Local Docker for Windows or Minikube <br/>
|
||||
Watch the [video](https://youtu.be/eRWIJGF3Y2g)
|
||||
|
||||
# Setting up Jenkins Agent
|
||||
|
||||
After installing `kubernetes-plugin` for Jenkins
|
||||
@ -14,6 +24,7 @@ After installing `kubernetes-plugin` for Jenkins
|
||||
* Add Kubernetes Pod Template
|
||||
* Name: jenkins-slave
|
||||
* Namespace: jenkins
|
||||
* Service Account: jenkins
|
||||
* Labels: jenkins-slave (you will need to use this label on all jobs)
|
||||
* Containers | Add Template
|
||||
* Name: jnlp
|
||||
|
22
kubernetes/autoscaling/components/application/app.go
Normal file
@ -0,0 +1,22 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func main(){
|
||||
http.HandleFunc("/", useCPU)
|
||||
http.ListenAndServe(":80", nil)
|
||||
}
|
||||
|
||||
func useCPU(w http.ResponseWriter, r *http.Request) {
|
||||
count := 1
|
||||
|
||||
for i := 1; i <= 1000000; i++ {
|
||||
count = i
|
||||
}
|
||||
|
||||
fmt.Printf("count: %d", count)
|
||||
w.Write([]byte(fmt.Sprintf("count: %d", count))) // string(count) would write a rune, not the number
|
||||
}
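To sanity-check the handler before containerising it, a quick local run (assumes a local Go toolchain; port 80 may need elevated privileges on your machine):
```
go run app.go &
curl http://localhost:80/
```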
|
@ -0,0 +1,50 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: application-cpu
|
||||
labels:
|
||||
app: application-cpu
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: application-cpu
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 80
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: application-cpu
|
||||
labels:
|
||||
app: application-cpu
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: application-cpu
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: application-cpu
|
||||
spec:
|
||||
containers:
|
||||
- name: application-cpu
|
||||
image: aimvector/application-cpu:v1.0.2
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 80
|
||||
resources:
|
||||
requests:
|
||||
memory: "50Mi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "500Mi"
|
||||
cpu: "2000m"
|
15
kubernetes/autoscaling/components/application/dockerfile
Normal file
@ -0,0 +1,15 @@
|
||||
FROM golang:1.14-alpine as build
|
||||
|
||||
RUN apk add --no-cache git curl
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
COPY app.go /src
|
||||
|
||||
RUN go build app.go
|
||||
|
||||
FROM alpine as runtime
|
||||
|
||||
COPY --from=build /src/app /app/app
|
||||
|
||||
CMD [ "/app/app" ]
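To try the image locally before pushing it, a small sketch (the `application-cpu:dev` tag is illustrative; the readme builds it as `aimvector/application-cpu`):
```
docker build . -t application-cpu:dev
docker run --rm -p 8080:80 application-cpu:dev
curl http://localhost:8080/
```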
|
@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: traffic-generator
|
||||
spec:
|
||||
containers:
|
||||
- name: alpine
|
||||
image: alpine
|
||||
args:
|
||||
- sleep
|
||||
- "100000000"
|
@ -0,0 +1,153 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: system:aggregated-metrics-reader
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rules:
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: metrics-server:system:auth-delegator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: metrics-server-auth-reader
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: extension-apiserver-authentication-reader
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apiregistration.k8s.io/v1beta1
|
||||
kind: APIService
|
||||
metadata:
|
||||
name: v1beta1.metrics.k8s.io
|
||||
spec:
|
||||
service:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
group: metrics.k8s.io
|
||||
version: v1beta1
|
||||
insecureSkipTLSVerify: true
|
||||
groupPriorityMinimum: 100
|
||||
versionPriority: 100
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: metrics-server
|
||||
template:
|
||||
metadata:
|
||||
name: metrics-server
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
serviceAccountName: metrics-server
|
||||
volumes:
|
||||
# mount in tmp so we can safely use from-scratch images and/or read-only containers
|
||||
- name: tmp-dir
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: metrics-server
|
||||
image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --cert-dir=/tmp
|
||||
- --secure-port=4443
|
||||
- --kubelet-insecure-tls #remove these for production: only used for kind
|
||||
- --kubelet-preferred-address-types="InternalIP" #remove these for production: only used for kind
|
||||
ports:
|
||||
- name: main-port
|
||||
containerPort: 4443
|
||||
protocol: TCP
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
volumeMounts:
|
||||
- name: tmp-dir
|
||||
mountPath: /tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
kubernetes.io/arch: "amd64"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/name: "Metrics-server"
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: metrics-server
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
targetPort: main-port
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- nodes/stats
|
||||
- namespaces
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:metrics-server
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
181
kubernetes/autoscaling/readme.md
Normal file
@ -0,0 +1,181 @@
|
||||
# Kubernetes Autoscaling Guide
|
||||
|
||||
## Cluster Autoscaling
|
||||
|
||||
Cluster autoscaler allows us to scale cluster nodes when they become full <br/>
|
||||
I would recommend learning about scaling your cluster nodes before scaling pods. <br/>
|
||||
Video [here](https://youtu.be/jM36M39MA3I)
|
||||
|
||||
## Horizontal Pod Autoscaling
|
||||
|
||||
HPA allows us to scale pods when their resource utilisation goes over a threshold <br/>
|
||||
|
||||
## Requirements
|
||||
|
||||
### A Cluster
|
||||
|
||||
* For both autoscaling guides, we'll need a cluster. <br/>
|
||||
* For `Cluster Autoscaler`, you need a cloud-based cluster that supports the cluster autoscaler <br/>
|
||||
* For `HPA`, we'll use [kind](http://kind.sigs.k8s.io/)
|
||||
|
||||
### Cluster Autoscaling - Creating an AKS Cluster
|
||||
|
||||
```
|
||||
# azure example
|
||||
|
||||
NAME=aks-getting-started
|
||||
RESOURCEGROUP=aks-getting-started
|
||||
SERVICE_PRINCIPAL=
|
||||
SERVICE_PRINCIPAL_SECRET=
|
||||
|
||||
az aks create -n $NAME \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--location australiaeast \
|
||||
--kubernetes-version 1.16.10 \
|
||||
--nodepool-name default \
|
||||
--node-count 1 \
|
||||
--node-vm-size Standard_F4s_v2 \
|
||||
--node-osdisk-size 250 \
|
||||
--service-principal $SERVICE_PRINCIPAL \
|
||||
--client-secret $SERVICE_PRINCIPAL_SECRET \
|
||||
--output none \
|
||||
--enable-cluster-autoscaler \
|
||||
--min-count 1 \
|
||||
--max-count 5
|
||||
```
|
||||
|
||||
### Horizontal Pod Autoscaling - Creating a Kind Cluster
|
||||
|
||||
My Node has 6 CPU cores for this demo <br/>
|
||||
|
||||
```
|
||||
kind create cluster --name hpa --image kindest/node:v1.18.4
|
||||
```
|
||||
|
||||
### Metric Server
|
||||
|
||||
* For `Cluster Autoscaler` - On cloud-based clusters, Metric server may already be installed. <br/>
|
||||
* For `HPA` - We're using kind
|
||||
|
||||
[Metric Server](https://github.com/kubernetes-sigs/metrics-server) provides container resource metrics for use in autoscaling pipelines <br/>
|
||||
|
||||
Because I run K8s `1.18` in `kind`, the Metric Server version I need is `0.3.7` <br/>
|
||||
We will need to deploy Metric Server [0.3.7](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.3.7) <br/>
|
||||
I used `components.yaml` from the release page link above. <br/>
|
||||
|
||||
<b>Important Note</b> : For Demo clusters (like `kind`), you will need to disable TLS <br/>
|
||||
You can disable TLS by adding the following to the metrics-server container args <br/>
|
||||
|
||||
<b>For production, make sure you remove the following :</b> <br/>
|
||||
|
||||
```
|
||||
- --kubelet-insecure-tls
|
||||
- --kubelet-preferred-address-types="InternalIP"
|
||||
|
||||
```
|
||||
|
||||
Deployment: <br/>
|
||||
|
||||
|
||||
```
|
||||
cd kubernetes\autoscaling
|
||||
kubectl -n kube-system apply -f .\components\metric-server\metricserver-0.3.7.yaml
|
||||
|
||||
#test
|
||||
kubectl -n kube-system get pods
|
||||
|
||||
#note: wait for metrics to populate!
|
||||
kubectl top nodes
|
||||
|
||||
```
|
||||
|
||||
## Example Application
|
||||
|
||||
For all autoscaling guides, we'll need a simple app that generates some CPU load <br/>
|
||||
|
||||
* Build the app
|
||||
* Push it to a registry
|
||||
* Ensure resource requirements are set
|
||||
* Deploy it to Kubernetes
|
||||
* Ensure metrics are visible for the app
|
||||
|
||||
```
|
||||
# build
|
||||
|
||||
cd kubernetes\autoscaling\components\application
|
||||
docker build . -t aimvector/application-cpu:v1.0.0
|
||||
|
||||
# push
|
||||
docker push aimvector/application-cpu:v1.0.0
|
||||
|
||||
# resource requirements
|
||||
resources:
|
||||
requests:
|
||||
memory: "50Mi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "500Mi"
|
||||
cpu: "2000m"
|
||||
|
||||
# deploy
|
||||
kubectl apply -f deployment.yaml
|
||||
|
||||
# metrics
|
||||
kubectl top pods
|
||||
|
||||
```
|
||||
|
||||
## Cluster Autoscaler
|
||||
|
||||
For cluster autoscaling, you should be able to scale the pods manually and watch the cluster scale (see the sketch below). <br/>
|
||||
Cluster autoscaling stops here. <br/>
|
||||
For Pod Autoscaling (HPA), continue. <br/>
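A rough sketch of that manual test (the replica count is arbitrary — pick something your current nodes cannot fit):
```
# force pending pods and watch the cluster autoscaler add nodes
kubectl scale deploy/application-cpu --replicas 10
kubectl get pods -o wide
kubectl get nodes -w
```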
|
||||
|
||||
## Generate some traffic
|
||||
|
||||
Let's deploy a simple traffic generator pod
|
||||
|
||||
```
|
||||
cd kubernetes\autoscaling\components\application
|
||||
kubectl apply -f .\traffic-generator.yaml
|
||||
|
||||
# get a terminal to the traffic-generator
|
||||
kubectl exec -it traffic-generator sh
|
||||
|
||||
# install wrk
|
||||
apk add --no-cache wrk
|
||||
|
||||
# simulate some load
|
||||
wrk -c 5 -t 5 -d 99999 -H "Connection: Close" http://application-cpu
|
||||
|
||||
# you can scale the pods manually and see that roughly 6-7 pods will satisfy resource requests.
|
||||
kubectl scale deploy/application-cpu --replicas 2
|
||||
```
|
||||
|
||||
## Deploy an autoscaler
|
||||
|
||||
```
|
||||
# scale the deployment back down to 2
|
||||
kubectl scale deploy/application-cpu --replicas 2
|
||||
|
||||
# deploy the autoscaler
|
||||
kubectl autoscale deploy/application-cpu --cpu-percent=95 --min=1 --max=10
|
||||
|
||||
# pods should scale to roughly 6-7 to match criteria of 95% of resource requests
|
||||
|
||||
kubectl get pods
|
||||
kubectl top pods
|
||||
kubectl get hpa/application-cpu -owide
|
||||
|
||||
kubectl describe hpa/application-cpu
|
||||
|
||||
```
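`kubectl autoscale` above is the imperative shortcut; the same object can also be applied declaratively. A minimal sketch of the equivalent manifest (the `autoscaling/v1` shape is an assumption, not taken from this repo):
```
cat <<'EOF' | kubectl apply -f -
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: application-cpu
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: application-cpu
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 95
EOF
```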
|
||||
|
||||
## Vertical Pod Autoscaling
|
||||
|
||||
The vertical pod autoscaler allows us to automatically set request values on our pods <br/>
|
||||
based on recommendations.
|
||||
This helps us tune the request values based on actual CPU and Memory usage.<br/>
|
||||
|
||||
More [here](./vertical-pod-autoscaling/readme.md)
|
||||
|
144
kubernetes/autoscaling/vertical-pod-autoscaling/readme.md
Normal file
@ -0,0 +1,144 @@
|
||||
# Vertical Pod Autoscaling
|
||||
|
||||
## We need a Kubernetes cluster
|
||||
|
||||
Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
|
||||
|
||||
```
|
||||
kind create cluster --name vpa --image kindest/node:v1.19.1
|
||||
```
|
||||
<hr/>
|
||||
|
||||
## Metric Server
|
||||
|
||||
<br/>
|
||||
|
||||
* For `Cluster Autoscaler` - On cloud-based clusters, Metric server may already be installed. <br/>
|
||||
* For `HPA` - We're using kind
|
||||
|
||||
[Metric Server](https://github.com/kubernetes-sigs/metrics-server) provides container resource metrics for use in autoscaling pipelines <br/>
|
||||
|
||||
Because I run K8s `1.19` in `kind`, the Metric Server version I need is `0.3.7` <br/>
|
||||
We will need to deploy Metric Server [0.3.7](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.3.7) <br/>
|
||||
I used `components.yaml` from the release page link above. <br/>
|
||||
|
||||
<b>Important Note</b> : For Demo clusters (like `kind`), you will need to disable TLS <br/>
|
||||
You can disable TLS by adding the following to the metrics-server container args <br/>
|
||||
|
||||
<b>For production, make sure you remove the following :</b> <br/>
|
||||
|
||||
```
|
||||
- --kubelet-insecure-tls
|
||||
- --kubelet-preferred-address-types="InternalIP"
|
||||
|
||||
```
|
||||
|
||||
Deployment: <br/>
|
||||
|
||||
```
|
||||
cd kubernetes\autoscaling
|
||||
kubectl -n kube-system apply -f .\components\metric-server\metricserver-0.3.7.yaml
|
||||
|
||||
#test
|
||||
kubectl -n kube-system get pods
|
||||
|
||||
#note: wait for metrics to populate!
|
||||
kubectl top nodes
|
||||
|
||||
```
|
||||
|
||||
## VPA
|
||||
|
||||
VPA docs [here](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler#install-command) <br/>
|
||||
Let's install the VPA from a container that can access our cluster
|
||||
|
||||
```
|
||||
cd kubernetes/autoscaling/vertical-pod-autoscaling
|
||||
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host debian:buster bash
|
||||
|
||||
# install git
|
||||
apt-get update && apt-get install -y git curl nano
|
||||
|
||||
# install kubectl
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
|
||||
cd /tmp
|
||||
git clone https://github.com/kubernetes/autoscaler.git
|
||||
cd autoscaler/vertical-pod-autoscaler/
|
||||
|
||||
./hack/vpa-up.sh
|
||||
|
||||
# after a few seconds, we can see the VPA components:
|
||||
|
||||
kubectl -n kube-system get pods
|
||||
```
|
||||
|
||||
## Build and deploy example app
|
||||
|
||||
```
|
||||
# build
|
||||
|
||||
cd kubernetes\autoscaling\components\application
|
||||
docker build . -t aimvector/application-cpu:v1.0.0
|
||||
|
||||
# push
|
||||
docker push aimvector/application-cpu:v1.0.0
|
||||
|
||||
# deploy
|
||||
kubectl apply -f deployment.yaml
|
||||
|
||||
# metrics
|
||||
kubectl top pods
|
||||
|
||||
```
|
||||
|
||||
## Generate some traffic
|
||||
|
||||
Let's deploy a simple traffic generator pod
|
||||
|
||||
```
|
||||
cd kubernetes\autoscaling\components\application
|
||||
kubectl apply -f .\traffic-generator.yaml
|
||||
|
||||
# get a terminal to the traffic-generator
|
||||
kubectl exec -it traffic-generator sh
|
||||
|
||||
# install wrk
|
||||
apk add --no-cache wrk
|
||||
|
||||
# simulate some load
|
||||
wrk -c 5 -t 5 -d 99999 -H "Connection: Close" http://application-cpu
|
||||
|
||||
```
|
||||
|
||||
## Deploy an example VPA
|
||||
|
||||
```
|
||||
|
||||
kubectl apply -f .\vertical-pod-autoscaling\vpa.yaml
|
||||
|
||||
kubectl describe vpa application-cpu
|
||||
|
||||
```
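To read just the recommended requests once the recommender has produced them, a small sketch (the jsonpath assumes the `autoscaling.k8s.io/v1` status layout):
```
kubectl get vpa application-cpu -o jsonpath='{.status.recommendation.containerRecommendations}'
```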
|
||||
|
||||
## Deploy Goldilocks
|
||||
|
||||
```
|
||||
cd /tmp
|
||||
git clone https://github.com/FairwindsOps/goldilocks.git
|
||||
cd goldilocks/hack/manifests/
|
||||
|
||||
kubectl create namespace goldilocks
|
||||
kubectl -n goldilocks apply -f ./controller
|
||||
kubectl -n goldilocks apply -f ./dashboard
|
||||
|
||||
|
||||
kubectl label ns default goldilocks.fairwinds.com/enabled=true
|
||||
kubectl label ns default goldilocks.fairwinds.com/vpa-update-mode="off"
|
||||
|
||||
kubectl -n goldilocks port-forward svc/goldilocks-dashboard 80
|
||||
|
||||
```
|
11
kubernetes/autoscaling/vertical-pod-autoscaling/vpa.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: autoscaling.k8s.io/v1
|
||||
kind: VerticalPodAutoscaler
|
||||
metadata:
|
||||
name: application-cpu
|
||||
spec:
|
||||
targetRef:
|
||||
apiVersion: "apps/v1"
|
||||
kind: Deployment
|
||||
name: application-cpu
|
||||
updatePolicy:
|
||||
updateMode: "Off"
|
@ -9,7 +9,7 @@ docker run -it --rm -v ${PWD}:/work -w /work --entrypoint /bin/sh amazon/aws-cli
|
||||
|
||||
cd ./kubernetes/cloud/amazon
|
||||
|
||||
yum install jq
|
||||
yum install jq gzip nano tar git
|
||||
```
|
||||
|
||||
## Login to AWS
|
||||
@ -116,13 +116,27 @@ aws eks create-nodegroup \
|
||||
## EKS CTL example
|
||||
|
||||
```
|
||||
eksctl create cluster --name getting-started-eks-1 \
|
||||
# Install EKS CTL
|
||||
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
|
||||
mv /tmp/eksctl /usr/local/bin
|
||||
|
||||
# Create SSH key for Node access (if you need it)
|
||||
yum install openssh
|
||||
mkdir -p ~/.ssh/
|
||||
PASSPHRASE="mysuperstrongpassword"
|
||||
ssh-keygen -t rsa -b 4096 -N "${PASSPHRASE}" -C "your_email@example.com" -q -f ~/.ssh/id_rsa
|
||||
chmod 400 ~/.ssh/id_rsa*
|
||||
|
||||
|
||||
eksctl create cluster --name getting-started-eks \
|
||||
--region ap-southeast-2 \
|
||||
--version 1.16 \
|
||||
--managed \
|
||||
--node-type t2.small \
|
||||
--nodes 1 \
|
||||
--node-volume-size 200
|
||||
--node-volume-size 200 \
|
||||
--ssh-access \
|
||||
--ssh-public-key=~/.ssh/id_rsa.pub \
|
||||
|
||||
```
|
||||
## Create some sample containers
|
||||
|
174
kubernetes/cloud/amazon/terraform/main.tf
Normal file
@ -0,0 +1,174 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12.0"
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
version = ">= 2.28.1"
|
||||
region = var.region
|
||||
}
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
  name = module.eks.cluster_id
}

data "aws_availability_zones" "available" {
}

resource "aws_security_group" "worker_group_mgmt_one" {
  name_prefix = "worker_group_mgmt_one"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
    ]
  }
}

resource "aws_security_group" "all_worker_mgmt" {
  name_prefix = "all_worker_management"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.6.0"

  name                 = "test-vpc"
  cidr                 = "10.0.0.0/16"
  azs                  = data.aws_availability_zones.available.names
  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                    = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"           = "1"
  }
}

module "eks" {
  source                          = "terraform-aws-modules/eks/aws"
  cluster_name                    = var.cluster_name
  cluster_version                 = "1.17"
  subnets                         = module.vpc.private_subnets
  version                         = "12.2.0"
  cluster_create_timeout          = "1h"
  cluster_endpoint_private_access = true

  vpc_id = module.vpc.vpc_id

  worker_groups = [
    {
      name                          = "worker-group-1"
      instance_type                 = "t2.small"
      additional_userdata           = "echo foo bar"
      asg_desired_capacity          = 1
      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
    },
  ]

  worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
  map_roles                            = var.map_roles
  map_users                            = var.map_users
  map_accounts                         = var.map_accounts
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  load_config_file       = false
  version                = "~> 1.11"
}

resource "kubernetes_deployment" "example" {
  metadata {
    name = "terraform-example"
    labels = {
      test = "MyExampleApp"
    }
  }

  spec {
    replicas = 2

    selector {
      match_labels = {
        test = "MyExampleApp"
      }
    }

    template {
      metadata {
        labels = {
          test = "MyExampleApp"
        }
      }

      spec {
        container {
          image = "nginx:1.7.8"
          name  = "example"

          resources {
            limits {
              cpu    = "0.5"
              memory = "512Mi"
            }
            requests {
              cpu    = "250m"
              memory = "50Mi"
            }
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "example" {
  metadata {
    name = "terraform-example"
  }
  spec {
    selector = {
      test = "MyExampleApp"
    }
    port {
      port        = 80
      target_port = 80
    }

    type = "LoadBalancer"
  }
}
24
kubernetes/cloud/amazon/terraform/outputs.tf
Normal file
@ -0,0 +1,24 @@
output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane."
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" {
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "region" {
  description = "AWS region"
  value       = var.region
}
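After `terraform apply`, these outputs can be read back with `terraform output`. A small sketch (the file name is just an example; newer Terraform versions may need `-raw` for string outputs):

```
# print a single output
terraform output cluster_endpoint

# save the generated kubeconfig to a file
terraform output kubectl_config > kubeconfig-eks.yaml
```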
82
kubernetes/cloud/amazon/terraform/readme.md
Normal file
@ -0,0 +1,82 @@
# Getting Started with Amazon EKS using Terraform

More resources:

Terraform provider for AWS [here](https://www.terraform.io/docs/providers/aws/index.html) <br/>

## Amazon CLI

You can get the AWS CLI on [Docker-Hub](https://hub.docker.com/r/amazon/aws-cli) <br/>
We'll need the AWS CLI to gather information so we can build our Terraform file.

```
# Run the AWS CLI
docker run -it --rm -v ${PWD}:/work -w /work --entrypoint /bin/sh amazon/aws-cli:2.0.43

# some handy tools :)
yum install -y jq gzip nano tar git unzip wget
```

## Login to Amazon

```
# Access the "My Security Credentials" section in your profile
# and create an access key.

aws configure

Default region name: ap-southeast-2
Default output format: json
```

# Terraform CLI

```
# Get Terraform

curl -o /tmp/terraform.zip -LO https://releases.hashicorp.com/terraform/0.13.1/terraform_0.13.1_linux_amd64.zip
unzip /tmp/terraform.zip
chmod +x terraform && mv terraform /usr/local/bin/
terraform
```

## Terraform Amazon Kubernetes Provider

Documentation on all the EKS cluster fields for Terraform is [here](https://www.terraform.io/docs/providers/aws/r/eks_cluster.html)

```
cd kubernetes/cloud/amazon/terraform

terraform init

terraform plan
terraform apply
```
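The cluster name and region are defined in `variables.tf` and default to `getting-started-eks` and `ap-southeast-2`. If you want different values, a minimal sketch of overriding them on the command line (the values below are just examples):

```
# optional: override the defaults from variables.tf
terraform plan -var cluster_name=my-eks-cluster -var region=eu-west-1
terraform apply -var cluster_name=my-eks-cluster -var region=eu-west-1
```

If you do override the region, remember to use the same region in the `aws eks update-kubeconfig` command below.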
# Let's see what we deployed

```
# grab our EKS config
aws eks update-kubeconfig --name getting-started-eks --region ap-southeast-2

# Get kubectl

curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl

kubectl get nodes
kubectl get deploy
kubectl get pods
kubectl get svc
```

# Clean up

```
terraform destroy
```
57
kubernetes/cloud/amazon/terraform/variables.tf
Normal file
@ -0,0 +1,57 @@
variable "region" {
  default     = "ap-southeast-2"
  description = "AWS region"
}

variable "cluster_name" {
  default = "getting-started-eks"
}

variable "map_accounts" {
  description = "Additional AWS account numbers to add to the aws-auth configmap."
  type        = list(string)

  default = [
    "777777777777",
    "888888888888",
  ]
}

variable "map_roles" {
  description = "Additional IAM roles to add to the aws-auth configmap."
  type = list(object({
    rolearn  = string
    username = string
    groups   = list(string)
  }))

  default = [
    {
      rolearn  = "arn:aws:iam::66666666666:role/role1"
      username = "role1"
      groups   = ["system:masters"]
    },
  ]
}

variable "map_users" {
  description = "Additional IAM users to add to the aws-auth configmap."
  type = list(object({
    userarn  = string
    username = string
    groups   = list(string)
  }))

  default = [
    {
      userarn  = "arn:aws:iam::66666666666:user/user1"
      username = "user1"
      groups   = ["system:masters"]
    },
    {
      userarn  = "arn:aws:iam::66666666666:user/user2"
      username = "user2"
      groups   = ["system:masters"]
    },
  ]
}
@ -70,7 +70,7 @@ cp ~/.ssh/id_rsa* .
 az aks create -n aks-getting-started \
   --resource-group $RESOURCEGROUP \
   --location australiaeast \
-  --kubernetes-version 1.16.9 \
+  --kubernetes-version 1.16.10 \
   --load-balancer-sku standard \
   --nodepool-name default \
   --node-count 1 \
28
kubernetes/cloud/azure/terraform/main.tf
Normal file
@ -0,0 +1,28 @@
provider "azurerm" {
  version = "=2.5.0"

  subscription_id = var.subscription_id
  client_id       = var.serviceprinciple_id
  client_secret   = var.serviceprinciple_key
  tenant_id       = var.tenant_id

  features {}
}

module "cluster" {
  source               = "./modules/cluster/"
  serviceprinciple_id  = var.serviceprinciple_id
  serviceprinciple_key = var.serviceprinciple_key
  ssh_key              = var.ssh_key
  location             = var.location
  kubernetes_version   = var.kubernetes_version
}

module "k8s" {
  source                 = "./modules/k8s/"
  host                   = "${module.cluster.host}"
  client_certificate     = "${base64decode(module.cluster.client_certificate)}"
  client_key             = "${base64decode(module.cluster.client_key)}"
  cluster_ca_certificate = "${base64decode(module.cluster.cluster_ca_certificate)}"
}
72
kubernetes/cloud/azure/terraform/modules/cluster/cluster.tf
Normal file
@ -0,0 +1,72 @@
|
||||
resource "azurerm_resource_group" "aks-getting-started" {
|
||||
name = "aks-getting-started"
|
||||
location = var.location
|
||||
}
|
||||
|
||||
resource "azurerm_kubernetes_cluster" "aks-getting-started" {
|
||||
name = "aks-getting-started"
|
||||
location = azurerm_resource_group.aks-getting-started.location
|
||||
resource_group_name = azurerm_resource_group.aks-getting-started.name
|
||||
dns_prefix = "aks-getting-started"
|
||||
kubernetes_version = var.kubernetes_version
|
||||
|
||||
default_node_pool {
|
||||
name = "default"
|
||||
node_count = 1
|
||||
vm_size = "Standard_E4s_v3"
|
||||
type = "VirtualMachineScaleSets"
|
||||
os_disk_size_gb = 250
|
||||
}
|
||||
|
||||
service_principal {
|
||||
client_id = var.serviceprinciple_id
|
||||
client_secret = var.serviceprinciple_key
|
||||
}
|
||||
|
||||
linux_profile {
|
||||
admin_username = "azureuser"
|
||||
ssh_key {
|
||||
key_data = var.ssh_key
|
||||
}
|
||||
}
|
||||
|
||||
network_profile {
|
||||
network_plugin = "kubenet"
|
||||
load_balancer_sku = "Standard"
|
||||
}
|
||||
|
||||
addon_profile {
|
||||
aci_connector_linux {
|
||||
enabled = false
|
||||
}
|
||||
|
||||
azure_policy {
|
||||
enabled = false
|
||||
}
|
||||
|
||||
http_application_routing {
|
||||
enabled = false
|
||||
}
|
||||
|
||||
kube_dashboard {
|
||||
enabled = false
|
||||
}
|
||||
|
||||
oms_agent {
|
||||
enabled = false
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
resource "azurerm_kubernetes_cluster_node_pool" "monitoring" {
|
||||
name = "monitoring"
|
||||
kubernetes_cluster_id = azurerm_kubernetes_cluster.aks-getting-started.id
|
||||
vm_size = "Standard_DS2_v2"
|
||||
node_count = 1
|
||||
os_disk_size_gb = 250
|
||||
os_type = "Linux"
|
||||
}
|
||||
|
||||
*/
|
19
kubernetes/cloud/azure/terraform/modules/cluster/outputs.tf
Normal file
@ -0,0 +1,19 @@
|
||||
output "kube_config" {
|
||||
value = azurerm_kubernetes_cluster.aks-getting-started.kube_config_raw
|
||||
}
|
||||
|
||||
output "cluster_ca_certificate" {
|
||||
value = azurerm_kubernetes_cluster.aks-getting-started.kube_config.0.cluster_ca_certificate
|
||||
}
|
||||
|
||||
output "client_certificate" {
|
||||
value = azurerm_kubernetes_cluster.aks-getting-started.kube_config.0.client_certificate
|
||||
}
|
||||
|
||||
output "client_key" {
|
||||
value = azurerm_kubernetes_cluster.aks-getting-started.kube_config.0.client_key
|
||||
}
|
||||
|
||||
output "host" {
|
||||
value = azurerm_kubernetes_cluster.aks-getting-started.kube_config.0.host
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
variable "serviceprinciple_id" {
|
||||
}
|
||||
|
||||
variable "serviceprinciple_key" {
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
default = "australiaeast"
|
||||
}
|
||||
|
||||
variable "kubernetes_version" {
|
||||
default = "1.16.10"
|
||||
}
|
||||
|
||||
variable "ssh_key" {
|
||||
}
|
86
kubernetes/cloud/azure/terraform/modules/k8s/k8s.tf
Normal file
@ -0,0 +1,86 @@
|
||||
|
||||
provider "kubernetes" {
|
||||
load_config_file = "false"
|
||||
host = var.host
|
||||
client_certificate = var.client_certificate
|
||||
client_key = var.client_key
|
||||
cluster_ca_certificate = var.cluster_ca_certificate
|
||||
}
|
||||
|
||||
|
||||
resource "kubernetes_deployment" "example" {
|
||||
metadata {
|
||||
name = "terraform-example"
|
||||
labels = {
|
||||
test = "MyExampleApp"
|
||||
}
|
||||
}
|
||||
|
||||
spec {
|
||||
replicas = 3
|
||||
|
||||
selector {
|
||||
match_labels = {
|
||||
test = "MyExampleApp"
|
||||
}
|
||||
}
|
||||
|
||||
template {
|
||||
metadata {
|
||||
labels = {
|
||||
test = "MyExampleApp"
|
||||
}
|
||||
}
|
||||
|
||||
spec {
|
||||
container {
|
||||
image = "nginx:1.7.8"
|
||||
name = "example"
|
||||
|
||||
resources {
|
||||
limits {
|
||||
cpu = "0.5"
|
||||
memory = "512Mi"
|
||||
}
|
||||
requests {
|
||||
cpu = "250m"
|
||||
memory = "50Mi"
|
||||
}
|
||||
}
|
||||
|
||||
liveness_probe {
|
||||
http_get {
|
||||
path = "/nginx_status"
|
||||
port = 80
|
||||
|
||||
http_header {
|
||||
name = "X-Custom-Header"
|
||||
value = "Awesome"
|
||||
}
|
||||
}
|
||||
|
||||
initial_delay_seconds = 3
|
||||
period_seconds = 3
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_service" "example" {
|
||||
metadata {
|
||||
name = "terraform-example"
|
||||
}
|
||||
spec {
|
||||
selector = {
|
||||
test = "MyExampleApp"
|
||||
}
|
||||
port {
|
||||
port = 80
|
||||
target_port = 80
|
||||
}
|
||||
|
||||
type = "LoadBalancer"
|
||||
}
|
||||
}
|
11
kubernetes/cloud/azure/terraform/modules/k8s/variables.tf
Normal file
@ -0,0 +1,11 @@
|
||||
variable "host" {
|
||||
}
|
||||
|
||||
variable "client_certificate" {
|
||||
}
|
||||
|
||||
variable "client_key" {
|
||||
}
|
||||
|
||||
variable "cluster_ca_certificate" {
|
||||
}
|
125
kubernetes/cloud/azure/terraform/readme.md
Normal file
@ -0,0 +1,125 @@
# Getting Started with AKS using Terraform

More resources:

Terraform provider for Azure [here](https://github.com/terraform-providers/terraform-provider-azurerm) <br/>

## Azure CLI

You can get the Azure CLI on [Docker-Hub](https://hub.docker.com/_/microsoft-azure-cli) <br/>
We'll need the Azure CLI to gather information so we can build our Terraform file.

```
# Run Azure CLI
docker run -it --rm -v ${PWD}:/work -w /work --entrypoint /bin/sh mcr.microsoft.com/azure-cli:2.6.0
```

## Login to Azure

```
# login and follow the prompts
az login
TENANT_ID=<your-tenant-id>

# view and select your subscription account

az account list -o table
SUBSCRIPTION=<id>
az account set --subscription $SUBSCRIPTION
```

## Create Service Principal

AKS needs a service principal so the cluster can create and manage Azure resources on our behalf. </br>
Let's create one! </br>

```
SERVICE_PRINCIPAL_JSON=$(az ad sp create-for-rbac --skip-assignment --name aks-getting-started-sp -o json)

# Keep the `appId` and `password` for later use!

SERVICE_PRINCIPAL=$(echo $SERVICE_PRINCIPAL_JSON | jq -r '.appId')
SERVICE_PRINCIPAL_SECRET=$(echo $SERVICE_PRINCIPAL_JSON | jq -r '.password')

# note: reset the credential if the password contains any single or double quotes
az ad sp credential reset --name "aks-getting-started-sp"

# Grant the Contributor role over the subscription to our service principal

az role assignment create --assignee $SERVICE_PRINCIPAL \
    --scope "/subscriptions/$SUBSCRIPTION" \
    --role Contributor
```
For extra reference you can also take a look at the Microsoft Docs: [here](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/aks/kubernetes-service-principal.md) </br>

# Terraform CLI
```
# Get Terraform

curl -o /tmp/terraform.zip -LO https://releases.hashicorp.com/terraform/0.12.28/terraform_0.12.28_linux_amd64.zip

unzip /tmp/terraform.zip
chmod +x terraform && mv terraform /usr/local/bin/

cd kubernetes/cloud/azure/terraform/
```

# Generate SSH key

```
ssh-keygen -t rsa -b 4096 -N "VeryStrongSecret123!" -C "your_email@example.com" -q -f ~/.ssh/id_rsa
SSH_KEY=$(cat ~/.ssh/id_rsa.pub)
```

## Terraform Azure Kubernetes Provider

Documentation on all the AKS cluster fields for Terraform is [here](https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster.html)

```
terraform init

terraform plan -var serviceprinciple_id=$SERVICE_PRINCIPAL \
    -var serviceprinciple_key="$SERVICE_PRINCIPAL_SECRET" \
    -var tenant_id=$TENANT_ID \
    -var subscription_id=$SUBSCRIPTION \
    -var ssh_key="$SSH_KEY"

terraform apply -var serviceprinciple_id=$SERVICE_PRINCIPAL \
    -var serviceprinciple_key="$SERVICE_PRINCIPAL_SECRET" \
    -var tenant_id=$TENANT_ID \
    -var subscription_id=$SUBSCRIPTION \
    -var ssh_key="$SSH_KEY"
```
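Passing the same five `-var` flags to every command gets repetitive. One option, not part of the original guide but standard Terraform behaviour, is to export them once as `TF_VAR_*` environment variables:

```
# optional: export the variables once so plan/apply/destroy pick them up automatically
export TF_VAR_serviceprinciple_id=$SERVICE_PRINCIPAL
export TF_VAR_serviceprinciple_key="$SERVICE_PRINCIPAL_SECRET"
export TF_VAR_tenant_id=$TENANT_ID
export TF_VAR_subscription_id=$SUBSCRIPTION
export TF_VAR_ssh_key="$SSH_KEY"

terraform plan
terraform apply
```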
# Let's see what we deployed

```
# grab our AKS config
az aks get-credentials -n aks-getting-started -g aks-getting-started

# Get kubectl

curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl

kubectl get svc
```

# Clean up

```
terraform destroy -var serviceprinciple_id=$SERVICE_PRINCIPAL \
    -var serviceprinciple_key="$SERVICE_PRINCIPAL_SECRET" \
    -var tenant_id=$TENANT_ID \
    -var subscription_id=$SUBSCRIPTION \
    -var ssh_key="$SSH_KEY"
```
23
kubernetes/cloud/azure/terraform/variables.tf
Normal file
@ -0,0 +1,23 @@
variable "serviceprinciple_id" {
}

variable "serviceprinciple_key" {
}

variable "tenant_id" {
}

variable "subscription_id" {
}

variable "ssh_key" {
}

variable "location" {
  default = "australiaeast"
}

variable "kubernetes_version" {
  default = "1.16.10"
}
@ -1,53 +1,59 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
labels:
|
||||
app: example-app
|
||||
annotations:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: aimvector/python:1.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /status
|
||||
# port: 5000
|
||||
# initialDelaySeconds: 3
|
||||
# periodSeconds: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
||||
volumeMounts:
|
||||
- name: secret-volume
|
||||
mountPath: /secrets/
|
||||
- name: config-volume
|
||||
mountPath: /configs/
|
||||
volumes:
|
||||
- name: secret-volume
|
||||
secret:
|
||||
secretName: mysecret
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: example-config #name of our configmap object
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
labels:
|
||||
app: example-app
|
||||
test: test
|
||||
annotations:
|
||||
fluxcd.io/tag.example-app: semver:~1.0
|
||||
fluxcd.io/automated: 'true'
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: aimvector/python:1.0.4
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /status
|
||||
# port: 5000
|
||||
# initialDelaySeconds: 3
|
||||
# periodSeconds: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
||||
#NOTE: comment out `volumeMounts` section for configmap and\or secret guide
|
||||
# volumeMounts:
|
||||
# - name: secret-volume
|
||||
# mountPath: /secrets/
|
||||
# - name: config-volume
|
||||
# mountPath: /configs/
|
||||
#NOTE: comment out `volumes` section for configmap and\or secret guide
|
||||
# volumes:
|
||||
# - name: secret-volume
|
||||
# secret:
|
||||
# secretName: mysecret
|
||||
# - name: config-volume
|
||||
# configMap:
|
||||
# name: example-config #name of our configmap object
|
||||
|
10
kubernetes/kustomize/application/configmap.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
# apiVersion: v1
|
||||
# kind: ConfigMap
|
||||
# metadata:
|
||||
# name: example-config
|
||||
# namespace: example
|
||||
# data:
|
||||
# config.json: |
|
||||
# {
|
||||
# "environment" : "dev"
|
||||
# }
|
36
kubernetes/kustomize/application/deployment.yaml
Normal file
@ -0,0 +1,36 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
namespace: example
|
||||
labels:
|
||||
app: example-app
|
||||
annotations:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example-app
|
||||
replicas: 2
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: aimvector/python:1.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /configs/
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: example-config
|
5
kubernetes/kustomize/application/kustomization.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- configmap.yaml
|
4
kubernetes/kustomize/application/namespace.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: example
|
16
kubernetes/kustomize/application/service.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: example-service
|
||||
namespace: example
|
||||
labels:
|
||||
app: example-app
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
selector:
|
||||
app: example-app
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 5000
|
@ -0,0 +1,4 @@
|
||||
bases:
|
||||
- ../../application
|
||||
patches:
|
||||
- replica_count.yaml
|
@ -0,0 +1,6 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
spec:
|
||||
replicas: 4
|
@ -0,0 +1,4 @@
|
||||
{
|
||||
"environment" : "prod",
|
||||
"hello": "world"
|
||||
}
|
12
kubernetes/kustomize/environments/production/env.yaml
Normal file
@ -0,0 +1,12 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
env:
|
||||
- name: ENVIRONMENT
|
||||
value: Production
|
@ -0,0 +1,16 @@
|
||||
bases:
|
||||
- ../../application
|
||||
patches:
|
||||
- replica_count.yaml
|
||||
- resource_limits.yaml
|
||||
configMapGenerator:
|
||||
- name: example-config
|
||||
namespace: example
|
||||
#behavior: replace
|
||||
files:
|
||||
- configs/config.json
|
||||
patchesStrategicMerge:
|
||||
- env.yaml
|
||||
images:
|
||||
- name: aimvector/python
|
||||
newTag: 1.0.1
|
@ -0,0 +1,6 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
spec:
|
||||
replicas: 6
|
@ -0,0 +1,16 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: example-deploy
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "500m"
|
40
kubernetes/kustomize/readme.md
Normal file
@ -0,0 +1,40 @@
# The Basics
```
kubectl apply -f kubernetes/kustomize/application/namespace.yaml
kubectl apply -f kubernetes/kustomize/application/configmap.yaml
kubectl apply -f kubernetes/kustomize/application/deployment.yaml
kubectl apply -f kubernetes/kustomize/application/service.yaml

# OR

kubectl apply -f kubernetes/kustomize/application/

kubectl delete ns example
```

# Kustomize

## Build
```
kubectl kustomize .\kubernetes\kustomize\ | kubectl apply -f -
# OR
kubectl apply -k .\kubernetes\kustomize\

kubectl delete ns example
```

## Overlays

```
kubectl kustomize .\kubernetes\kustomize\environments\production | kubectl apply -f -
# OR
kubectl apply -k .\kubernetes\kustomize\environments\production

kubectl delete ns example
```
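Before applying an overlay, it can help to inspect what Kustomize will actually generate. A small sketch using standard kubectl commands (the output file name is just an example):

```
# render the production overlay to a file and review it
kubectl kustomize .\kubernetes\kustomize\environments\production > production-rendered.yaml

# or diff it against what is currently running in the cluster
kubectl diff -k .\kubernetes\kustomize\environments\production
```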
16
kubernetes/persistentvolume/persistentvolume.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: example-volume
|
||||
labels:
|
||||
type: local
|
||||
spec:
|
||||
#we use local node storage here!
|
||||
#kubectl get storageclass
|
||||
storageClassName: hostpath
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
hostPath:
|
||||
path: "/mnt/data"
|
11
kubernetes/persistentvolume/persistentvolumeclaim.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: example-claim
|
||||
spec:
|
||||
storageClassName: hostpath
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Mi
|
50
kubernetes/persistentvolume/postgres-no-pv.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: postgres-config
|
||||
labels:
|
||||
app: postgres
|
||||
data:
|
||||
POSTGRES_DB: postgresdb
|
||||
POSTGRES_USER: admin
|
||||
POSTGRES_PASSWORD: admin123
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
serviceName: postgres
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:10.4
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: postgres-config
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: postgres
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 5432
|
||||
targetPort: 5432
|
57
kubernetes/persistentvolume/postgres-with-pv.yaml
Normal file
@ -0,0 +1,57 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: postgres-config
|
||||
labels:
|
||||
app: postgres
|
||||
data:
|
||||
POSTGRES_DB: postgresdb
|
||||
POSTGRES_USER: admin
|
||||
POSTGRES_PASSWORD: admin123
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
serviceName: postgres
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:10.4
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: postgres-config
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /var/lib/postgresql/data
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: example-claim
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: postgres
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 5432
|
||||
targetPort: 5432
|
77
kubernetes/persistentvolume/readme.md
Normal file
@ -0,0 +1,77 @@
# Persistent Volumes Demo

## Container Storage

By default, containers store their data on the local file system like any other process.
The container file system is temporary and does not persist across container restarts:
when a container is recreated, so is its file system.

```
# run postgres
docker run -d --rm -e POSTGRES_DB=postgresdb -e POSTGRES_USER=admin -e POSTGRES_PASSWORD=admin123 postgres:10.4

# enter the container
docker exec -it <container-id> bash

# login to postgres
psql --username=admin postgresdb

# create a table
CREATE TABLE COMPANY(
  ID INT PRIMARY KEY NOT NULL,
  NAME TEXT NOT NULL,
  AGE INT NOT NULL,
  ADDRESS CHAR(50),
  SALARY REAL
);

# show tables
\dt

# quit
\q
```

If you restart the container and exec back in, you will notice that `\dt` no longer returns any tables:
the data was lost along with the container's file system.

The same can be demonstrated using Kubernetes:

```
cd .\kubernetes\persistentvolume\

kubectl create ns postgres
kubectl apply -n postgres -f ./postgres-no-pv.yaml
kubectl -n postgres get pods
kubectl -n postgres exec -it postgres-0 bash

# run the same commands as above to create and list the database table

kubectl delete po -n postgres postgres-0

# exec back in and confirm the table no longer exists
```

# Persist data in Docker

```
docker volume create postgres
docker run -d --rm -v postgres:/var/lib/postgresql/data -e POSTGRES_DB=postgresdb -e POSTGRES_USER=admin -e POSTGRES_PASSWORD=admin123 postgres:10.4

# run the same tests as above and notice that the table survives container restarts
```

# Persist data in Kubernetes

```
kubectl apply -f persistentvolume.yaml
kubectl apply -n postgres -f persistentvolumeclaim.yaml

kubectl apply -n postgres -f postgres-with-pv.yaml

kubectl -n postgres get pods
```
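To confirm the volume is doing its job, you can repeat the earlier experiment against the PV-backed StatefulSet; a quick sketch (same table and psql commands as above):

```
# create the table again inside the PV-backed pod
kubectl -n postgres exec -it postgres-0 -- psql --username=admin postgresdb

# delete the pod so the StatefulSet recreates it
kubectl delete po -n postgres postgres-0

# exec back in: \dt should still show the COMPANY table,
# because the data now lives on the persistent volume
kubectl -n postgres exec -it postgres-0 -- psql --username=admin postgresdb
```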
@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: nginx-ingress-clusterrole-nisa-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: nginx-ingress-clusterrole
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nginx-ingress-serviceaccount
|
||||
namespace: ingress-nginx
|
@ -0,0 +1,53 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: nginx-ingress-clusterrole
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
- nodes
|
||||
- pods
|
||||
- secrets
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
@ -0,0 +1,35 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nginx-configuration
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
data:
|
||||
client-body-buffer-size: 60k
|
||||
client-header-buffer-size: 16k
|
||||
error-log-level: warn
|
||||
http2-max-field-size: 16k
|
||||
large-client-header-buffers: 4 16k
|
||||
log-format-escape-json: "true"
|
||||
log-format-upstream: '{"time":"$time_iso8601","remote_addr":"$remote_addr","proxy_protocol_addr":"$proxy_protocol_addr","proxy_protocol_port":"$proxy_protocol_port","x_forward_for":"$proxy_add_x_forwarded_for","remote_user":"$remote_user","host":"$host","request_method":"$request_method","request_uri":"$request_uri","server_protocol":"$server_protocol","status":$status,"request_time":$request_time,"request_length":$request_length,"bytes_sent":$bytes_sent,"upstream_name":"$proxy_upstream_name","upstream_addr":"$upstream_addr","upstream_uri":"$uri","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status,"http_referrer":"$http_referer","http_user_agent":"$http_user_agent","http_cookie":"$http_cookie"}'
|
||||
location-snippet: "include /etc/nginx/custom-snippets/location-custom.conf;"
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tcp-services
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: udp-services
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
@ -0,0 +1,37 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: custom-snippets
|
||||
namespace: ingress-nginx
|
||||
data:
|
||||
location-custom.conf: |
|
||||
set $globalcors "";
|
||||
|
||||
if ($http_origin ~* '^(http|https)?://(localhost(:[0-9]+)?)$') {
|
||||
set $globalcors "true";
|
||||
}
|
||||
|
||||
if ($request_method = 'OPTIONS') {
|
||||
set $globalcors "${globalcors}options";
|
||||
}
|
||||
|
||||
if ($globalcors = "trueoptions") {
|
||||
add_header 'Access-Control-Allow-Origin' "$http_origin";
|
||||
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, HEAD, PUT, DELETE, OPTIONS';
|
||||
add_header 'Access-Control-Allow-Headers' 'X-CSRF-Token,Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With';
|
||||
add_header 'Access-Control-Max-Age' 1728000;
|
||||
add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||
add_header 'Content-Length' 0;
|
||||
|
||||
return 204;
|
||||
}
|
||||
|
||||
if ($request_method ~* "(GET|POST|HEAD)") {
|
||||
add_header "Access-Control-Allow-Origin" "$http_origin";
|
||||
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||
add_header 'Access-Control-Allow-Headers' 'X-CSRF-Token,Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With';
|
||||
add_header 'X-Frame-Options' 'sameorigin';
|
||||
}
|
||||
|
||||
---
|
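This location snippet only adds the CORS headers when the request's `Origin` header matches `localhost`. A quick way to exercise it once the ingress controller is running, assuming the ingress is reachable on some host such as `servicemesh.demo` (the hostname and path are just examples):

```
# preflight request: should return 204 with the Access-Control-Allow-* headers
curl -i -X OPTIONS http://servicemesh.demo/api/playlists \
  -H "Origin: http://localhost:8080" \
  -H "Access-Control-Request-Method: GET"

# normal GET from an allowed origin: the origin should be echoed back
curl -i http://servicemesh.demo/api/playlists -H "Origin: http://localhost:8080"
```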
@ -0,0 +1,77 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-ingress-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx-ingress
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx-ingress
|
||||
annotations:
|
||||
prometheus.io/port: "10254"
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
serviceAccountName: nginx-ingress-serviceaccount
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --configmap=$(POD_NAMESPACE)/nginx-configuration
|
||||
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
|
||||
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
|
||||
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
|
||||
- --annotations-prefix=nginx.ingress.kubernetes.io
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 33
|
||||
volumeMounts:
|
||||
- name: custom-snippets
|
||||
mountPath: /etc/nginx/custom-snippets/
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 10
|
||||
volumes:
|
||||
- name: custom-snippets
|
||||
configMap:
|
||||
name: custom-snippets
|
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nginx-ingress-serviceaccount
|
||||
namespace: ingress-nginx
|
@ -0,0 +1,16 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
selector:
|
||||
app: nginx-ingress
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: http
|
||||
- name: https
|
||||
port: 443
|
||||
targetPort: https
|
131
kubernetes/servicemesh/applications/playlists-api/app.go
Normal file
@ -0,0 +1,131 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"context"
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
var environment = os.Getenv("ENVIRONMENT")
|
||||
var redis_host = os.Getenv("REDIS_HOST")
|
||||
var redis_port = os.Getenv("REDIS_PORT")
|
||||
var ctx = context.Background()
|
||||
var rdb *redis.Client
|
||||
|
||||
func main() {
|
||||
|
||||
router := httprouter.New()
|
||||
|
||||
router.GET("/", func(w http.ResponseWriter, r *http.Request, p httprouter.Params){
|
||||
cors(w)
|
||||
playlistsJson := getPlaylists()
|
||||
|
||||
playlists := []playlist{}
|
||||
err := json.Unmarshal([]byte(playlistsJson), &playlists)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
//get videos for each playlist from videos api
|
||||
for pi := range playlists {
|
||||
|
||||
vs := []videos{}
|
||||
for vi := range playlists[pi].Videos {
|
||||
|
||||
v := videos{}
|
||||
videoResp, err := http.Get("http://videos-api:10010/" + playlists[pi].Videos[vi].Id)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
break
|
||||
}
|
||||
|
||||
defer videoResp.Body.Close()
|
||||
video, err := ioutil.ReadAll(videoResp.Body)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
|
||||
err = json.Unmarshal(video, &v)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
vs = append(vs, v)
|
||||
|
||||
}
|
||||
|
||||
playlists[pi].Videos = vs
|
||||
}
|
||||
|
||||
playlistsBytes, err := json.Marshal(playlists)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(playlistsBytes)
|
||||
if b, err := ioutil.ReadAll(reader); err == nil {
|
||||
fmt.Fprintf(w, "%s", string(b))
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
r := redis.NewClient(&redis.Options{
|
||||
Addr: redis_host + ":" + redis_port,
|
||||
DB: 0,
|
||||
})
|
||||
rdb = r
|
||||
|
||||
fmt.Println("Running...")
|
||||
log.Fatal(http.ListenAndServe(":10010", router))
|
||||
}
|
||||
|
||||
func getPlaylists()(response string){
|
||||
playlistData, err := rdb.Get(ctx, "playlists").Result()
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
fmt.Println("error occured retrieving playlists from Redis")
|
||||
return "[]"
|
||||
}
|
||||
|
||||
return playlistData
|
||||
}
|
||||
|
||||
type playlist struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Videos []videos `json:"videos"`
|
||||
}
|
||||
|
||||
type videos struct {
|
||||
Id string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
Imageurl string `json:"imageurl"`
|
||||
Url string `json:"url"`
|
||||
|
||||
}
|
||||
|
||||
type stop struct {
|
||||
error
|
||||
}
|
||||
|
||||
func cors(writer http.ResponseWriter) () {
|
||||
if(environment == "DEBUG"){
|
||||
writer.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
|
||||
writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, X-MY-API-Version")
|
||||
writer.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
}
|
||||
}
|
@ -0,0 +1,70 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: playlists-api
|
||||
labels:
|
||||
app: playlists-api
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: playlists-api
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: playlists-api
|
||||
spec:
|
||||
containers:
|
||||
- name: playlists-api
|
||||
image: aimvector/service-mesh:playlists-api-1.0.0
|
||||
imagePullPolicy : Always
|
||||
ports:
|
||||
- containerPort: 10010
|
||||
env:
|
||||
- name: "ENVIRONMENT"
|
||||
value: "DEBUG"
|
||||
- name: "REDIS_HOST"
|
||||
value: "playlists-db"
|
||||
- name: "REDIS_PORT"
|
||||
value: "6379"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: playlists-api
|
||||
labels:
|
||||
app: playlists-api
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: playlists-api
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 10010
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "false"
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
name: playlists-api
|
||||
spec:
|
||||
rules:
|
||||
- host: servicemesh.demo
|
||||
http:
|
||||
paths:
|
||||
- path: /api/playlists(/|$)(.*)
|
||||
backend:
|
||||
serviceName: playlists-api
|
||||
servicePort: 80
|
||||
|
||||
|
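Once the deployment, service, and ingress above are applied, the playlists API should be reachable through the ingress controller. A rough sketch of checking it, assuming `servicemesh.demo` resolves to your ingress controller's address (the IP below is only an example):

```
# point servicemesh.demo at the ingress controller
echo "127.0.0.1 servicemesh.demo" | sudo tee -a /etc/hosts

# call the playlists API through the ingress
curl http://servicemesh.demo/api/playlists
```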
18
kubernetes/servicemesh/applications/playlists-api/dockerfile
Normal file
@ -0,0 +1,18 @@
|
||||
FROM golang:1.15-alpine as build
|
||||
RUN apk add --no-cache git
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
COPY go.sum /src/
|
||||
COPY go.mod /src/
|
||||
RUN go mod download
|
||||
|
||||
COPY app.go /src
|
||||
|
||||
RUN go build -o app
|
||||
|
||||
FROM alpine:3.12
|
||||
|
||||
RUN mkdir -p /app
|
||||
COPY --from=build /src/app /app/app
|
||||
CMD ["./app/app"]
|
9
kubernetes/servicemesh/applications/playlists-api/go.mod
Normal file
@ -0,0 +1,9 @@
|
||||
module example.com/playlists-api
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7 // indirect
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/sirupsen/logrus v1.6.0 // indirect
|
||||
)
|
130
kubernetes/servicemesh/applications/playlists-api/go.sum
Normal file
@ -0,0 +1,130 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9 h1:h2Ul3Ym2iVZWMQGYmulVUJ4LSkBm1erp9mUkPwtMoLg=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o=
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7 h1:4HiY+qfsyz8OUr9zyAP2T1CJ0SFRY4mKFvm9TEznuv8=
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7/go.mod h1:FGJAWDWFht1sQ4qxyJHZZbVyvnVcKQN0E3u5/5lRz+g=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
go.opentelemetry.io/otel v0.7.0 h1:u43jukpwqR8EsyeJOMgrsUgZwVI1e1eVw7yuzRkD1l0=
|
||||
go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
@ -0,0 +1,36 @@
|
||||
*2
|
||||
$6
|
||||
SELECT
|
||||
$1
|
||||
0
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$9
|
||||
playlists
|
||||
$515
|
||||
|
||||
[
|
||||
{
|
||||
"id" : "1",
|
||||
"name" : "CI/CD",
|
||||
"videos": [ { "id" : "OFgziggbCOg"}, { "id" : "myCcJJ_Fk10"}, { "id" : "2WSJF7d8dUg"}]
|
||||
},
|
||||
{
|
||||
"id" : "2",
|
||||
"name" : "K8s in the Cloud",
|
||||
"videos": [ { "id" : "QThadS3Soig"}, { "id" : "eyvLwK5C2dw"}]
|
||||
},
|
||||
{
|
||||
"id" : "3",
|
||||
"name" : "Storage and MessageBrokers",
|
||||
"videos": [ { "id" : "JmCn7k0PlV4"}, { "id" : "_lpDfMkxccc"}]
|
||||
|
||||
},
|
||||
{
|
||||
"id" : "4",
|
||||
"name" : "K8s Autoscaling",
|
||||
"videos": [ { "id" : "jM36M39MA3I"}, { "id" : "FfDI08sgrYY"}]
|
||||
}
|
||||
]
|
||||
|
BIN
kubernetes/servicemesh/applications/playlists-db/configmap.yaml
Normal file
BIN
kubernetes/servicemesh/applications/playlists-db/configmap.yaml
Normal file
Binary file not shown.
59
kubernetes/servicemesh/applications/playlists-db/deploy.yaml
Normal file
59
kubernetes/servicemesh/applications/playlists-db/deploy.yaml
Normal file
@ -0,0 +1,59 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: playlists-db
|
||||
labels:
|
||||
app: playlists-db
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: playlists-db
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: playlists-db
|
||||
spec:
|
||||
initContainers:
|
||||
- name: data
|
||||
image: busybox
|
||||
command: [ "sh", "-c" ]
|
||||
args:
|
||||
- |
|
||||
cp /config/appendonly.aof /tmp/appendonly.aof
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /tmp/
|
||||
- name: config
|
||||
mountPath: /config/
|
||||
containers:
|
||||
- name: playlists-db
|
||||
image: redis:6.0-alpine
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /tmp/
|
||||
command: [ "redis-server"]
|
||||
args: ["--dir", "/tmp", "--appendonly", "yes"]
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
- name: config
|
||||
configMap:
|
||||
name: playlists-db
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: playlists-db
|
||||
labels:
|
||||
app: playlists-db
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: playlists-db
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 6379
|
||||
targetPort: 6379
|
85
kubernetes/servicemesh/applications/videos-api/app.go
Normal file
85
kubernetes/servicemesh/applications/videos-api/app.go
Normal file
@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"fmt"
|
||||
"context"
|
||||
"os"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
var environment = os.Getenv("ENVIRONMENT")
|
||||
var redis_host = os.Getenv("REDIS_HOST")
|
||||
var redis_port = os.Getenv("REDIS_PORT")
|
||||
var flaky = os.Getenv("FLAKY")
|
||||
|
||||
var ctx = context.Background()
|
||||
var rdb *redis.Client
|
||||
|
||||
func main() {
|
||||
|
||||
router := httprouter.New()
|
||||
|
||||
router.GET("/:id", func(w http.ResponseWriter, r *http.Request, p httprouter.Params){
|
||||
|
||||
if flaky == "true" {
|
||||
if rand.Intn(90) < 30 {
|
||||
panic("flaky error occurred ")
|
||||
}
|
||||
}
|
||||
|
||||
video := video(w,r,p)
|
||||
|
||||
cors(w)
|
||||
fmt.Fprintf(w, "%s", video)
|
||||
})
|
||||
|
||||
r := redis.NewClient(&redis.Options{
|
||||
Addr: redis_host + ":" + redis_port,
|
||||
DB: 0,
|
||||
})
|
||||
rdb = r
|
||||
|
||||
fmt.Println("Running...")
|
||||
log.Fatal(http.ListenAndServe(":10010", router))
|
||||
}
|
||||
|
||||
func video(writer http.ResponseWriter, request *http.Request, p httprouter.Params)(response string){
|
||||
|
||||
id := p.ByName("id")
|
||||
fmt.Print(id)
|
||||
|
||||
videoData, err := rdb.Get(ctx, id).Result()
|
||||
if err == redis.Nil {
|
||||
return "{}"
|
||||
} else if err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
return videoData
|
||||
}
|
||||
}
|
||||
|
||||
type stop struct {
|
||||
error
|
||||
}
|
||||
|
||||
func cors(writer http.ResponseWriter) {
|
||||
if environment == "DEBUG" {
|
||||
writer.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
|
||||
writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, X-MY-API-Version")
|
||||
writer.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
}
|
||||
}
|
||||
|
||||
type videos struct {
|
||||
Id string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
Imageurl string `json:"imageurl"`
|
||||
Url string `json:"url"`
|
||||
|
||||
}
|
55
kubernetes/servicemesh/applications/videos-api/deploy.yaml
Normal file
55
kubernetes/servicemesh/applications/videos-api/deploy.yaml
Normal file
@ -0,0 +1,55 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: videos-api
|
||||
labels:
|
||||
app: videos-api
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: videos-api
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: videos-api
|
||||
spec:
|
||||
containers:
|
||||
- name: videos-api
|
||||
image: aimvector/service-mesh:videos-api-1.0.0
|
||||
imagePullPolicy : Always
|
||||
ports:
|
||||
- containerPort: 10010
|
||||
env:
|
||||
- name: "ENVIRONMENT"
|
||||
value: "DEBUG"
|
||||
- name: "REDIS_HOST"
|
||||
value: "videos-db"
|
||||
- name: "REDIS_PORT"
|
||||
value: "6379"
|
||||
- name: "FLAKY"
|
||||
value: "false"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: videos-api
|
||||
labels:
|
||||
app: videos-api
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: videos-api
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 10010
|
||||
targetPort: 10010
|
||||
---
|
||||
|
||||
|
18
kubernetes/servicemesh/applications/videos-api/dockerfile
Normal file
18
kubernetes/servicemesh/applications/videos-api/dockerfile
Normal file
@ -0,0 +1,18 @@
|
||||
FROM golang:1.15-alpine as build
|
||||
RUN apk add --no-cache git
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
COPY go.sum /src/
|
||||
COPY go.mod /src/
|
||||
RUN go mod download
|
||||
|
||||
COPY app.go /src
|
||||
|
||||
RUN go build -o app
|
||||
|
||||
FROM alpine:3.12
|
||||
|
||||
RUN mkdir -p /app
|
||||
COPY --from=build /src/app /app/app
|
||||
CMD ["./app/app"]
|
9
kubernetes/servicemesh/applications/videos-api/go.mod
Normal file
9
kubernetes/servicemesh/applications/videos-api/go.mod
Normal file
@ -0,0 +1,9 @@
|
||||
module example.com/videos-api
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7 // indirect
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/sirupsen/logrus v1.6.0 // indirect
|
||||
)
|
130
kubernetes/servicemesh/applications/videos-api/go.sum
Normal file
130
kubernetes/servicemesh/applications/videos-api/go.sum
Normal file
@ -0,0 +1,130 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9 h1:h2Ul3Ym2iVZWMQGYmulVUJ4LSkBm1erp9mUkPwtMoLg=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o=
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7 h1:4HiY+qfsyz8OUr9zyAP2T1CJ0SFRY4mKFvm9TEznuv8=
|
||||
github.com/go-redis/redis/v8 v8.0.0-beta.7/go.mod h1:FGJAWDWFht1sQ4qxyJHZZbVyvnVcKQN0E3u5/5lRz+g=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
go.opentelemetry.io/otel v0.7.0 h1:u43jukpwqR8EsyeJOMgrsUgZwVI1e1eVw7yuzRkD1l0=
|
||||
go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
130
kubernetes/servicemesh/applications/videos-db/appendonly.aof
Normal file
130
kubernetes/servicemesh/applications/videos-db/appendonly.aof
Normal file
@ -0,0 +1,130 @@
|
||||
*2
|
||||
$6
|
||||
SELECT
|
||||
$1
|
||||
0
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
jM36M39MA3I
|
||||
$206
|
||||
{
|
||||
"id" : "jM36M39MA3I",
|
||||
"title" : "Kubernetes cluster autoscaling",
|
||||
"imageurl" : "https://i.ytimg.com/vi/jM36M39MA3I/sddefault.jpg",
|
||||
"url" : "https://youtu.be/jM36M39MA3I",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
FfDI08sgrYY
|
||||
$202
|
||||
{
|
||||
"id" : "FfDI08sgrYY",
|
||||
"title" : "Kubernetes pod autoscaling",
|
||||
"imageurl" : "https://i.ytimg.com/vi/FfDI08sgrYY/sddefault.jpg",
|
||||
"url" : "https://youtu.be/FfDI08sgrYY",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
JmCn7k0PlV4
|
||||
$195
|
||||
{
|
||||
"id" : "JmCn7k0PlV4",
|
||||
"title" : "Redis on Kubernetes",
|
||||
"imageurl" : "https://i.ytimg.com/vi/JmCn7k0PlV4/sddefault.jpg",
|
||||
"url" : "https://youtu.be/JmCn7k0PlV4",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
_lpDfMkxccc
|
||||
$198
|
||||
{
|
||||
"id" : "_lpDfMkxccc",
|
||||
"title" : "RabbitMQ on Kubernetes",
|
||||
"imageurl" : "https://i.ytimg.com/vi/_lpDfMkxccc/sddefault.jpg",
|
||||
"url" : "https://youtu.be/_lpDfMkxccc",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
OFgziggbCOg
|
||||
$183
|
||||
{
|
||||
"id" : "OFgziggbCOg",
|
||||
"title" : "Flux CD",
|
||||
"imageurl" : "https://i.ytimg.com/vi/OFgziggbCOg/sddefault.jpg",
|
||||
"url" : "https://youtu.be/OFgziggbCOg",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
myCcJJ_Fk10
|
||||
$184
|
||||
{
|
||||
"id" : "myCcJJ_Fk10",
|
||||
"title" : "Drone CI",
|
||||
"imageurl" : "https://i.ytimg.com/vi/myCcJJ_Fk10/sddefault.jpg",
|
||||
"url" : "https://youtu.be/myCcJJ_Fk10",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
2WSJF7d8dUg
|
||||
$183
|
||||
{
|
||||
"id" : "2WSJF7d8dUg",
|
||||
"title" : "Argo CD",
|
||||
"imageurl" : "https://i.ytimg.com/vi/2WSJF7d8dUg/sddefault.jpg",
|
||||
"url" : "https://youtu.be/2WSJF7d8dUg",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
QThadS3Soig
|
||||
$196
|
||||
{
|
||||
"id" : "QThadS3Soig",
|
||||
"title" : "Kubernetes on Amazon",
|
||||
"imageurl" : "https://i.ytimg.com/vi/QThadS3Soig/sddefault.jpg",
|
||||
"url" : "https://youtu.be/QThadS3Soig",
|
||||
"description" : ""
|
||||
}
|
||||
|
||||
*3
|
||||
$3
|
||||
set
|
||||
$11
|
||||
eyvLwK5C2dw
|
||||
$244
|
||||
{
|
||||
"id" : "eyvLwK5C2dw",
|
||||
"title" : "Kubernetes on Azure",
|
||||
"imageurl" : "https://i.ytimg.com/vi/eyvLwK5C2dw/mqdefault.jpg?sqp=CISC_PoF&rs=AOn4CLDo7kizrJozB0pxBhxL9JbyiW_EPw",
|
||||
"url" : "https://youtu.be/eyvLwK5C2dw",
|
||||
"description" : ""
|
||||
}
|
BIN
kubernetes/servicemesh/applications/videos-db/configmap.yaml
Normal file
BIN
kubernetes/servicemesh/applications/videos-db/configmap.yaml
Normal file
Binary file not shown.
59
kubernetes/servicemesh/applications/videos-db/deploy.yaml
Normal file
59
kubernetes/servicemesh/applications/videos-db/deploy.yaml
Normal file
@ -0,0 +1,59 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: videos-db
|
||||
labels:
|
||||
app: videos-db
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: videos-db
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: videos-db
|
||||
spec:
|
||||
initContainers:
|
||||
- name: data
|
||||
image: busybox
|
||||
command: [ "sh", "-c" ]
|
||||
args:
|
||||
- |
|
||||
cp /config/appendonly.aof /tmp/appendonly.aof
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /tmp/
|
||||
- name: config
|
||||
mountPath: /config/
|
||||
containers:
|
||||
- name: videos-db
|
||||
image: redis:6.0-alpine
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /tmp/
|
||||
command: [ "redis-server"]
|
||||
args: ["--dir", "/tmp", "--appendonly", "yes"]
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
- name: config
|
||||
configMap:
|
||||
name: videos-db
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: videos-db
|
||||
labels:
|
||||
app: videos-db
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: videos-db
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 6379
|
||||
targetPort: 6379
|
12
kubernetes/servicemesh/applications/videos-web/bootstrap.min.css
vendored
Normal file
12
kubernetes/servicemesh/applications/videos-web/bootstrap.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
62
kubernetes/servicemesh/applications/videos-web/deploy.yaml
Normal file
62
kubernetes/servicemesh/applications/videos-web/deploy.yaml
Normal file
@ -0,0 +1,62 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: videos-web
|
||||
labels:
|
||||
app: videos-web
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: videos-web
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: videos-web
|
||||
spec:
|
||||
containers:
|
||||
- name: videos-web
|
||||
image: aimvector/service-mesh:videos-web-1.0.0
|
||||
imagePullPolicy : Always
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: videos-web
|
||||
labels:
|
||||
app: videos-web
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: videos-web
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 80
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "false"
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
name: videos-web
|
||||
spec:
|
||||
rules:
|
||||
- host: servicemesh.demo
|
||||
http:
|
||||
paths:
|
||||
- path: /home(/|$)(.*)
|
||||
backend:
|
||||
serviceName: videos-web
|
||||
servicePort: 80
|
||||
|
@ -0,0 +1,8 @@
|
||||
FROM nginx:1.19-alpine
|
||||
|
||||
#config
|
||||
COPY ./nginx.conf /etc/nginx/nginx.conf
|
||||
|
||||
#content
|
||||
COPY ./*.html /usr/share/nginx/html/
|
||||
COPY ./*.css /usr/share/nginx/html/
|
99
kubernetes/servicemesh/applications/videos-web/index-v2.html
Normal file
99
kubernetes/servicemesh/applications/videos-web/index-v2.html
Normal file
@ -0,0 +1,99 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="utf-8" name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<link rel="stylesheet" href="bootstrap.min.css">
|
||||
<title>Video Catalog</title>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<noscript>You need to enable JavaScript to run this app.</noscript>
|
||||
|
||||
<article>
|
||||
<nav class="navbar navbar-light bg-light">
|
||||
<span class="navbar-brand mb-0 h1">Video Catalog: V2</span>
|
||||
</nav>
|
||||
<div ng-app="videos" ng-controller="videosController" class="container">
|
||||
<div class="row">
|
||||
<div id="accordion">
|
||||
|
||||
<div id="{{ l.id }}" ng-repeat="l in playlist" class="card">
|
||||
|
||||
<div class="card-header" id="heading{{ l.id }}">
|
||||
<h5 class="mb-0" style="text-align: center;">
|
||||
<button class="btn btn-link" data-toggle="collapse" data-target="#collapse{{ l.id }}" aria-expanded="true"
|
||||
aria-controls="collapse{{ l.id }}">
|
||||
{{ l.name }}
|
||||
</button>
|
||||
</h5>
|
||||
</div>
|
||||
|
||||
<div id="collapse{{ l.id }}" class="collapse show" aria-labelledby="heading{{ l.id }}" data-parent="#accordion">
|
||||
<div class="card-body">
|
||||
|
||||
<div class="row">
|
||||
<div class="col card" ng-repeat="v in l.videos" style="width: 18rem;">
|
||||
<img class="card-img-top" src="{{ v.imageurl }}" alt="Card image cap">
|
||||
<div class="card-body">
|
||||
<h5 class="card-title">{{ v.title }}</h5>
|
||||
<p class="card-text">{{ v.description }}</p>
|
||||
<a href="{{ v.url }}" class="btn btn-primary">Watch</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</article>
|
||||
|
||||
<hr />
|
||||
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
|
||||
|
||||
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/js/bootstrap.min.js"
|
||||
integrity="sha384-uefMccjFJAIv6A+rW+L4AHf99KvxDjWSu1z9VI8SKNVmz4sk7buKt/6v9KI65qnm"
|
||||
crossorigin="anonymous"></script>
|
||||
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.6.9/angular.min.js"></script>
|
||||
|
||||
<script>
|
||||
|
||||
var model = {
|
||||
playlist: [],
|
||||
};
|
||||
|
||||
var playlistApiUrl = "";
|
||||
if (location.hostname == "localhost"){
|
||||
playlistApiUrl = "http://localhost:81/"
|
||||
} else {
|
||||
playlistApiUrl = "http://" + location.hostname + "/api/playlists"
|
||||
}
|
||||
|
||||
var app = angular.module('videos', []);
|
||||
app.controller('videosController', function ($scope, $http) {
|
||||
|
||||
$http.get(playlistApiUrl)
|
||||
.then(function (response) {
|
||||
|
||||
console.log(response);
|
||||
//$scope.model = model;
|
||||
for (i = 0; i < response.data.length; ++i) {
|
||||
model.playlist.push(response.data[i]);
|
||||
}
|
||||
$scope.playlist = response.data;
|
||||
});
|
||||
});
|
||||
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
96
kubernetes/servicemesh/applications/videos-web/index.html
Normal file
96
kubernetes/servicemesh/applications/videos-web/index.html
Normal file
@ -0,0 +1,96 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="utf-8" name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<link rel="stylesheet" href="bootstrap.min.css">
|
||||
<title>Video Catalog</title>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<noscript>You need to enable JavaScript to run this app.</noscript>
|
||||
|
||||
<article>
|
||||
<div ng-app="videos" ng-controller="videosController" class="container">
|
||||
<div class="row">
|
||||
<div id="accordion">
|
||||
|
||||
<div id="{{ l.id }}" ng-repeat="l in playlist" class="card">
|
||||
|
||||
<div class="card-header" id="heading{{ l.id }}">
|
||||
<h5 class="mb-0" style="text-align: center;">
|
||||
<button class="btn btn-link" data-toggle="collapse" data-target="#collapse{{ l.id }}" aria-expanded="true"
|
||||
aria-controls="collapse{{ l.id }}">
|
||||
{{ l.name }}
|
||||
</button>
|
||||
</h5>
|
||||
</div>
|
||||
|
||||
<div id="collapse{{ l.id }}" class="collapse show" aria-labelledby="heading{{ l.id }}" data-parent="#accordion">
|
||||
<div class="card-body">
|
||||
|
||||
<div class="row">
|
||||
<div class="col card" ng-repeat="v in l.videos" style="width: 18rem;">
|
||||
<img class="card-img-top" src="{{ v.imageurl }}" alt="Card image cap">
|
||||
<div class="card-body">
|
||||
<h5 class="card-title">{{ v.title }}</h5>
|
||||
<p class="card-text">{{ v.description }}</p>
|
||||
<a href="{{ v.url }}" class="btn btn-primary">Watch</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</article>
|
||||
|
||||
<hr />
|
||||
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
|
||||
|
||||
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/js/bootstrap.min.js"
|
||||
integrity="sha384-uefMccjFJAIv6A+rW+L4AHf99KvxDjWSu1z9VI8SKNVmz4sk7buKt/6v9KI65qnm"
|
||||
crossorigin="anonymous"></script>
|
||||
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.6.9/angular.min.js"></script>
|
||||
|
||||
<script>
|
||||
|
||||
var model = {
|
||||
playlist: [],
|
||||
};
|
||||
|
||||
var playlistApiUrl = "";
|
||||
if (location.hostname == "localhost"){
|
||||
playlistApiUrl = "http://localhost:81/"
|
||||
} else {
|
||||
playlistApiUrl = "http://" + location.hostname + "/api/playlists"
|
||||
}
|
||||
|
||||
var app = angular.module('videos', []);
|
||||
app.controller('videosController', function ($scope, $http) {
|
||||
|
||||
$http.get(playlistApiUrl)
|
||||
.then(function (response) {
|
||||
|
||||
console.log(response);
|
||||
//$scope.model = model;
|
||||
for (i = 0; i < response.data.length; ++i) {
|
||||
model.playlist.push(response.data[i]);
|
||||
}
|
||||
$scope.playlist = response.data;
|
||||
});
|
||||
});
|
||||
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
35
kubernetes/servicemesh/applications/videos-web/nginx.conf
Normal file
35
kubernetes/servicemesh/applications/videos-web/nginx.conf
Normal file
@ -0,0 +1,35 @@
|
||||
user nginx;
|
||||
worker_processes 1;
|
||||
|
||||
error_log /var/log/nginx/error.log warn;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
access_log /var/log/nginx/access.log main;
|
||||
server {
|
||||
listen 80;
|
||||
|
||||
location / {
|
||||
gzip off;
|
||||
root /usr/share/nginx/html/;
|
||||
index index.html;
|
||||
#index index-v2.html;
|
||||
}
|
||||
|
||||
location ~* \.(js|jpg|png|css)$ {
|
||||
root /usr/share/nginx/html/;
|
||||
}
|
||||
}
|
||||
sendfile on;
|
||||
keepalive_timeout 65;
|
||||
}
|
43
kubernetes/servicemesh/docker-compose.yaml
Normal file
43
kubernetes/servicemesh/docker-compose.yaml
Normal file
@ -0,0 +1,43 @@
|
||||
version: "3.4"
|
||||
services:
|
||||
videos-web:
|
||||
container_name: videos-web
|
||||
image: aimvector/service-mesh:videos-web-1.0.0
|
||||
build:
|
||||
context: ./applications/videos-web
|
||||
ports:
|
||||
- 80:80
|
||||
playlists-api:
|
||||
container_name: playlists-api
|
||||
image: aimvector/service-mesh:playlists-api-1.0.0
|
||||
build:
|
||||
context: ./applications/playlists-api
|
||||
environment:
|
||||
- "ENVIRONMENT=DEBUG"
|
||||
- "REDIS_HOST=playlists-db"
|
||||
- "REDIS_PORT=6379"
|
||||
ports:
|
||||
- 81:10010
|
||||
videos-api:
|
||||
container_name: videos-api
|
||||
image: aimvector/service-mesh:videos-api-1.0.0
|
||||
build:
|
||||
context: ./applications/videos-api
|
||||
environment:
|
||||
- "ENVIRONMENT=DEBUG"
|
||||
- "REDIS_HOST=videos-db"
|
||||
- "REDIS_PORT=6379"
|
||||
ports:
|
||||
- 82:10010
|
||||
videos-db:
|
||||
container_name: videos-db
|
||||
image: redis:6.0-alpine
|
||||
command: [ "redis-server" , "--dir", "/tmp", "--appendonly", "yes"]
|
||||
volumes:
|
||||
- ./applications/videos-db/appendonly.aof:/tmp/appendonly.aof
|
||||
playlists-db:
|
||||
container_name: playlists-db
|
||||
image: redis:6.0-alpine
|
||||
command: [ "redis-server" , "--dir", "/tmp", "--appendonly", "yes"]
|
||||
volumes:
|
||||
- ./applications/playlists-db/appendonly.aof:/tmp/appendonly.aof
|
223
kubernetes/servicemesh/introduction.md
Normal file
223
kubernetes/servicemesh/introduction.md
Normal file
@ -0,0 +1,223 @@
|
||||
# An Introduction to Service Mesh
|
||||
|
||||
## A simple Web UI: videos-web
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
Consider `videos-web` <br/>
|
||||
It's an HTML application that lists a bunch of playlists with videos in them.
|
||||
|
||||
```
|
||||
+------------+
|
||||
| videos-web |
|
||||
| |
|
||||
+------------+
|
||||
```
|
||||
<br/>
|
||||
|
||||
## A simple API: playlists-api
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
For `videos-web` to get any content, it needs to make a call to `playlists-api`
|
||||
|
||||
```
|
||||
+------------+ +---------------+
|
||||
| videos-web +---->+ playlists-api |
|
||||
| | | |
|
||||
+------------+ +---------------+
|
||||
|
||||
```
|
||||
|
||||
Playlists consist of data like `title`, `description`, etc., and a list of `videos`. <br/>
|
||||
Playlists are stored in a database. <br/>
|
||||
The `playlists-api` stores its data in a database, `playlists-db`:
|
||||
|
||||
```
|
||||
+------------+ +---------------+ +--------------+
|
||||
| videos-web +---->+ playlists-api +--->+ playlists-db |
|
||||
| | | | | |
|
||||
+------------+ +---------------+ +--------------+
|
||||
|
||||
```
|
||||
|
||||
<br/>
|
||||
|
||||
## A little complexity
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
Each playlist item contains only a list of video IDs. <br/>
|
||||
A playlist does not have the full metadata of each video. <br/>
|
||||
|
||||
Example `playlist`:
|
||||
```
|
||||
{
|
||||
"id" : "playlist-01",
|
||||
"title": "Cool playlist",
|
||||
"videos" : [ "video-1", "video-x" , "video-b"]
|
||||
}
|
||||
```
|
||||
Note that `videos: []` above is a list of video IDs. <br/>
|
||||
|
||||
Videos have their own `title` and `description` and other metadata. <br/>
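For example, a single video record, taken verbatim from the data in `applications/videos-db/appendonly.aof`, looks like this:

```
{
  "id" : "jM36M39MA3I",
  "title" : "Kubernetes cluster autoscaling",
  "imageurl" : "https://i.ytimg.com/vi/jM36M39MA3I/sddefault.jpg",
  "url" : "https://youtu.be/jM36M39MA3I",
  "description" : ""
}
```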
|
||||
|
||||
To get this data, we need a `videos-api` <br/>
|
||||
This `videos-api` has its own database too <br/>
|
||||
|
||||
```
|
||||
+------------+ +-----------+
|
||||
| videos-api +------>+ videos-db |
|
||||
| | | |
|
||||
+------------+ +-----------+
|
||||
```
|
||||
|
||||
For the `playlists-api` to load all the video data, it needs to call `videos-api` for each video ID it has.<br/>
|
||||
<br/>
|
||||
|
||||
## Traffic flow
|
||||
<hr/>
|
||||
<br/>
|
||||
A single `GET` request to the `playlists-api` will get all the playlists
|
||||
from its database with a single DB call <br/>
|
||||
|
||||
For every playlist and every video in each list, a separate `GET` call will be made to the `videos-api` which will
|
||||
retrieve the video metadata from its database. <br/>
|
||||
|
||||
This will result in many network fan-outs between `playlists-api` and `videos-api`, and many calls to the `videos-api` database. <br/>
|
||||
This is intentional to demonstrate a busy network.
|
||||
|
||||
<br/>
|
||||
|
||||
## Full application architecture
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
```
|
||||
|
||||
+------------+ +---------------+ +--------------+
|
||||
| videos-web +---->+ playlists-api +--->+ playlists-db |
|
||||
| | | | | |
|
||||
+------------+ +-----+---------+ +--------------+
|
||||
|
|
||||
v
|
||||
+-----+------+ +-----------+
|
||||
| videos-api +------>+ videos-db |
|
||||
| | | |
|
||||
+------------+ +-----------+
|
||||
|
||||
```
|
||||
|
||||
## Adding an Ingress Controller
|
||||
|
||||
Adding an ingress controller allows us to route all our traffic. <br/>
|
||||
We set up a `hosts` file entry `127.0.0.1 servicemesh.demo`
|
||||
and `port-forward` to the `ingress-controller`:
|
||||
|
||||
|
||||
```
|
||||
servicemesh.demo/home --> videos-web
|
||||
servicemesh.demo/api/playlists --> playlists-api
|
||||
|
||||
|
||||
servicemesh.demo/home/ +--------------+
|
||||
+------------------------------> | videos-web |
|
||||
| | |
|
||||
servicemesh.demo/home/ +------+------------+ +--------------+
|
||||
+------------------>+ingress-nginx |
|
||||
|Ingress controller |
|
||||
+------+------------+ +---------------+ +--------------+
|
||||
| | playlists-api +--->+ playlists-db |
|
||||
+------------------------------> | | | |
|
||||
servicemesh.demo/api/playlists +-----+---------+ +--------------+
|
||||
|
|
||||
v
|
||||
+-----+------+ +-----------+
|
||||
| videos-api +------>+ videos-db |
|
||||
| | | |
|
||||
+------------+ +-----------+
|
||||
|
||||
|
||||
|
||||
```
|
||||
<br/>
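A minimal sketch of the Ingress rule for `/api/playlists`, following the same pattern as the `/home` rule shipped with `applications/videos-web/deploy.yaml`; the real `playlists-api` manifest is not shown in this commit, so the path regex and service port below are assumptions:

```
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: playlists-api
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - host: servicemesh.demo
    http:
      paths:
      - path: /api/playlists(/|$)(.*)
        backend:
          serviceName: playlists-api
          servicePort: 80
```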
|
||||
|
||||
## Run the apps: Docker
|
||||
<hr/>
|
||||
<br/>
|
||||
There is a `docker-compose.yaml` in this directory. <br/>
|
||||
Change your terminal to this folder and run:
|
||||
|
||||
```
|
||||
docker-compose build
|
||||
|
||||
docker-compose up
|
||||
|
||||
```
|
||||
|
||||
You can access the app on `http://localhost`
|
||||
|
||||
<br/>
|
||||
|
||||
## Run the apps: Kubernetes
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
Create a cluster with [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
|
||||
|
||||
```
|
||||
kind create cluster --name servicemesh --image kindest/node:v1.18.4
|
||||
```
|
||||
<br/>
|
||||
|
||||
### Deploy videos-web
|
||||
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
```
|
||||
cd ./kubernetes/servicemesh/
|
||||
|
||||
kubectl apply -f applications/videos-web/deploy.yaml
|
||||
kubectl port-forward svc/videos-web 80:80
|
||||
|
||||
```
|
||||
|
||||
You should see a blank page at `http://localhost/` <br/>
|
||||
It's blank because it needs the `playlists-api` to get data
|
||||
|
||||
<br/>
|
||||
|
||||
### Deploy playlists-api and database
|
||||
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
```
|
||||
cd ./kubernetes/servicemesh/
|
||||
|
||||
kubectl apply -f applications/playlists-api/deploy.yaml
|
||||
kubectl apply -f applications/playlists-db/
|
||||
kubectl port-forward svc/playlists-api 81:80
|
||||
|
||||
```
|
||||
|
||||
You should see an empty playlists page at `http://localhost/` <br/>
|
||||
Playlists are empty because the `playlists-api` needs the `videos-api` to get video data <br/>
|
||||
|
||||
<br/>
|
||||
|
||||
### Deploy videos-api and database
|
||||
|
||||
<hr/>
|
||||
<br/>
|
||||
|
||||
```
|
||||
cd ./kubernetes/servicemesh/
|
||||
|
||||
kubectl apply -f applications/videos-api/deploy.yaml
|
||||
kubectl apply -f applications/videos-db/
|
||||
```
|
||||
|
||||
Refresh the page at `http://localhost/` <br/>
|
||||
You should now see the complete application working in the browser <br/>
|
285
kubernetes/servicemesh/istio/README.md
Normal file
285
kubernetes/servicemesh/istio/README.md
Normal file
@ -0,0 +1,285 @@
|
||||
# Introduction to Istio
|
||||
|
||||
## We need a Kubernetes cluster
|
||||
|
||||
Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
|
||||
|
||||
```
|
||||
kind create cluster --name istio --image kindest/node:v1.19.1
|
||||
```
|
||||
|
||||
## Deploy our microservices (Video catalog)
|
||||
|
||||
```
|
||||
# ingress controller
|
||||
kubectl create ns ingress-nginx
|
||||
kubectl apply -f kubernetes/servicemesh/applications/ingress-nginx/
|
||||
|
||||
# applications
|
||||
kubectl apply -f kubernetes/servicemesh/applications/playlists-api/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/playlists-db/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-api/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-web/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-db/
|
||||
```
|
||||
|
||||
## Make sure our applications are running
|
||||
|
||||
```
|
||||
kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
playlists-api-d7f64c9c6-rfhdg 1/1 Running 0 2m19s
|
||||
playlists-db-67d75dc7f4-p8wk5 1/1 Running 0 2m19s
|
||||
videos-api-7769dfc56b-fsqsr 1/1 Running 0 2m18s
|
||||
videos-db-74576d7c7d-5ljdh 1/1 Running 0 2m18s
|
||||
videos-web-598c76f8f-chhgm 1/1 Running 0 100s
|
||||
|
||||
```
|
||||
|
||||
## Make sure our ingress controller is running
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
nginx-ingress-controller-6fbb446cff-8fwxz 1/1 Running 0 2m38s
|
||||
nginx-ingress-controller-6fbb446cff-zbw7x 1/1 Running 0 2m38s
|
||||
|
||||
```
|
||||
|
||||
We'll need a fake DNS name `servicemesh.demo` <br/>
|
||||
Let's fake one by adding the following entry in our hosts (`C:\Windows\System32\drivers\etc\hosts`) file: <br/>
|
||||
|
||||
```
|
||||
127.0.0.1 servicemesh.demo
|
||||
|
||||
```
|
||||
|
||||
## Let's access our applications via Ingress
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx port-forward deploy/nginx-ingress-controller 80
|
||||
```
|
||||
|
||||
## Access our application in the browser
|
||||
|
||||
We should be able to access our site under `http://servicemesh.demo/home/`
|
||||
|
||||
<br/>
|
||||
<hr/>
|
||||
|
||||
# Getting Started with Istio
|
||||
|
||||
Firstly, I like to do most of my work in containers so everything is reproducible <br/>
|
||||
and my machine remains clean.
|
||||
|
||||
## Get a container to work in
|
||||
<br/>
|
||||
Run a small `alpine linux` container where we can install and play with `istio`: <br/>
|
||||
|
||||
|
||||
```
|
||||
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh
|
||||
|
||||
# install curl & kubectl
|
||||
apk add --no-cache curl nano
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
export KUBE_EDITOR="nano"
|
||||
|
||||
#test cluster access:
|
||||
/work # kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
istio-control-plane Ready master 26m v1.18.4
|
||||
|
||||
```
|
||||
|
||||
## Install Istio CLI
|
||||
|
||||
```
|
||||
curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.6.12 TARGET_ARCH=x86_64 sh -
|
||||
|
||||
mv istio-1.6.12/bin/istioctl /usr/local/bin/
|
||||
chmod +x /usr/local/bin/istioctl
|
||||
mv istio-1.6.12 /tmp/
|
||||
|
||||
```
|
||||
|
||||
## Pre flight checks
|
||||
|
||||
Istio has a great capability to check compatibility with the target cluster <br/>
|
||||
|
||||
```
|
||||
istioctl x precheck
|
||||
|
||||
```
|
||||
|
||||
## Istio Profiles
|
||||
|
||||
https://istio.io/latest/docs/setup/additional-setup/config-profiles/
|
||||
|
||||
```
|
||||
istioctl profile list
|
||||
|
||||
istioctl install --set profile=default
|
||||
|
||||
kubectl -n istio-system get pods
|
||||
|
||||
istioctl proxy-status
|
||||
|
||||
```
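The `--set profile=default` install can also be expressed declaratively. A minimal sketch of an `IstioOperator` resource for the same profile (the resource name here is an assumption, not part of this repo) that could be passed to `istioctl install -f <file>`:

```
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: default-profile-install
spec:
  profile: default
```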
|
||||
|
||||
# Mesh our video catalog services
|
||||
|
||||
There are 2 ways to mesh:
|
||||
|
||||
1) Automated Injection:
|
||||
|
||||
You can set the `istio-injection=enabled` label on a namespace to have the Istio sidecar automatically injected into any pod that gets created in the labelled namespace (a declarative form of this label is sketched after the injection commands below)
|
||||
|
||||
This is a more permanent solution:
|
||||
Pods will need to be recreated for injection to occur
|
||||
|
||||
```
|
||||
kubectl label namespace/default istio-injection=enabled
|
||||
|
||||
# restart all pods to get sidecar injected
|
||||
kubectl delete pods --all
|
||||
```
|
||||
|
||||
2) Manual Injection:
|
||||
This may only be temporary as your CI/CD system may roll out the previous YAML.
|
||||
You may want to add this command to your CI/CD to keep only certain deployments part of the mesh.
|
||||
|
||||
```
|
||||
kubectl get deploy
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
playlists-api 1/1 1 1 8h
|
||||
playlists-db 1/1 1 1 8h
|
||||
videos-api 1/1 1 1 8h
|
||||
videos-db 1/1 1 1 8h
|
||||
videos-web 1/1 1 1 8h
|
||||
|
||||
# Let's manually inject the istio sidecar into our Ingress Controller:
|
||||
|
||||
kubectl -n ingress-nginx get deploy nginx-ingress-controller -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
|
||||
# You can manually inject istio sidecar to every deployment like this:
|
||||
|
||||
kubectl get deploy playlists-api -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
kubectl get deploy playlists-db -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
kubectl get deploy videos-api -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
kubectl get deploy videos-db -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
kubectl get deploy videos-web -o yaml | istioctl kube-inject -f - | kubectl apply -f -
|
||||
|
||||
|
||||
```
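For reference, the `istio-injection=enabled` label used in option 1 above can also be managed declaratively rather than with `kubectl label`. A minimal sketch of the `default` namespace carrying the label:

```
apiVersion: v1
kind: Namespace
metadata:
  name: default
  labels:
    istio-injection: enabled
```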
|
||||
|
||||
# TCP / HTTP traffic
|
||||
|
||||
Let's run a `curl` loop to generate some traffic to our site <br/>
|
||||
We'll make a call to `/home/` and to simulate the browser making a call to get the playlists, <br/>
|
||||
we'll make a follow-up call to `/api/playlists`
|
||||
|
||||
```
|
||||
While ($true) { curl -UseBasicParsing http://servicemesh.demo/home/;curl -UseBasicParsing http://servicemesh.demo/api/playlists; Start-Sleep -Seconds 1;}
|
||||
```
|
||||
|
||||
|
||||
# Observability
|
||||
|
||||
|
||||
## Grafana
|
||||
|
||||
```
|
||||
kubectl apply -n istio-system -f /tmp/istio-1.6.12/samples/addons/grafana.yaml
|
||||
```
|
||||
|
||||
We can see the components in the `istio-system` namespace:
|
||||
```
|
||||
kubectl -n istio-system get pods
|
||||
```
|
||||
|
||||
Access the Grafana dashboards:
|
||||
|
||||
```
|
||||
kubectl -n istio-system port-forward svc/grafana 3000
|
||||
```
|
||||
|
||||
## Kiali
|
||||
|
||||
`NOTE: this may fail because the CRDs still need to be generated; if so, just rerun the command:`
|
||||
|
||||
```
|
||||
kubectl apply -f /tmp/istio-1.6.12/samples/addons/kiali.yaml
|
||||
|
||||
kubectl -n istio-system get pods
|
||||
kubectl -n istio-system port-forward svc/kiali 20001
|
||||
```
|
||||
|
||||
# Virtual Services
|
||||
|
||||
## Auto Retry
|
||||
|
||||
Let's add a fault to the `videos-api` by setting the `env` variable `FLAKY=true`:
|
||||
|
||||
```
|
||||
kubectl edit deploy videos-api
|
||||
```
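In the editor, flip the `FLAKY` environment variable on the `videos-api` container from `"false"` to `"true"`. This is an excerpt of the container `env` block from `applications/videos-api/deploy.yaml` with only that value changed; per `app.go`, roughly a third of requests will then panic:

```
        env:
        - name: "ENVIRONMENT"
          value: "DEBUG"
        - name: "REDIS_HOST"
          value: "videos-db"
        - name: "REDIS_PORT"
          value: "6379"
        - name: "FLAKY"
          value: "true"
```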
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/servicemesh/istio/retries/videos-api.yaml
|
||||
```
|
||||
|
||||
We can describe pods using `istioctl`
|
||||
|
||||
```
|
||||
# istioctl x describe pod <videos-api-POD-NAME>
|
||||
|
||||
istioctl x describe pod videos-api-584768f497-jjrqd
|
||||
Pod: videos-api-584768f497-jjrqd
|
||||
Pod Ports: 10010 (videos-api), 15090 (istio-proxy)
|
||||
Suggestion: add 'version' label to pod for Istio telemetry.
|
||||
--------------------
|
||||
Service: videos-api
|
||||
Port: http 10010/HTTP targets pod port 10010
|
||||
VirtualService: videos-api
|
||||
1 HTTP route(s)
|
||||
```
|
||||
|
||||
Analyse our namespace:
|
||||
|
||||
```
|
||||
istioctl analyze --namespace default
|
||||
```
|
||||
|
||||
## Traffic Splits
|
||||
|
||||
Let's deploy V2 of our application, which has a header that's under development:
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/servicemesh/istio/traffic-splits/videos-web-v2.yaml
|
||||
|
||||
# we can see v2 pods
|
||||
kubectl get pods
|
||||
|
||||
```
|
||||
|
||||
Let's send 50% of traffic to V1 and 50% to V2 by using a `VirtualService`
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/servicemesh/istio/traffic-splits/videos-web.yaml
|
||||
```
|
||||
|
||||
## Canary Deployments
|
||||
|
||||
Traffic splits have their uses, but sometimes we may want to route traffic to other <br/>
|
||||
parts of the system using feature toggles, for example by setting a `cookie`. <br/>
|
||||
<br/>
|
||||
Let's send all users that have the cookie value `version=v2` to V2 of our `videos-web`.
|
||||
|
||||
```
|
||||
kubectl apply -f kubernetes/servicemesh/istio/canary/videos-web.yaml
|
||||
```
|
||||
|
||||
We can confirm this works by setting the cookie value `version=v2` and then accessing https://servicemesh.demo/home/ in the browser <br/>
|
23
kubernetes/servicemesh/istio/canary/videos-web.yaml
Normal file
23
kubernetes/servicemesh/istio/canary/videos-web.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: videos-web-canary
|
||||
spec:
|
||||
hosts:
|
||||
- servicemesh.demo
|
||||
http:
|
||||
- match:
|
||||
- uri:
|
||||
prefix: /
|
||||
headers:
|
||||
cookie:
|
||||
regex: ^(.*?;)?(version=v2)(;.*)?$
|
||||
route:
|
||||
- destination:
|
||||
host: videos-web-v2
|
||||
- match:
|
||||
- uri:
|
||||
prefix: /
|
||||
route:
|
||||
- destination:
|
||||
host: videos-web
|
14
kubernetes/servicemesh/istio/retries/videos-api.yaml
Normal file
14
kubernetes/servicemesh/istio/retries/videos-api.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: videos-api
|
||||
spec:
|
||||
hosts:
|
||||
- videos-api
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: videos-api
|
||||
retries:
|
||||
attempts: 10
|
||||
perTryTimeout: 2s
|
@ -0,0 +1,43 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: videos-web-v2
|
||||
labels:
|
||||
app: videos-web-v2
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: videos-web-v2
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: videos-web-v2
|
||||
spec:
|
||||
containers:
|
||||
- name: videos-web-v2
|
||||
image: aimvector/service-mesh:videos-web-2.0.0
|
||||
imagePullPolicy : Always
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: videos-web-v2
|
||||
labels:
|
||||
app: videos-web-v2
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: videos-web-v2
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 80
|
15
kubernetes/servicemesh/istio/traffic-splits/videos-web.yaml
Normal file
15
kubernetes/servicemesh/istio/traffic-splits/videos-web.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: videos-web
|
||||
spec:
|
||||
hosts:
|
||||
- servicemesh.demo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: videos-web-v2
|
||||
weight: 50
|
||||
- destination:
|
||||
host: videos-web
|
||||
weight: 50
|
3555
kubernetes/servicemesh/linkerd/manifest/linkerd-edge-20.10.1.yaml
Normal file
3555
kubernetes/servicemesh/linkerd/manifest/linkerd-edge-20.10.1.yaml
Normal file
File diff suppressed because it is too large
Load Diff
250
kubernetes/servicemesh/linkerd/readme.md
Normal file
250
kubernetes/servicemesh/linkerd/readme.md
Normal file
@ -0,0 +1,250 @@
|
||||
# Introduction to Linkerd
|
||||
|
||||
## We need a Kubernetes cluster
|
||||
|
||||
Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
|
||||
|
||||
```
|
||||
kind create cluster --name linkerd --image kindest/node:v1.19.1
|
||||
```
|
||||
|
||||
## Deploy our microservices (Video catalog)
|
||||
|
||||
```
|
||||
# ingress controller
|
||||
kubectl create ns ingress-nginx
|
||||
kubectl apply -f kubernetes/servicemesh/applications/ingress-nginx/
|
||||
|
||||
# applications
|
||||
kubectl apply -f kubernetes/servicemesh/applications/playlists-api/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/playlists-db/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-web/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-api/
|
||||
kubectl apply -f kubernetes/servicemesh/applications/videos-db/
|
||||
```
|
||||
|
||||
## Make sure our applications are running
|
||||
|
||||
```
|
||||
kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
playlists-api-d7f64c9c6-rfhdg 1/1 Running 0 2m19s
|
||||
playlists-db-67d75dc7f4-p8wk5 1/1 Running 0 2m19s
|
||||
videos-api-7769dfc56b-fsqsr 1/1 Running 0 2m18s
|
||||
videos-db-74576d7c7d-5ljdh 1/1 Running 0 2m18s
|
||||
videos-web-598c76f8f-chhgm 1/1 Running 0 100s
|
||||
|
||||
```
|
||||
|
||||
## Make sure our ingress controller is running
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
nginx-ingress-controller-6fbb446cff-8fwxz 1/1 Running 0 2m38s
|
||||
nginx-ingress-controller-6fbb446cff-zbw7x 1/1 Running 0 2m38s
|
||||
|
||||
```
|
||||
|
||||
We'll need a fake DNS name `servicemesh.demo` <br/>
|
||||
Let's fake one by adding the following entry in our hosts (`C:\Windows\System32\drivers\etc\hosts`) file: <br/>
|
||||
|
||||
```
|
||||
127.0.0.1 servicemesh.demo
|
||||
|
||||
```
|
||||
|
||||
## Let's access our applications via Ingress
|
||||
|
||||
```
|
||||
kubectl -n ingress-nginx port-forward deploy/nginx-ingress-controller 80
|
||||
```
|
||||
|
||||
## Access our application in the browser
|
||||
|
||||
We should be able to access our site under `http://servicemesh.demo/home/`
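
If you'd rather check from a terminal (assuming the port-forward above is still running and `curl` is available in your shell), a quick smoke test could be:

```
# expect an HTTP 200 status line from the ingress
curl -sI http://servicemesh.demo/home/ | head -n 1
```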

<br/>
<hr/>

# Getting Started with Linkerd

Firstly, I like to do most of my work in containers so everything is reproducible <br/>
and my machine remains clean.

## Get a container to work in
<br/>
Run a small `alpine linux` container where we can install and play with `linkerd`: <br/>

```
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh

# install curl, nano & kubectl
apk add --no-cache curl nano
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
export KUBE_EDITOR="nano"

# test cluster access:
/work # kubectl get nodes
NAME                    STATUS   ROLES    AGE   VERSION
linkerd-control-plane   Ready    master   26m   v1.19.1
```

## Linkerd CLI

Let's download the `linkerd` command line tool <br/>
I grabbed the `edge-20.10.1` release using `curl`

You can go to the [releases](https://github.com/linkerd/linkerd2/releases/tag/edge-20.10.1) page to get it

```
curl -L -o linkerd https://github.com/linkerd/linkerd2/releases/download/edge-20.10.1/linkerd2-cli-edge-20.10.1-linux-amd64
chmod +x linkerd && mv ./linkerd /usr/local/bin/

linkerd --help
```
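
To confirm the binary is installed correctly before touching the cluster, we can check the client version (this works without cluster access):

```
linkerd version --client
```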

## Pre-flight checks

Linkerd can check that the target cluster is compatible before we install anything <br/>

```
linkerd check --pre
```

## Get the YAML

```
linkerd install > ./kubernetes/servicemesh/linkerd/manifest/linkerd-edge-20.10.1.yaml
```

## Install Linkerd

```
kubectl apply -f ./kubernetes/servicemesh/linkerd/manifest/linkerd-edge-20.10.1.yaml
```

Let's wait until all components are running

```
watch kubectl -n linkerd get pods
kubectl -n linkerd get svc
```
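
If you'd rather not sit on `watch`, something like this should block until the control-plane pods report Ready (or the timeout expires):

```
# wait up to 5 minutes for all linkerd pods to become Ready
kubectl -n linkerd wait --for=condition=Ready pods --all --timeout=300s
```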

## Do a final check

```
linkerd check
```

## The dashboard

Let's access the `linkerd` dashboard via `port-forward`

```
kubectl -n linkerd port-forward svc/linkerd-web 8084
```

# Mesh our video catalog services

There are 2 ways to mesh:

1) We can add an annotation to our deployment so the mesh persists when the YAML is part of a GitOps flow.
This is the more permanent option (a `kubectl patch` alternative is sketched right after this snippet):

```
template:
  metadata:
    annotations:
      linkerd.io/inject: enabled
```
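
If we'd rather not edit the manifests by hand, a rough equivalent is to patch the annotation onto the pod template directly. This is only a sketch for `playlists-api`; repeat it per deployment:

```
# add the linkerd injection annotation to the pod template
kubectl patch deployment playlists-api -p '{"spec":{"template":{"metadata":{"annotations":{"linkerd.io/inject":"enabled"}}}}}'
```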

2) Or inject `linkerd` on the fly.
This may only be temporary, as our CI/CD system may roll out the previous YAML again:

```
kubectl get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
playlists-api   1/1     1            1           8h
playlists-db    1/1     1            1           8h
videos-api      1/1     1            1           8h
videos-db       1/1     1            1           8h
videos-web      1/1     1            1           8h

kubectl get deploy playlists-api -o yaml | linkerd inject - | kubectl apply -f -
kubectl get deploy playlists-db -o yaml | linkerd inject - | kubectl apply -f -
kubectl get deploy videos-api -o yaml | linkerd inject - | kubectl apply -f -
kubectl get deploy videos-db -o yaml | linkerd inject - | kubectl apply -f -
kubectl get deploy videos-web -o yaml | linkerd inject - | kubectl apply -f -
kubectl -n ingress-nginx get deploy nginx-ingress-controller -o yaml | linkerd inject - | kubectl apply -f -
```

# Generate some traffic

Let's run a `curl` loop to generate some traffic to our site </br>
We'll make a call to `/home/` and, to simulate the browser fetching the playlists, <br/>
we'll make a follow-up call to `/api/playlists`

```
While ($true) { curl -UseBasicParsing http://servicemesh.demo/home/;curl -UseBasicParsing http://servicemesh.demo/api/playlists; Start-Sleep -Seconds 1;}

linkerd -n default check --proxy

linkerd -n default stat deploy
```
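
The loop above is PowerShell; on Linux/macOS (or inside the alpine container from earlier), a rough bash equivalent would be:

```
# hit the site and the playlists API once a second
while true; do
  curl -s -o /dev/null http://servicemesh.demo/home/
  curl -s -o /dev/null http://servicemesh.demo/api/playlists
  sleep 1
done
```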

# Add faulty behaviour to the videos API

```
kubectl edit deploy videos-api

# set the environment variable FLAKY=true
```
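
Instead of editing the deployment interactively, we could set the variable in one shot (assuming the videos API simply reads `FLAKY` from its environment, as implied above):

```
kubectl set env deployment/videos-api FLAKY=true
```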

# Service Profile

```
linkerd profile -n default videos-api --tap deploy/videos-api --tap-duration 10s
```

After crafting the `serviceprofile`, we can apply it using `kubectl`

```
kubectl apply -f kubernetes/servicemesh/linkerd/serviceprofiles/videos-api.yaml
```

We can see that the service profile helps us put retry policies in place: <br/>

```
linkerd routes -n default deploy/playlists-api --to svc/videos-api -o wide
linkerd top deploy/videos-api
```

# Mutual TLS

We can validate that mTLS is working:

```
/work # linkerd -n default edges deployment
SRC                  DST             SRC_NS    DST_NS    SECURED
playlists-api        videos-api      default   default   √
linkerd-prometheus   playlists-api   linkerd   default   √
linkerd-prometheus   playlists-db    linkerd   default   √
linkerd-prometheus   videos-api      linkerd   default   √
linkerd-prometheus   videos-db       linkerd   default   √
linkerd-prometheus   videos-web      linkerd   default   √
linkerd-tap          playlists-api   linkerd   default   √
linkerd-tap          playlists-db    linkerd   default   √
linkerd-tap          videos-api      linkerd   default   √
linkerd-tap          videos-db       linkerd   default   √
linkerd-tap          videos-web      linkerd   default   √

linkerd -n default tap deploy
```
@ -0,0 +1,12 @@
apiVersion: linkerd.io/v1alpha2
kind: ServiceProfile
metadata:
  name: videos-api.default.svc.cluster.local
  namespace: default
spec:
  routes:
  - condition:
      method: GET
      pathRegex: /.*
    name: AUTO RETRY ALL
    isRetryable: true
34
kubernetes/servicemesh/readme.md
Normal file
34
kubernetes/servicemesh/readme.md
Normal file
@ -0,0 +1,34 @@
# Introduction to Service Mesh

To understand service mesh, we need a good use case. <br/>
We need some service-to-service communication. <br/>
A basic microservice architecture will do. <br/>

Read Me: [The Introduction Guide](./introduction.md)

Video :point_down: <br/>

<a href="https://www.youtube.com/playlist?list=PLHq1uqvAteVsmxHpGsMjTOROn3i99lzTA" title="Service Mesh Intro"><img src="https://i.ytimg.com/vi/rVNPnHeGYBE/hqdefault.jpg" width="45%" height="45%" alt="Cloud Guide" /></a>

# Service Mesh Guides

## Introduction to Linkerd

Getting started with Linkerd

Read Me: [readme](./linkerd/readme.md)

Video :point_down: <br/>

<a href="https://youtu.be/Hc-XFPHDDk4" title="Cloud K8s"><img src="https://i.ytimg.com/vi/Hc-XFPHDDk4/hqdefault.jpg" width="45%" height="45%" alt="Linkerd" /></a>

## Introduction to Istio

Getting started with Istio

Read Me: [readme](./istio/README.md)

Video :point_down: <br/>

<!-- <a href="https://youtu.be/Hc-XFPHDDk4" title="Cloud K8s"><img src="https://i.ytimg.com/vi/Hc-XFPHDDk4/hqdefault.jpg" width="45%" height="45%" alt="Linkerd" /></a> -->
32
kubernetes/statefulsets/example-app.yaml
Normal file
32
kubernetes/statefulsets/example-app.yaml
Normal file
@ -0,0 +1,32 @@
apiVersion: v1
kind: Service
metadata:
  name: hit-counter-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 5000
  selector:
    app: myapp
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hit-counter-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: aimvector/api-redis-ha:1.0
        ports:
        - containerPort: 5000
31
kubernetes/statefulsets/notes.md
Normal file
31
kubernetes/statefulsets/notes.md
Normal file
@ -0,0 +1,31 @@

# Create a namespace

```
kubectl create ns example
```

# Check the storageclass for host path provisioner

```
kubectl get storageclass
```

# Deploy our statefulset

```
kubectl -n example apply -f .\kubernetes\statefulsets\statefulset.yaml
kubectl -n example apply -f .\kubernetes\statefulsets\example-app.yaml
```
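
StatefulSet pods are created one at a time in ordinal order, so it can be useful to watch them come up (using the `app=redis-cluster` label from `statefulset.yaml`):

```
kubectl -n example get pods -l app=redis-cluster -w
```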

# Enable Redis Cluster

```
$IPs = $(kubectl -n example get pods -l app=redis-cluster -o jsonpath='{range.items[*]}{.status.podIP}:6379 ')
kubectl -n example exec -it redis-cluster-0 -- /bin/sh -c "redis-cli -h 127.0.0.1 -p 6379 --cluster create ${IPs}"
kubectl -n example exec -it redis-cluster-0 -- /bin/sh -c "redis-cli -h 127.0.0.1 -p 6379 cluster info"
```
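
Each replica also gets its own PersistentVolumeClaim from the `volumeClaimTemplates`, and we can inspect cluster membership directly; for example:

```
# one PVC per replica, plus the cluster's view of its nodes
kubectl -n example get pvc
kubectl -n example exec -it redis-cluster-0 -- redis-cli cluster nodes
```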

# More info

https://rancher.com/blog/2019/deploying-redis-cluster
86
kubernetes/statefulsets/statefulset.yaml
Normal file
86
kubernetes/statefulsets/statefulset.yaml
Normal file
@ -0,0 +1,86 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster
data:
  update-node.sh: |
    #!/bin/sh
    REDIS_NODES="/data/nodes.conf"
    sed -i -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${REDIS_NODES}
    exec "$@"
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    cluster-config-file /data/nodes.conf
    cluster-migration-barrier 1
    appendonly yes
    protected-mode no
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
spec:
  serviceName: redis-cluster
  replicas: 6
  selector:
    matchLabels:
      app: redis-cluster
  template:
    metadata:
      labels:
        app: redis-cluster
    spec:
      containers:
      - name: redis
        image: redis:5.0.1-alpine
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        command: ["/conf/update-node.sh", "redis-server", "/conf/redis.conf"]
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      volumes:
      - name: conf
        configMap:
          name: redis-cluster
          defaultMode: 0755
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "hostpath"
      resources:
        requests:
          storage: 50Mi
---
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
spec:
  clusterIP: None
  ports:
  - port: 6379
    targetPort: 6379
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  selector:
    app: redis-cluster
156
kubernetes/velero/README.md
Normal file
156
kubernetes/velero/README.md
Normal file
@ -0,0 +1,156 @@
# Introduction to Velero

## We need a Kubernetes cluster

Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)

```
kind create cluster --name velero --image kindest/node:v1.19.1
```

## Get a container to work in
<br/>
Run a small `alpine linux` container where we can install and play with `velero`: <br/>

```
docker run -it --rm -v ${HOME}:/root/ -v ${PWD}:/work -w /work --net host alpine sh

# install curl, nano & kubectl
apk add --no-cache curl nano
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
export KUBE_EDITOR="nano"

# test cluster access:
/work # kubectl get nodes
NAME                   STATUS   ROLES    AGE   VERSION
velero-control-plane   Ready    master   26m   v1.18.4
```

## Velero CLI

Let's download the `velero` command line tool <br/>
I grabbed the `v1.5.1` release using `curl`

You can go to the [releases](https://github.com/vmware-tanzu/velero/releases/tag/v1.5.1) page to get it

```
curl -L -o /tmp/velero.tar.gz https://github.com/vmware-tanzu/velero/releases/download/v1.5.1/velero-v1.5.1-linux-amd64.tar.gz
tar -C /tmp -xvf /tmp/velero.tar.gz
mv /tmp/velero-v1.5.1-linux-amd64/velero /usr/local/bin/velero
chmod +x /usr/local/bin/velero

velero --help
```
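
A quick sanity check that the CLI is installed correctly (the `--client-only` flag avoids contacting the cluster, since Velero isn't deployed there yet):

```
velero version --client-only
```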

## Deploy some stuff

```
kubectl apply -f kubernetes/configmaps/configmap.yaml
kubectl apply -f kubernetes/secrets/secret.yaml
kubectl apply -f kubernetes/deployments/deployment.yaml
kubectl apply -f kubernetes/services/service.yaml

kubectl get all
```

## Create storage in Azure and AWS

In this example, we'll create storage in both AWS and Azure so we can try both scenarios.</br>
You can follow along in the video </br>

Create a storage account and secret for: [Azure](./azure.md) </br>
Create a storage account and secret for: [AWS](./aws.md) </br>

## Deploy Velero for Azure

Start [here](./azure.md) </br>

```
# Azure credential file
cat << EOF > /tmp/credentials-velero
AZURE_STORAGE_ACCOUNT_ACCESS_KEY=${AZURE_STORAGE_ACCOUNT_ACCESS_KEY}
AZURE_CLOUD_NAME=AzurePublicCloud
EOF

velero install \
  --provider azure \
  --plugins velero/velero-plugin-for-microsoft-azure:v1.1.0 \
  --bucket $BLOB_CONTAINER \
  --secret-file /tmp/credentials-velero \
  --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_NAME,storageAccountKeyEnvVar=AZURE_STORAGE_ACCOUNT_ACCESS_KEY,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID \
  --use-volume-snapshots=false

kubectl -n velero get pods
kubectl logs deployment/velero -n velero
```

## Deploy Velero for AWS

Start [here](./aws.md)

```
cat > /tmp/credentials-velero <<EOF
[default]
aws_access_key_id=$AWS_ACCESS_ID
aws_secret_access_key=$AWS_ACCESS_KEY
EOF

velero install \
  --provider aws \
  --plugins velero/velero-plugin-for-aws:v1.1.0 \
  --bucket $BUCKET \
  --backup-location-config region=$REGION \
  --snapshot-location-config region=$REGION \
  --secret-file /tmp/credentials-velero

kubectl -n velero get pods
kubectl logs deployment/velero -n velero
```
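
For either provider, once the Velero pod is up we can confirm that the backup storage location created by `velero install` is registered:

```
velero backup-location get
```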

## Create a Backup

```
velero backup create default-namespace-backup --include-namespaces default

# describe
velero backup describe default-namespace-backup

# logs
velero backup logs default-namespace-backup
```
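
To list all backups and their status at a glance:

```
velero backup get
```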

## Do a Restore

```
# delete all resources

kubectl delete -f kubernetes/configmaps/configmap.yaml
kubectl delete -f kubernetes/secrets/secret.yaml
kubectl delete -f kubernetes/deployments/deployment.yaml
kubectl delete -f kubernetes/services/service.yaml

velero restore create default-namespace-backup --from-backup default-namespace-backup

# describe
velero restore describe default-namespace-backup

# logs
velero restore logs default-namespace-backup

# see items restored
kubectl get all
```
Some files were not shown because too many files have changed in this diff