Merge branch 'master' into kafka

This commit is contained in:
marcel-dempers 2021-05-27 14:53:21 +10:00
commit 0ba3a32a24
24 changed files with 782 additions and 17 deletions

View File

@ -0,0 +1,230 @@
# Kubernetes Daemonsets
## We need a Kubernetes cluster
Let's create a Kubernetes cluster to play with using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) </br>
Because a Daemonset is all about running pods on every node, let's create a 3-node cluster:
```
cd kubernetes/daemonsets
kind create cluster --name daemonsets --image kindest/node:v1.20.2 --config kind.yaml
```
Test our cluster:
```
kubectl get nodes
NAME STATUS ROLES AGE VERSION
daemonsets-control-plane Ready control-plane,master 65s v1.20.2
daemonsets-worker Ready <none> 31s v1.20.2
daemonsets-worker2 Ready <none> 31s v1.20.2
daemonsets-worker3 NotReady <none> 31s v1.20.2
```
# Introduction
Kubernetes provides [documentation](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) explaining what a Daemonset is, with examples.
## Basic Daemonset
Let's deploy a daemonset that runs a pod on each node and collects the name of the node
```
kubectl apply -f daemonset.yaml
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
example-daemonset-8lcr5 1/1 Running 0 3m21s 10.244.2.4 daemonsets-worker <none> <none>
example-daemonset-9jhgx 1/1 Running 0 81s 10.244.3.4 daemonsets-worker2 <none> <none>
example-daemonset-lvvsd 1/1 Running 0 2m41s 10.244.1.4 daemonsets-worker3 <none> <none>
example-daemonset-xxcv9 1/1 Running 0 119s 10.244.0.7 daemonsets-control-plane <none> <none>
```
We can see the logs of any pod
```
kubectl logs <example-daemonset-xxxx>
```
Cleanup:
```
kubectl delete ds example-daemonset
```
## Basic Daemonset: Exposing HTTP
Let's deploy a daemonset that runs a pod on each node and exposes an HTTP endpoint on each node. </br>
In this demo we'll use a simple NGINX on port 80
```
kubectl apply -f daemonset-communication.yaml
kubectl get pods
```
## Communicating with Daemonset Pods
https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#communicating-with-daemon-pods
Let's deploy a pod that can talk to our daemonset
```
kubectl apply -f pod.yaml
kubectl exec -it pod -- bash
```
### Service Type: ClusterIP or LoadBalancer
Let's deploy a service of type ClusterIP
```
kubectl apply -f ./services/clusterip-service.yaml
while true; do curl http://daemonset-svc-clusterip; sleep 1s; done
Hello from daemonsets-worker2
Hello from daemonsets-control-plane
Hello from daemonsets-worker2
Hello from daemonsets-worker3
Hello from daemonsets-worker
Hello from daemonsets-worker
```
### Node IP and Node Port
We can add the `hostPort` field to the pod's port section to expose a port on the node. </br>
Let's expose the node port in the pod spec:
```
ports:
- containerPort: 80
hostPort: 80
name: "http"
```
This means we can contact the daemonset pod using the Node IP and port:
```
# get the node ips
kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP KERNEL-VERSION CONTAINER-RUNTIME
daemonsets-control-plane Ready control-plane,master 112m v1.20.2 172.18.0.4 <none>
daemonsets-worker Ready <none> 111m v1.20.2 172.18.0.3 <none>
daemonsets-worker2 Ready <none> 111m v1.20.2 172.18.0.2 <none>
daemonsets-worker3 Ready <none> 111m v1.20.2 172.18.0.6 <none>
#example:
bash-5.1# curl http://172.18.0.4:80
Hello from daemonsets-control-plane
bash-5.1# curl http://172.18.0.2:80
Hello from daemonsets-worker2
```
### Service: Headless service
Let's deploy a headless service where `clusterIP: None`
```
kubectl apply -f ./services/headless-service.yaml
```
There are a few ways to discover our pods:
1) Discover the [DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services) records via the "Headless" services
```
apk add --no-cache bind-tools
dig daemonset-svc-headless.default.svc.cluster.local
```
Notice it resolves to multiple DNS records for each pod:
```
;; ANSWER SECTION:
daemonset-svc-headless.default.svc.cluster.local. 30 IN A 10.244.0.5
daemonset-svc-headless.default.svc.cluster.local. 30 IN A 10.244.2.2
daemonset-svc-headless.default.svc.cluster.local. 30 IN A 10.244.3.2
daemonset-svc-headless.default.svc.cluster.local. 30 IN A 10.244.1.2
```
2) Discover pods endpoints by retrieving the endpoints for the headless service
```
kubectl describe endpoints daemonset-svc-headless
```
Example:
```
Addresses: 10.244.0.5,10.244.1.2,10.244.2.2,10.244.3.2
```
Get A records for each pod by using the following format: </br>
`<pod-ip-address>.<my-namespace>.pod.<cluster-domain.example>.` </br>
```
#examples:
10-244-0-5.default.pod.cluster.local
10-244-1-2.default.pod.cluster.local
10-244-2-2.default.pod.cluster.local
10-244-3-2.default.pod.cluster.local
```
Communicate with the pods over DNS:
```
curl http://10-244-0-5.default.pod.cluster.local
Hello from daemonsets-control-plane
```
# Real world Examples:
## Monitoring Nodes: Node-Exporter Daemonset
<br/>
We clone the official kube-prometheus repo to get monitoring manifests for Kubernetes.
```
git clone https://github.com/prometheus-operator/kube-prometheus.git
```
Check the compatibility matrix [here](https://github.com/prometheus-operator/kube-prometheus/tree/v0.8.0#kubernetes-compatibility-matrix)
For this demo, we will use the compatible version tag 0.8
```
git checkout v0.8.0
```
Deploy Prometheus Operator and CRDs
```
cd .\manifests\
kubectl create -f .\setup\
```
Deploy remaining resources including node exporter daemonset
```
kubectl create -f .
# wait for pods to be up
kubectl get pods -n monitoring
#access prometheus in the browser
kubectl -n monitoring port-forward svc/prometheus-k8s 9090
```
See the Daemonset communications on the Prometheus [targets](http://localhost:9090/targets) page.
Check out my [monitoring guide for kubernetes](../../monitoring/prometheus/kubernetes/README.md) for more in-depth info.
## Monitoring: Logging via Fluentd
Take a look at my monitoring guide for [Fluentd](../../monitoring/logging/fluentd/kubernetes/README.md)

View File

@ -0,0 +1,55 @@
# DaemonSet that runs an NGINX pod on every node. An init container writes
# the node's name into index.html so each pod identifies which node served
# the request (used by the ClusterIP / headless service demos).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-communicate
  namespace: default
  labels:
    app: daemonset-communicate
spec:
  selector:
    matchLabels:
      name: daemonset-communicate
  template:
    metadata:
      labels:
        name: daemonset-communicate
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      initContainers:
        # Writes "Hello from <node>" into the shared emptyDir that NGINX serves.
        - name: create-file
          image: alpine
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  # downward API: inject the name of the node this pod landed on
                  fieldPath: spec.nodeName
          command:
            # absolute path (was the relative "bin/sh", which only worked
            # because the container's working directory happened to be "/")
            - "/bin/sh"
            - "-c"
            - "echo 'Hello from '$NODE_NAME > /usr/share/nginx/html/index.html"
          volumeMounts:
            - name: nginx-page
              mountPath: /usr/share/nginx/html/
      containers:
        - name: daemonset-communicate
          image: nginx:1.20.0-alpine
          volumeMounts:
            - name: nginx-page
              mountPath: /usr/share/nginx/html/
          resources:
            limits:
              memory: 500Mi
            requests:
              cpu: 10m
              memory: 100Mi
          ports:
            - containerPort: 80
              name: "http"
      terminationGracePeriodSeconds: 30
      volumes:
        # shared between init container (writer) and nginx (reader)
        - name: nginx-page
          emptyDir: {}

View File

@ -0,0 +1,40 @@
# Minimal DaemonSet demo: one alpine pod per node that logs the node name
# it is running on, then idles so the pod stays Running.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-daemonset
  namespace: default
  labels:
    app: example-daemonset
spec:
  selector:
    matchLabels:
      name: example-daemonset
  template:
    metadata:
      labels:
        name: example-daemonset
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
        - name: example-daemonset
          image: alpine:latest
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  # downward API: name of the node hosting this pod
                  fieldPath: spec.nodeName
          command:
            # absolute path (was the relative "bin/sh")
            - "/bin/sh"
            - "-c"
            - "echo 'Hello! I am running on '$NODE_NAME; while true; do sleep 300s ; done;"
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 200Mi
      terminationGracePeriodSeconds: 30

View File

@ -0,0 +1,7 @@
# kind cluster definition: one control-plane node plus three workers,
# so the daemonset demos have multiple nodes to schedule pods onto.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker

View File

@ -0,0 +1,13 @@
# Throwaway client pod used to curl the daemonset services from inside
# the cluster (kubectl exec -it pod -- bash). Sleeps for an hour, then exits.
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  containers:
    - name: pod
      image: alpine
      command:
        - "/bin/sh"
        - "-c"
        # install the tools the demo needs, then keep the pod alive
        - "apk add --no-cache curl bash && sleep 60m"

View File

@ -0,0 +1,13 @@
# ClusterIP service load-balancing over all daemonset-communicate pods;
# selects on the pod-template label "name" used by the DaemonSet.
apiVersion: v1
kind: Service
metadata:
  name: daemonset-svc-clusterip
spec:
  type: ClusterIP
  selector:
    name: daemonset-communicate
  ports:
    - protocol: TCP
      name: "http"
      port: 80
      targetPort: 80

View File

@ -0,0 +1,13 @@
# Headless service (clusterIP: None): no virtual IP is allocated, so DNS
# for this service returns one A record per daemonset-communicate pod.
apiVersion: v1
kind: Service
metadata:
  name: daemonset-svc-headless
spec:
  clusterIP: None
  selector:
    name: daemonset-communicate
  ports:
    - protocol: TCP
      name: "http"
      port: 80
      targetPort: 80

View File

@ -14,7 +14,7 @@ FROM base as debug
RUN pip install ptvsd
WORKDIR /work/
CMD python -m ptvsd --host 0.0.0.0 --port 5678 --wait --multiprocess -m flask run -h 0.0.0 -p 5000
CMD python -m ptvsd --host 0.0.0.0 --port 5678 --wait --multiprocess -m flask run -h 0.0.0.0 -p 5000
###########START NEW IMAGE: PRODUCTION ###################
FROM base as prod

View File

@ -15,7 +15,7 @@ spec:
spec:
initContainers:
- name: config
image: redis:6.0-alpine
image: redis:6.2.3-alpine
command: [ "sh", "-c" ]
args:
- |
@ -45,7 +45,7 @@ spec:
mountPath: /tmp/redis/
containers:
- name: redis
image: redis:6.0-alpine
image: redis:6.2.3-alpine
command: ["redis-server"]
args: ["/etc/redis/redis.conf"]
ports:

View File

@ -15,7 +15,7 @@ spec:
spec:
initContainers:
- name: config
image: redis:6.0-alpine
image: redis:6.2.3-alpine
command: [ "sh", "-c" ]
args:
- |
@ -35,8 +35,9 @@ spec:
fi
done
echo "sentinel monitor mymaster $MASTER 6379 2" >> /tmp/master
echo "port 5000
sentinel resolve-hostnames yes
sentinel announce-hostnames yes
$(cat /tmp/master)
sentinel down-after-milliseconds mymaster 5000
sentinel failover-timeout mymaster 60000
@ -49,7 +50,7 @@ spec:
mountPath: /etc/redis/
containers:
- name: sentinel
image: redis:6.0-alpine
image: redis:6.2.3-alpine
command: ["redis-sentinel"]
args: ["/etc/redis/sentinel.conf"]
ports:

View File

@ -105,13 +105,13 @@ This is intentional to demonstrate a busy network.
+------------+ +---------------+ +--------------+
| videos-web +---->+ playlists-api +--->+ playlists-db |
| | | | | |
| | | | | [redis] |
+------------+ +-----+---------+ +--------------+
|
v
+-----+------+ +-----------+
| videos-api +------>+ videos-db |
| | | |
| | | [redis] |
+------------+ +-----------+
```

View File

@ -23,6 +23,8 @@ const serviceName = "playlists-api"
var environment = os.Getenv("ENVIRONMENT")
var redis_host = os.Getenv("REDIS_HOST")
var redis_port = os.Getenv("REDIS_PORT")
var jaeger_host_port = os.Getenv("JAEGER_HOST_PORT")
var ctx = context.Background()
var rdb *redis.Client
@ -40,7 +42,7 @@ func main() {
// Log the emitted spans to stdout.
Reporter: &config.ReporterConfig{
LogSpans: true,
LocalAgentHostPort: "jaeger:6831",
LocalAgentHostPort: jaeger_host_port,
},
}
@ -60,7 +62,7 @@ func main() {
opentracing.HTTPHeadersCarrier(r.Header),
)
span := tracer.StartSpan("/ GET", ext.RPCServerOption(spanCtx))
span := tracer.StartSpan("playlists-api: GET /", ext.RPCServerOption(spanCtx))
defer span.Finish()
cors(w)
@ -80,7 +82,7 @@ func main() {
vs := []videos{}
for vi := range playlists[pi].Videos {
span, _ := opentracing.StartSpanFromContext(ctx, "videos-api GET")
span, _ := opentracing.StartSpanFromContext(ctx, "playlists-api: videos-api GET /id")
v := videos{}
@ -96,8 +98,8 @@ func main() {
)
videoResp, err :=http.DefaultClient.Do(req)
span.Finish()
if err != nil {
fmt.Println(err)
span.SetTag("error", true)
@ -149,7 +151,7 @@ func main() {
func getPlaylists(ctx context.Context)(response string){
span, _ := opentracing.StartSpanFromContext(ctx, "redis-get")
span, _ := opentracing.StartSpanFromContext(ctx, "playlists-api: redis-get")
defer span.Finish()
playlistData, err := rdb.Get(ctx, "playlists").Result()

View File

@ -0,0 +1,65 @@
# Deployment + ClusterIP service for the .NET videos-api. Jaeger tracing is
# configured entirely through JAEGER_* environment variables, which the app
# reads via Jaeger.Configuration.FromEnv in Startup.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: videos-api
  labels:
    app: videos-api
spec:
  selector:
    matchLabels:
      app: videos-api
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # surge one new pod before taking the old one down: zero downtime
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: videos-api
    spec:
      containers:
        - name: videos-api
          image: aimvector/jaeger-tracing:videos-api-netcore-1.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 10010
          env:
            - name: "ENVIRONMENT"
              value: "DEBUG"
            - name: "REDIS_HOST"
              value: "videos-db"
            - name: "REDIS_PORT"
              value: "6379"
            - name: "JAEGER_AGENT_HOST"
              value: "jaeger"
            - name: "JAEGER_AGENT_PORT"
              value: "6831"
            - name: "JAEGER_SERVICE_NAME"
              value: "videos-api"
            - name: "JAEGER_REPORTER_LOG_SPANS"
              value: "true"
            - name: "JAEGER_SAMPLER_TYPE"
              value: "const"
            - name: "JAEGER_PROPAGATION"
              value: "jaeger"
---
apiVersion: v1
kind: Service
metadata:
  name: videos-api
  labels:
    app: videos-api
spec:
  type: ClusterIP
  selector:
    app: videos-api
  ports:
    - protocol: TCP
      name: http
      port: 10010
      targetPort: 10010
---
View File

@ -0,0 +1,16 @@
#docker run -it -v ${PWD}:/work -p 5000:5000 -w /work aimvector/jaeger-tracing:videos-api-netcore-1.0.0 bash
# NOTE(review): the "dev" stage below is never referenced by a later FROM or
# a --target in the visible docs — presumably kept for interactive dev use;
# confirm before removing.
FROM mcr.microsoft.com/dotnet/sdk:5.0 as dev
WORKDIR /work/
FROM mcr.microsoft.com/dotnet/sdk:5.0 as build
WORKDIR /work/
# restore as a separate layer so dependency downloads are cached
COPY ./src/videos-api.csproj /work/videos-api.csproj
RUN dotnet restore
COPY ./src/ /work/
RUN mkdir /out/
RUN dotnet publish --no-restore --output /out/ --configuration Release
# NOTE(review): the published output in /out/ is never used — the entrypoint
# runs "dotnet run" against the sources in /work/, recompiling at startup.
# Consider WORKDIR /out + ENTRYPOINT ["dotnet", "videos-api.dll"]; left
# unchanged here to preserve behavior.
ENTRYPOINT ["dotnet", "run"]

View File

@ -0,0 +1,75 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Logging;
using StackExchange.Redis;
using System.Text.Json;
using System.Text.Json.Serialization;
using OpenTracing;
using OpenTracing.Propagation;

namespace videos_api.Controllers
{
    /// <summary>
    /// Serves GET /{id}: looks a video up in Redis by id and returns it,
    /// recording the Redis read as a child span of the incoming trace.
    /// </summary>
    [ApiController]
    [Route("")]
    public class VideosController : ControllerBase
    {
        // ConnectionMultiplexer is intended to be created once and shared for
        // the process lifetime. Controllers are constructed per request, so
        // the previous per-instance Connect() opened a fresh Redis connection
        // on every HTTP request, leaking sockets under load. Lazy<T> gives a
        // thread-safe, one-time connection shared by all requests.
        private static readonly Lazy<ConnectionMultiplexer> RedisConnection =
            new Lazy<ConnectionMultiplexer>(() =>
                ConnectionMultiplexer.Connect(
                    Environment.GetEnvironmentVariable("REDIS_HOST") + ":" +
                    Environment.GetEnvironmentVariable("REDIS_PORT")));

        private readonly ITracer _tracer;
        private readonly string _redisHost;
        private readonly string _redisPort;
        private readonly ConnectionMultiplexer _redis;
        private readonly ILogger<VideosController> _logger;
        private readonly JsonSerializerOptions _serializationOptions;

        public VideosController(ILogger<VideosController> logger, ITracer tracer)
        {
            _redisHost = Environment.GetEnvironmentVariable("REDIS_HOST");
            _redisPort = Environment.GetEnvironmentVariable("REDIS_PORT");
            _redis = RedisConnection.Value; // shared, process-wide connection
            _logger = logger;
            _tracer = tracer ?? throw new ArgumentNullException(nameof(tracer));
            _serializationOptions = new JsonSerializerOptions
            {
                // Redis documents use lowercase keys; the Videos DTO uses
                // PascalCase properties.
                PropertyNameCaseInsensitive = true
            };
        }

        /// <summary>
        /// Fetches the JSON video document stored under <paramref name="id"/>
        /// in Redis and deserializes it into a <see cref="Videos"/>.
        /// </summary>
        [HttpGet]
        [Route("/{id}")]
        public Videos Get(string id)
        {
            // Continue the distributed trace propagated in the HTTP headers.
            ISpanContext traceContext = _tracer.Extract(BuiltinFormats.HttpHeaders, new TextMapExtractAdapter(GetHeaders()));

            string videoContent;
            using (var scope = _tracer.BuildSpan("videos-api-net: redis-get")
                .AsChildOf(traceContext)
                .StartActive(true))
            {
                IDatabase db = _redis.GetDatabase();
                videoContent = db.StringGet(id);
            }

            var video = JsonSerializer.Deserialize<Videos>(videoContent, _serializationOptions);
            return video;
        }

        // Copies the incoming request headers into a plain dictionary for the
        // OpenTracing TextMap extractor.
        public Dictionary<string, string> GetHeaders()
        {
            var headers = new Dictionary<string, string>();
            foreach (var header in Request.Headers)
            {
                headers.Add(header.Key, header.Value);
            }
            return headers;
        }
    }
}

View File

@ -0,0 +1,32 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
namespace videos_api
{
// Application entry point: builds and runs the generic host for the
// videos-api web service.
public class Program
{
public static void Main(string[] args)
{
CreateHostBuilder(args).Build().Run();
}
// Configures the host: console-only logging, HTTP on port 10010 (matching
// the containerPort in the Kubernetes deployment), and Startup for DI/
// middleware wiring.
public static IHostBuilder CreateHostBuilder(string[] args) =>
Host.CreateDefaultBuilder(args)
.ConfigureLogging(logging =>
{
// drop the default providers (debug/eventsource) and log to console only
logging.ClearProviders();
logging.AddConsole();
})
.ConfigureWebHostDefaults(webBuilder =>
{
// listen on all interfaces, port 10010
webBuilder.UseUrls("http://*:10010");
webBuilder.UseStartup<Startup>();
});
}
}

View File

@ -0,0 +1,31 @@
{
"$schema": "http://json.schemastore.org/launchsettings.json",
"iisSettings": {
"windowsAuthentication": false,
"anonymousAuthentication": true,
"iisExpress": {
"applicationUrl": "http://localhost:27460",
"sslPort": 44312
}
},
"profiles": {
"IIS Express": {
"commandName": "IISExpress",
"launchBrowser": true,
"launchUrl": "swagger",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development"
}
},
"videos_api": {
"commandName": "Project",
"dotnetRunMessages": "true",
"launchBrowser": true,
"launchUrl": "swagger",
"applicationUrl": "https://localhost:5001;http://localhost:5000",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development"
}
}
}
}

View File

@ -0,0 +1,103 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.HttpsPolicy;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.OpenApi.Models;
using Jaeger;
using Jaeger.Reporters;
using Jaeger.Samplers;
using Jaeger.Senders;
using Jaeger.Senders.Thrift;
using OpenTracing;
using OpenTracing.Contrib.NetCore.Configuration;
using OpenTracing.Util;
namespace videos_api
{
// DI and middleware configuration for videos-api, including the Jaeger
// tracer registration driven by JAEGER_* environment variables.
public class Startup
{
public Startup(IConfiguration configuration)
{
Configuration = configuration;
}
public IConfiguration Configuration { get; }
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
services.AddLogging(loggingBuilder =>
{
loggingBuilder.AddConfiguration(Configuration.GetSection("Logging"));
loggingBuilder.AddConsole();
loggingBuilder.AddDebug();
});
// Register a single ITracer for the whole app. Configuration comes from
// JAEGER_* environment variables (agent host/port, service name, sampler)
// — see the env block in the Kubernetes deployment manifest.
services.AddSingleton<ITracer>(serviceProvider =>
{
ILoggerFactory loggerFactory = serviceProvider.GetRequiredService<ILoggerFactory>();
// use the Thrift-over-UDP sender to reach the Jaeger agent
Jaeger.Configuration.SenderConfiguration.DefaultSenderResolver = new SenderResolver(loggerFactory)
.RegisterSenderFactory<ThriftSenderFactory>();
var config = Jaeger.Configuration.FromEnv(loggerFactory);
ITracer tracer = config.GetTracer();
// also register globally so OpenTracing.Util.GlobalTracer resolves it
GlobalTracer.Register(tracer);
return tracer;
// The commented block below is the earlier hard-coded alternative
// (const sampler, UDP sender to "jaeger:6831"), kept for reference;
// FromEnv above replaces it with env-var-driven configuration.
// var loggerFactory = serviceProvider.GetRequiredService<ILoggerFactory>();
// var sampler = new ConstSampler(sample: true);
// var tracer = new Tracer.Builder("videos-api")
// .WithReporter(
// new RemoteReporter.Builder()
// .WithLoggerFactory(loggerFactory)
// .WithSender(
// new UdpSender("jaeger", 6831, 0))
// .Build())
// .WithLoggerFactory(loggerFactory)
// .WithSampler(sampler)
// .Build();
// GlobalTracer.Register(tracer);
// return tracer;
});
// instrument ASP.NET Core / HttpClient automatically with the tracer above
services.AddOpenTracing();
services.AddControllers();
services.AddSwaggerGen(c =>
{
c.SwaggerDoc("v1", new OpenApiInfo { Title = "videos_api", Version = "v1" });
});
}
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
app.UseSwagger();
app.UseSwaggerUI(c => c.SwaggerEndpoint("/swagger/v1/swagger.json", "videos_api v1"));
}
app.UseHttpsRedirection();
app.UseRouting();
app.UseAuthorization();
app.UseEndpoints(endpoints =>
{
endpoints.MapControllers();
});
}
}
}

View File

@ -0,0 +1,13 @@
using System;

namespace videos_api
{
    /// <summary>
    /// DTO for a video document stored as JSON in Redis.
    /// Deserialized case-insensitively by VideosController, so the lowercase
    /// keys in the stored JSON map onto these PascalCase properties.
    /// </summary>
    public class Videos
    {
        public string Id { get; set; }
        public string Title { get; set; }
        public string Description { get; set; }
        public string Imageurl { get; set; }
        public string Url { get; set; }
    }
}

View File

@ -0,0 +1,9 @@
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
"Microsoft.Hosting.Lifetime": "Information"
}
}
}

View File

@ -0,0 +1,10 @@
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
"Microsoft.Hosting.Lifetime": "Information"
}
},
"AllowedHosts": "*"
}

View File

@ -0,0 +1,15 @@
<!-- videos-api: .NET 5 web service with Jaeger/OpenTracing instrumentation,
     StackExchange.Redis for storage, and Swashbuckle for Swagger UI. -->
<Project Sdk="Microsoft.NET.Sdk.Web">
<PropertyGroup>
<TargetFramework>net5.0</TargetFramework>
<RootNamespace>videos_api</RootNamespace>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Jaeger" Version="1.0.0" />
<PackageReference Include="OpenTracing.Contrib.NetCore" Version="0.7.1" />
<PackageReference Include="StackExchange.Redis" Version="2.2.4" />
<PackageReference Include="Swashbuckle.AspNetCore" Version="5.6.3" />
</ItemGroup>
</Project>

View File

@ -25,6 +25,7 @@ const serviceName = "videos-api"
var environment = os.Getenv("ENVIRONMENT")
var redis_host = os.Getenv("REDIS_HOST")
var redis_port = os.Getenv("REDIS_PORT")
var jaeger_host_port = os.Getenv("JAEGER_HOST_PORT")
var flaky = os.Getenv("FLAKY")
var delay = os.Getenv("DELAY")
@ -45,7 +46,7 @@ func main() {
// Log the emitted spans to stdout.
Reporter: &config.ReporterConfig{
LogSpans: true,
LocalAgentHostPort: "jaeger:6831",
LocalAgentHostPort: jaeger_host_port,
},
}
@ -65,7 +66,7 @@ func main() {
opentracing.HTTPHeadersCarrier(r.Header),
)
span := tracer.StartSpan("/id GET", ext.RPCServerOption(spanCtx))
span := tracer.StartSpan("videos-api: GET /id", ext.RPCServerOption(spanCtx))
defer span.Finish()
if flaky == "true" {
@ -98,7 +99,7 @@ func main() {
func video(writer http.ResponseWriter, request *http.Request, p httprouter.Params, ctx context.Context)(response string){
span, _ := opentracing.StartSpanFromContext(ctx, "redis-get")
span, _ := opentracing.StartSpanFromContext(ctx, "videos-api: redis-get")
defer span.Finish()
id := p.ByName("id")

View File

@ -18,6 +18,7 @@ services:
- "ENVIRONMENT=DEBUG"
- "REDIS_HOST=playlists-db"
- "REDIS_PORT=6379"
- "JAEGER_HOST_PORT=jaeger:6831"
ports:
- 81:10010
networks:
@ -39,12 +40,32 @@ services:
- "ENVIRONMENT=DEBUG"
- "REDIS_HOST=videos-db"
- "REDIS_PORT=6379"
- "JAEGER_HOST_PORT=jaeger:6831"
#- "DELAY=true"
#- "FLAKY=true"
ports:
- 82:10010
networks:
- tracing
# videos-api-netcore:
# container_name: videos-api
# image: aimvector/jaeger-tracing:videos-api-netcore-1.0.0
# build:
# context: ./applications-go/videos-api-netcore
# environment:
# - "ENVIRONMENT=DEBUG"
# - "REDIS_HOST=videos-db"
# - "REDIS_PORT=6379"
# - "JAEGER_AGENT_HOST=jaeger"
# - "JAEGER_AGENT_PORT=6831"
# - "JAEGER_SERVICE_NAME=videos-api"
# - "JAEGER_REPORTER_LOG_SPANS=true"
# - "JAEGER_SAMPLER_TYPE=const"
# - "JAEGER_PROPAGATION=jaeger"
# ports:
# - 82:5000
# networks:
# - tracing
videos-db:
container_name: videos-db
image: redis:6.0-alpine