Introduction

This is the follow-up to the Istio microservices experiment. Please read the previous article before working through this one.

Distributed call chain tracing

Installation

# Download the YAML file
mkdir jaeger && cd jaeger
wget https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml

# The experimental environment does not support LoadBalancer.
# You can modify jaeger-all-in-one-template.yml to use a NodePort service
# (a sketch of that change follows this block), or leave it as is and let
# Kubernetes assign a random NodePort.

# start
kubectl apply -n istio-system -f jaeger-all-in-one-template.yml

# check
kubectl get pods -n istio-system
kubectl get svc -n istio-system

Visit the Vue/React page from the previous article several times and click the launch button to generate traffic

# visit
jaegerNodePort=$(kubectl get svc -n istio-system | grep jaeger-query | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$jaegerNodePort

# Select istio-ingress to view the entire call chain

# to clean up
cd jaeger
kubectl delete -n istio-system -f jaeger-all-in-one-template.yml
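For reference, the optional NodePort change only touches the query Service in the template. Below is a minimal sketch of what the edited section might look like; the Service name, port, and targetPort are taken from the upstream jaeger-all-in-one template and should be verified against your copy of the file.

# Sketch only: the jaeger-query Service after switching it to NodePort
apiVersion: v1
kind: Service
metadata:
  name: jaeger-query          # name assumed from the upstream template
  namespace: istio-system
  labels:
    app: jaeger
spec:
  type: NodePort              # changed from LoadBalancer
  ports:
  - name: query-http
    port: 80
    targetPort: 16686         # Jaeger UI port
    # nodePort: 30686         # optionally pin a fixed NodePort; omit for a random one
  # selector and other fields unchanged from the template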

Jaeger’s dashboard

Call chain

Service Tree Display

Collect logs and metrics

Installation

# Prometheus installation
cd /usr/local/istio

# Modify the service to use NodePort
cp install/kubernetes/addons/prometheus.yaml install/kubernetes/addons/prometheus.yaml.ori
vim install/kubernetes/addons/prometheus.yaml
...
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
  name: prometheus
  namespace: istio-system
spec:
  selector:
    app: prometheus
  ports:
  - name: prometheus
    protocol: TCP
    port: 9090
  # change the service type to NodePort
  type: NodePort
...

# deployment
kubectl apply -f install/kubernetes/addons/prometheus.yaml

# Configure metric and log collection (a sketch of new_telemetry.yml follows this block)
istioctl create -f istio/new_telemetry.yml

Visit the Vue/React page from the previous article several times and click the launch button to generate traffic

# Access the web UI
prometheusNodePort=$(kubectl get svc -n istio-system | grep prometheus | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$prometheusNodePort

Query the istio_double_request_count metric in the Prometheus UI

# check log
kubectl -n istio-system logs $(kubectl -n istio-system get pods -l istio=mixer -o jsonpath='{.items[0].metadata.name}') mixer | grep \"instance\":\"newlog.logentry.istio-system\"

# to clean up
kubectl delete -f install/kubernetes/addons/prometheus.yaml
istioctl delete -f istio/new_telemetry.yml
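The istio/new_telemetry.yml used above is not listed in this article. It follows Istio's "Collecting Metrics and Logs" task for the Mixer-based telemetry of this Istio release: a metric instance plus a Prometheus handler that produce istio_double_request_count, and a logentry instance plus a stdio handler that produce the newlog.logentry.istio-system entries grepped for above. A sketch, with names and attribute expressions taken from the upstream example (check them against your file):

# Sketch of istio/new_telemetry.yml, modeled on Istio's "Collecting Metrics and Logs" task
apiVersion: "config.istio.io/v1alpha2"
kind: metric
metadata:
  name: doublerequestcount
  namespace: istio-system
spec:
  value: "2"   # count each request twice
  dimensions:
    source: source.service | "unknown"
    destination: destination.service | "unknown"
    message: '"twice the fun!"'
  monitored_resource_type: '"UNSPECIFIED"'
---
apiVersion: "config.istio.io/v1alpha2"
kind: prometheus
metadata:
  name: doublehandler
  namespace: istio-system
spec:
  metrics:
  - name: double_request_count   # exposed as istio_double_request_count in Prometheus
    instance_name: doublerequestcount.metric.istio-system
    kind: COUNTER
    label_names: [source, destination, message]
---
apiVersion: "config.istio.io/v1alpha2"
kind: rule
metadata:
  name: doubleprom
  namespace: istio-system
spec:
  actions:
  - handler: doublehandler.prometheus
    instances: [doublerequestcount.metric]
---
# The log entry matched by the mixer log check above (newlog.logentry.istio-system)
apiVersion: "config.istio.io/v1alpha2"
kind: logentry
metadata:
  name: newlog
  namespace: istio-system
spec:
  severity: '"warning"'
  timestamp: request.time
  variables:
    source: source.labels["app"] | source.service | "unknown"
    user: source.user | "unknown"
    destination: destination.labels["app"] | destination.service | "unknown"
    responseCode: response.code | 0
    responseSize: response.size | 0
    latency: response.duration | "0ms"
  monitored_resource_type: '"UNSPECIFIED"'
---
apiVersion: "config.istio.io/v1alpha2"
kind: stdio
metadata:
  name: newhandler
  namespace: istio-system
spec:
  severity_levels:
    warning: 1
  outputAsJson: true
---
apiVersion: "config.istio.io/v1alpha2"
kind: rule
metadata:
  name: newlogstdio
  namespace: istio-system
spec:
  match: "true"
  actions:
  - handler: newhandler.stdio
    instances: [newlog.logentry]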

Collect TCP service metrics

Installation

# Prometheus installation
cd /usr/local/istio

# Modify the service to use NodePort
cp install/kubernetes/addons/prometheus.yaml install/kubernetes/addons/prometheus.yaml.ori
vim install/kubernetes/addons/prometheus.yaml
...
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
  name: prometheus
  namespace: istio-system
spec:
  selector:
    app: prometheus
  ports:
  - name: prometheus
    protocol: TCP
    port: 9090
  # change the service type to NodePort
  type: NodePort
...

# deployment
kubectl apply -f install/kubernetes/addons/prometheus.yaml

# Configure TCP metric collection (a sketch of tcp_telemetry.yml follows this block)
istioctl create -f istio/tcp_telemetry.yml

Deploy the application that uses MongoDB and exercise it to generate TCP traffic

# Access the web UI
prometheusNodePort=$(kubectl get svc -n istio-system | grep prometheus | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$prometheusNodePort

# Query the istio_mongo_received_bytes metric in the Prometheus UI

# to clean up
kubectl delete -f install/kubernetes/addons/prometheus.yaml
istioctl delete -f istio/tcp_telemetry.yml
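Similarly, istio/tcp_telemetry.yml is not listed here; it follows Istio's task for collecting metrics from TCP services. A sketch, assuming the metric is built from connection.received.bytes as in the upstream example:

# Sketch of istio/tcp_telemetry.yml, modeled on Istio's TCP metrics task
apiVersion: "config.istio.io/v1alpha2"
kind: metric
metadata:
  name: mongoreceivedbytes
  namespace: istio-system
spec:
  value: connection.received.bytes | 0
  dimensions:
    source_service: source.service | "unknown"
    source_version: source.labels["version"] | "unknown"
    destination_version: destination.labels["version"] | "unknown"
  monitored_resource_type: '"UNSPECIFIED"'
---
apiVersion: "config.istio.io/v1alpha2"
kind: prometheus
metadata:
  name: mongohandler
  namespace: istio-system
spec:
  metrics:
  - name: mongo_received_bytes   # exposed as istio_mongo_received_bytes in Prometheus
    instance_name: mongoreceivedbytes.metric.istio-system
    kind: COUNTER
    label_names: [source_service, source_version, destination_version]
---
apiVersion: "config.istio.io/v1alpha2"
kind: rule
metadata:
  name: mongoprom
  namespace: istio-system
spec:
  match: context.protocol == "tcp"   # optionally also restrict to the MongoDB destination service
  actions:
  - handler: mongohandler.prometheus
    instances: [mongoreceivedbytes.metric]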

TCP data flow diagram

Use Grafana to visualize metrics

Installation

# Prometheus installation
cd /usr/local/istio

# Modify the service to use NodePort
cp install/kubernetes/addons/prometheus.yaml install/kubernetes/addons/prometheus.yaml.ori
vim install/kubernetes/addons/prometheus.yaml
...
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
  name: prometheus
  namespace: istio-system
spec:
  selector:
    app: prometheus
  ports:
  - name: prometheus
    protocol: TCP
    port: 9090
  # change the service type to NodePort
  type: NodePort
...
cp install/kubernetes/addons/grafana.yaml install/kubernetes/addons/grafana.yaml.ori
vim install/kubernetes/addons/grafana.yaml
...
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: istio-system
spec:
  # change the service type to NodePort
  type: NodePort
  ports:
  - port: 3000
    protocol: TCP
    name: http
  selector:
    app: grafana
...

# deployment
kubectl apply -f install/kubernetes/addons/prometheus.yaml
kubectl apply -f install/kubernetes/addons/grafana.yaml

# Access the web UI
grafanaNodePort=$(kubectl get svc -n istio-system | grep grafana | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$grafanaNodePort

# Run a stress test and watch the charts in Grafana
# Create a Fortio pod for load testing (a sketch of fortio-deploy.yaml follows this block)
kubectl apply -f <(istioctl kube-inject -f istio/fortio-deploy.yaml)

# Normal access test
FORTIO_POD=$(kubectl get pod | grep fortio | awk '{ print $1 }')
kubectl exec -it $FORTIO_POD  -c fortio /usr/local/bin/fortio -- load -curl http://service-python/env

# Ramp up the load
kubectl exec -it $FORTIO_POD  -c fortio /usr/local/bin/fortio -- load -qps 20 -t 100s -loglevel Warning http://service-python/env
kubectl exec -it $FORTIO_POD  -c fortio /usr/local/bin/fortio -- load -qps 50 -t 100s -loglevel Warning http://service-go/env

# to clean up
kubectl delete -f install/kubernetes/addons/prometheus.yaml
kubectl delete -f install/kubernetes/addons/grafana.yaml
kubectl delete -f istio/fortio-deploy.yaml
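The istio/fortio-deploy.yaml referenced above is essentially the Fortio sample client that ships with Istio. A minimal sketch, assuming the upstream sample's image and port (verify against your copy of the file):

# Sketch of istio/fortio-deploy.yaml, based on Istio's Fortio sample client
apiVersion: v1
kind: Service
metadata:
  name: fortio
  labels:
    app: fortio
spec:
  ports:
  - port: 8080
    name: http
  selector:
    app: fortio
---
apiVersion: extensions/v1beta1   # Deployment API group used by this Kubernetes/Istio era
kind: Deployment
metadata:
  name: fortio-deploy
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: fortio
    spec:
      containers:
      - name: fortio
        image: fortio/fortio   # image assumed from the upstream sample
        ports:
        - containerPort: 8080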

Service mesh monitoring dashboard

Pilot monitoring dashboard

Generating a service graph

Installation

# Modify the service to use NodePort
cd /usr/local/istio
cp install/kubernetes/addons/servicegraph.yaml install/kubernetes/addons/servicegraph.yaml.ori
vim install/kubernetes/addons/servicegraph.yaml
...
apiVersion: v1
kind: Service
metadata:
  name: servicegraph
  namespace: istio-system
spec:
  # change the service type to NodePort
  type: NodePort
  ports:
  - name: http
    port: 8088
  selector:
    app: servicegraph
...

# Prometheus installation
cd /usr/local/istio

# Modify the service to use NodePort
cp install/kubernetes/addons/prometheus.yaml install/kubernetes/addons/prometheus.yaml.ori
vim install/kubernetes/addons/prometheus.yaml
...
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
  name: prometheus
  namespace: istio-system
spec:
  selector:
    app: prometheus
  ports:
  - name: prometheus
    protocol: TCP
    port: 9090
  # change the service type to NodePort
  type: NodePort
...

# deployment
kubectl apply -f install/kubernetes/addons/prometheus.yaml
kubectl apply -f install/kubernetes/addons/servicegraph.yaml

Visit the Vue/React page from the previous article several times and click the launch button to generate traffic

# Access the web UI
servicegraphNodePort=$(kubectl get svc -n istio-system | grep servicegraph | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$servicegraphNodePort/force/forcegraph.html

# The following endpoints are available:
# /force/forcegraph.html
# /dotviz
# /dotgraph
# /d3graph
# /graph

# to clean up
kubectl delete -f install/kubernetes/addons/prometheus.yaml
kubectl delete -f install/kubernetes/addons/servicegraph.yaml

Service graph

Use Fluentd to collect logs

Installation

# Install the EFK stack (Elasticsearch, Fluentd, Kibana)
kubectl apply -f istio/logging-stack.yml

# Configure Istio to send access logs to Fluentd (a sketch of fluentd-istio.yml follows this block)
istioctl create -f istio/fluentd-istio.yml

Visit the Vue/React page from the previous article several times and click the launch button to generate traffic

# Access the web UI
kibanaNodePort=$(kubectl get svc -n istio-system | grep kibana | awk '{print $5}' | cut -d '/' -f 1 | cut -d ':' -f 2)
nodeName=$(kubectl get no | grep '<none>' | head -1 | awk '{print $1}')
nodeIP=$(ping -c 1 $nodeName | grep PING | awk '{print $3}' | tr -d '()')
echo "http://$nodeIP:"$kibanaNodePort

# to clean up
kubectl delete -f istio/logging-stack.yml
istioctl delete -f istio/fluentd-istio.yml
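For reference, istio/fluentd-istio.yml follows Istio's "Logging with Fluentd" task: a logentry instance, a fluentd handler pointing at the Fluentd daemon deployed by logging-stack.yml, and a rule wiring them together. A sketch, assuming logging-stack.yml creates a fluentd-es Service in the logging namespace listening on port 24224:

# Sketch of istio/fluentd-istio.yml, modeled on Istio's "Logging with Fluentd" task
apiVersion: "config.istio.io/v1alpha2"
kind: logentry
metadata:
  name: newlog
  namespace: istio-system
spec:
  severity: '"info"'
  timestamp: request.time
  variables:
    source: source.labels["app"] | source.service | "unknown"
    user: source.user | "unknown"
    destination: destination.labels["app"] | destination.service | "unknown"
    responseCode: response.code | 0
    responseSize: response.size | 0
    latency: response.duration | "0ms"
  monitored_resource_type: '"UNSPECIFIED"'
---
apiVersion: "config.istio.io/v1alpha2"
kind: fluentd
metadata:
  name: handler
  namespace: istio-system
spec:
  address: "fluentd-es.logging:24224"   # Fluentd Service assumed from logging-stack.yml
---
apiVersion: "config.istio.io/v1alpha2"
kind: rule
metadata:
  name: newlogtofluentd
  namespace: istio-system
spec:
  match: "true"
  actions:
  - handler: handler.fluentd
    instances: [newlog.logentry]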