Deploying a Kafka cluster with Docker

Time: 2021-2-12

With Docker it is very convenient to build and test a Kafka cluster on a single machine. To simplify the configuration, Docker Compose is used for the build.

The steps to build the Kafka cluster are as follows:

  1. Write docker-compose.yml with the following contents:
version: '3.3'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    ports:
      - 2181:2181
    container_name: zookeeper
    networks:
      default:
        ipv4_address: 172.19.0.11
  kafka0:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka0
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 0
    volumes:
      - /root/data/kafka0/data:/data
      - /root/data/kafka0/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.12
  kafka1:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka1
    ports:
      - 9093:9093
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 1
    volumes:
      - /root/data/kafka1/data:/data
      - /root/data/kafka1/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.13
  kafka2:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka2
    ports:
      - 9094:9094
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 2
    volumes:
      - /root/data/kafka2/data:/data
      - /root/data/kafka2/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.14
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: unless-stopped
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links: # link the broker containers created by this compose file
      - kafka0
      - kafka1
      - kafka2
    external_links: # link containers created outside this compose file
      - zookeeper
    environment:
      ZK_HOSTS: zookeeper:2181
      TZ: CST-8
networks:
  default:
    external:
      name: zookeeper_kafka
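Note that every service above joins an external network named zookeeper_kafka, which is created in the next step. As an optional sanity check, docker-compose can parse the file and print the resolved configuration before anything is started, so indentation mistakes and typos surface early (assuming the file is saved as docker-compose.yml in the current directory):
    docker-compose -f docker-compose.yml config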
  2. Create the subnet
    docker network create --subnet 172.19.0.0/16 --gateway 172.19.0.1 zookeeper_kafka
  3. Run docker-compose to bring up the cluster
    docker-compose -f docker-compose.yml up -d
    Run docker ps -a; if the zookeeper, kafka0, kafka1, kafka2 and kafka-manager containers are all up and running, the deployment succeeded.
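    Two further optional checks, assuming you are still in the directory containing docker-compose.yml: docker-compose ps summarizes the state of these containers only, and the broker logs show whether each broker registered with ZooKeeper.
    docker-compose -f docker-compose.yml ps
    docker logs kafka0 | tail -n 20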
  4. Test Kafka
    Run docker exec -it kafka0 bash to enter the kafka0 container, then create a topic:
    cd /opt/kafka_2.13-2.6.0/bin/
    ./kafka-topics.sh --create --topic chat --partitions 5 --zookeeper 8.210.138.111:2181 --replication-factor 3
    Start a console producer with the following command:
    ./kafka-console-producer.sh --broker-list kafka0:9092 --topic chat
    Open another shell, enter the kafka2 container (docker exec -it kafka2 bash), and start a console consumer:
    ./kafka-console-consumer.sh --bootstrap-server kafka2:9094 --topic chat --from-beginning

Go back to the producer shell and type a message; if the same message shows up in the consumer shell, the Kafka cluster is working correctly.
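Still inside one of the broker containers, kafka-topics.sh can also confirm that the chat topic has the expected partition count and that its replicas are spread across the three brokers; the ZooKeeper address below is the same one used when the topic was created:
    ./kafka-topics.sh --describe --topic chat --zookeeper 8.210.138.111:2181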


Installing Kafka Manager on Kubernetes

The manifests below create a Deployment and a headless Service for Kafka Manager in the logging namespace, plus a cert-manager ClusterIssuer and an Ingress that expose the web UI at kafka.zisefeizhu.cn over TLS.

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: kafka-manager
  namespace: logging
  labels:
    name: kafka-manager
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-manager
  template:
    metadata:
      labels:
        app: kafka-manager
        name: kafka-manager
    spec:
      containers:
        - name: kafka-manager
          image: registry.cn-shenzhen.aliyuncs.com/zisefeizhu-baseimage/kafka:manager-latest
          ports:
            - containerPort: 9000
              protocol: TCP
          env:
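            # ZK_HOSTS reuses the ZooKeeper address from the Docker setup above
            # (port 2181 is published on the Docker host); point it at your own
            # ZooKeeper endpoint if it differs.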
            - name: ZK_HOSTS
              value: 8.210.138.111:2181
            - name: APPLICATION_SECRET
              value: letmein
            - name: TZ
              value: Asia/Shanghai
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      securityContext:
        runAsUser: 0
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600
  
---
kind: Service
apiVersion: v1
metadata:
  name: kafka-manager
  namespace: logging
spec:
  ports:
    - protocol: TCP
      port: 9000
      targetPort: 9000
  selector:
    app: kafka-manager
  clusterIP: None
  type: ClusterIP
  sessionAffinity: None
  
---
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: letsencrypt-kafka-zisefeizhu-cn
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: [email protected]
    privateKeySecretRef: # the Secret that stores this issuer's ACME account private key
      name: letsencrypt-kafka-zisefeizhu-cn
    solvers:
      - selector:
          dnsNames:
            - 'kafka.zisefeizhu.cn'
        dns01:
          webhook:
            config:
              accessKeyId: LTAI4G6JfRFW7DzuMyRGHTS2
              accessKeySecretRef:
                key: accessKeySecret
                name: alidns-credentials
              regionId: "cn-shenzhen"
              ttl: 600
            groupName: certmanager.webhook.alidns
            solverName: alidns
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: "kong"
    certmanager.k8s.io/cluster-issuer: "letsencrypt-kafka-zisefeizhu-cn"
  name: kafka-manager
  namespace: logging
spec:
  tls:
    - hosts:
        - 'kafka.zisefeizhu.cn'
      secretName: kafka-zisefeizhu-cn-tls
  rules:
    - host: kafka.zisefeizhu.cn
      http:
        paths:
          - backend:
              serviceName: kafka-manager
              servicePort: 9000
            path: /
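Assuming the manifests above are saved together in one file, here called kafka-manager.yaml (the name is arbitrary), they can be applied and checked with standard kubectl commands; cert-manager may need a minute or two to issue the certificate before the Ingress serves valid TLS:
    kubectl apply -f kafka-manager.yaml
    kubectl -n logging get deploy,svc,ingress kafka-manager
    kubectl -n logging get secret kafka-zisefeizhu-cn-tls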
