1. 架构

日志统计分析系统采用如下架构:

docker + kibana + logstash + elasticsearch + kafka + zookeeper + filebeat

2. 下载docker镜像

https://www.elastic.co/guide/index.html
https://www.elastic.co/guide/en/kibana/current/docker.html
https://www.elastic.co/guide/cn/kibana/current/docker.html
https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html

docker pull kibana:7.10.1
docker pull logstash:7.10.1
docker pull elasticsearch:7.10.1
docker pull confluentinc/cp-kafka:6.0.2
docker pull confluentinc/cp-zookeeper:6.0.2

3. 默认端口

  • kibana端口: 5601
  • logstash端口: 5044
  • elasticsearch端口: 9200
  • kafka端口: 9092
  • zookeeper端口: 2181

4. docker-compose.yml

创建目录:

mkdir -p /Users/xx/elk/elasticsearch/data
mkdir -p /Users/xx/elk/elasticsearch/plugins
mkdir -p /Users/xx/elk/logstash/pipeline
mkdir -p /Users/xx/elk/logstash/patterns

docker-compose.yml文件内容:

version: '3'
services:

  kibana:
    image: kibana:7.10.1
    container_name: kibana
    hostname: kibana
    restart: always
    # `links` removed: services on the same compose network already resolve
    # each other by service name, and `depends_on` controls start order.
    depends_on:
      - elasticsearch
    ports:
      # Quote port mappings — Compose best practice, avoids YAML's
      # base-60 ("sexagesimal") integer parsing trap.
      - "5601:5601"
    environment:
      TZ: Asia/Shanghai
      # 7.x setting; the legacy ELASTICSEARCH_URL (pre-7.x form) that was
      # set alongside it has been dropped to avoid conflicting config.
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
      SERVER_NAME: localhost
      SERVER_HOST: '0.0.0.0'
      # Chinese UI locale.
      I18N_LOCALE: zh-CN
      # Env values quoted so Compose passes literal strings, not booleans.
      XPACK_SECURITY_ENABLED: 'false'
      MONITORING_ENABLED: 'true'
    # volumes:
    #   - /Users/xx/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
  logstash:
    image: logstash:7.10.1
    container_name: logstash
    hostname: logstash
    restart: always
    # `links` replaced by depends_on: same-network DNS already resolves the
    # service names, and depends_on preserves the start ordering for both
    # upstream (kafka) and downstream (elasticsearch) dependencies.
    depends_on:
      - elasticsearch
      - kafka
    ports:
      # Beats input port; quoted per Compose port-mapping best practice.
      - "5044:5044"
    environment:
      TZ: Asia/Shanghai
    volumes:
      # Pipeline configs and custom grok patterns mounted from the host.
      - /Users/xx/elk/logstash/pipeline:/usr/share/logstash/pipeline
      - /Users/xx/elk/logstash/patterns:/usr/share/logstash/patterns
  elasticsearch:
    image: elasticsearch:7.10.1
    container_name: elasticsearch
    hostname: elasticsearch
    restart: always
    ulimits:
      # Unlimited locked memory, required for bootstrap.memory_lock below
      # so the ES heap is never swapped out.
      memlock:
        soft: -1
        hard: -1
    environment:
      TZ: Asia/Shanghai
      bootstrap.memory_lock: 'true'
      cluster.name: docker-elk-cluster
      # Single-node discovery: suitable for a one-node dev cluster.
      discovery.type: single-node
      xpack.security.enabled: 'false'
      # Fixed 512M heap; raise Xms/Xmx together for heavier workloads.
      ES_JAVA_OPTS: '-Xms512M -Xmx512M'
      # CORS open to any origin — dev convenience only; restrict in production.
      http.cors.enabled: 'true'
      http.cors.allow-origin: '*'
      network.bind_host: '0.0.0.0'
    ports:
      # Quoted per Compose port-mapping best practice.
      - "9200:9200"
    volumes:
      # Persist index data and plugins on the host across container recreation.
      - /Users/xx/elk/elasticsearch/data:/usr/share/elasticsearch/data
      - /Users/xx/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins
  kafka:
    image: confluentinc/cp-kafka:6.0.2
    container_name: kafka
    hostname: kafka
    restart: always
    depends_on:
      - zookeeper
    ports:
      # Quoted per Compose port-mapping best practice.
      - "9092:9092"
    environment:
      TZ: Asia/Shanghai
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # Bind on all interfaces; advertise the compose-network hostname so
      # other containers (e.g. logstash) reach the broker as kafka:9092.
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
      # Only the PLAINTEXT listener is declared, so only it is mapped here
      # (the previous PLAINTEXT_HOST map entry referred to no listener).
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      # Single broker, so internal topics can only have one replica.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_HEAP_OPTS: '-Xmx512M -Xms16M'
      # NOTE(review): dropped KAFKA_CREATE_TOPICS ("log:3:3") and
      # KAFKA_ADVERTISED_HOST_NAME — these are wurstmeister/kafka options
      # that the confluentinc/cp-kafka image does not honor, and a
      # replication factor of 3 cannot be satisfied by one broker anyway.
      # With auto-creation disabled, create the topic manually, e.g.:
      #   kafka-topics --create --topic log --partitions 3 \
      #     --replication-factor 1 --bootstrap-server kafka:9092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
  zookeeper:
    image: confluentinc/cp-zookeeper:6.0.2
    container_name: zookeeper
    hostname: zookeeper
    restart: always
    ports:
      # Quoted per Compose port-mapping best practice.
      - "2181:2181"
    environment:
      TZ: Asia/Shanghai
      # Port that clients (the kafka broker) connect to.
      ZOOKEEPER_CLIENT_PORT: 2181
      # Base time unit in ms used for heartbeats and session timeouts.
      ZOOKEEPER_TICK_TIME: 2000

networks:
  default:
    driver: bridge

5. 一键构建

后台启动,如果容器不存在根据镜像自动创建:

docker-compose up -d

停止并删除容器,同时删除关联的匿名数据卷(-v 参数):

docker-compose down -v

启动集群容器,容器不存在就无法启动:

docker-compose start

停止集群容器:

docker-compose stop

重启集群容器:

docker-compose restart

查看集群容器状态:

docker-compose ps

查看日志:

docker-compose logs

6. 访问

kibana访问:http://127.0.0.1:5601

elasticsearch访问: http://127.0.0.1:9200

http://localhost:9200/_nodes?pretty=true