diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..e873dbb --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +# Ignore OS artifacts +**/.DS_Store diff --git a/.env b/.env new file mode 100644 index 0000000..9c0f25c --- /dev/null +++ b/.env @@ -0,0 +1 @@ +ELK_VERSION=7.11.0 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..2858dda --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Declare files that will always have LF line endings on checkout. +*.sh text eol=lf \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..42d723d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: +- name: Discuss the Elastic Stack + url: https://discuss.elastic.co + about: Please ask questions related to the usage of Elastic products in those forums. +- name: Docker Community Forums + url: https://forums.docker.com + about: Please ask questions related to the usage of Docker products in those forums. +- name: docker-elk Gitter chat room + url: https://gitter.im/deviantony/docker-elk + about: General questions regarding this project can also be asked in the chat. diff --git a/.github/ISSUE_TEMPLATE/issue_report.md b/.github/ISSUE_TEMPLATE/issue_report.md new file mode 100644 index 0000000..2d0841d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue_report.md @@ -0,0 +1,87 @@ +--- +name: Issue report +about: Report a problem with the docker-elk integration or its documentation. +--- + + + + +### Problem description + + + +### Extra information + +#### Stack configuration + + + +#### Docker setup + + + +```console +$ docker version + +[OUTPUT HERE] +``` + + + +```console +$ docker-compose version + +[OUTPUT HERE] +``` + +#### Container logs + + + +```console +$ docker-compose logs + +[OUTPUT HERE] +``` diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..bf8a326 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,293 @@ +name: CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + + test-compose: + name: 'Test suite: Compose' + # List of supported runners: + # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + ##################################################### + # # + # Install all dependencies required by test suites. # + # # + ##################################################### + + - name: Prepare environment + run: | + + # Install Linux packages + # + # List of packages pre-installed in the runner: + # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software + + sudo apt install -y expect + + # Pre-build container images + + docker-compose build + + ######################################################## + # # + # Ensure §"Initial setup" of the README remains valid. 
# + # # + ######################################################## + + - name: Set password of every built-in user to 'testpasswd' + run: | + + # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files + + sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml + sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf + sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml + sed -i -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml + sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml + sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/metricbeat/config/metricbeat.yml + + # Run Elasticsearch and wait for its availability + + docker-compose up -d elasticsearch + source .github/workflows/scripts/lib/testing.sh + poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme' + + # Set passwords + + .github/workflows/scripts/elasticsearch-setup-passwords.exp + + ########################################################## + # # + # Test core components: Elasticsearch, Logstash, Kibana. # + # # + ########################################################## + + - name: Run the stack + run: docker-compose up -d + + - name: Execute core test suite + run: .github/workflows/scripts/run-tests-core.sh + + - name: 'debug: Display state and logs (core)' + # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idif + # https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#job-status-check-functions + if: always() + run: | + docker-compose ps + docker-compose logs elasticsearch + docker-compose logs logstash + docker-compose logs kibana + + ############################## + # # + # Test supported extensions. # + # # + ############################## + + # + # Logspout + # + + - name: Execute Logspout test suite + run: | + + # Set mandatory Logstash settings + + sed -i '$ a input { udp { port => 5000 codec => json } }' logstash/pipeline/logstash.conf + + # Run Logspout and execute tests. + # Logstash will be restarted as a result of building the Logspout + # image, so changes above will automatically take effect. 
+ + docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up -d logspout + .github/workflows/scripts/run-tests-logspout.sh + + # Revert changes to Logstash configuration + + sed -i '/input { udp { port => 5000 codec => json } }/d' logstash/pipeline/logstash.conf + + - name: 'debug: Display state and logs (Logspout)' + if: always() + run: | + docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml ps + docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml logs logspout + # next steps don't need Logspout + docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml stop logspout + # next steps don't need Logstash + docker-compose stop logstash + + # + # Enterprise Search + # + + - name: Execute Enterprise Search test suite + run: | + + # Set mandatory Elasticsearch settings + + sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml + + # Restart Elasticsearch for changes to take effect + + docker-compose restart elasticsearch + + # Run Enterprise Search and execute tests + + docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search + .github/workflows/scripts/run-tests-enterprise-search.sh + + # Revert changes to Elasticsearch configuration + + sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml + docker-compose restart elasticsearch + + - name: 'debug: Display state and logs (Enterprise Search)' + if: always() + run: | + docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps + docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search + # next steps don't need Enterprise Search + docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml stop enterprise-search + + # + # APM Server + # + + - name: Execute APM Server test suite + run: | + docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up -d apm-server + .github/workflows/scripts/run-tests-apm-server.sh + + - name: 'debug: Display state and logs (APM Server)' + if: always() + run: | + docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml ps + docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml logs apm-server + # next steps don't need APM Server + docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml stop apm-server + + # + # Metricbeat + # + + - name: Execute Metricbeat test suite + run: | + docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up -d metricbeat + .github/workflows/scripts/run-tests-metricbeat.sh + + - name: 'debug: Display state and logs (Metricbeat)' + if: always() + run: | + docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml ps + docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml logs metricbeat + # next steps don't need Metricbeat + docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml stop metricbeat + + ############## + # # + # Tear down. 
# + # # + ############## + + - name: Terminate all components + if: always() + run: >- + docker-compose + -f docker-compose.yml + -f extensions/logspout/logspout-compose.yml + -f extensions/enterprise-search/enterprise-search-compose.yml + -f extensions/apm-server/apm-server-compose.yml + -f extensions/metricbeat/metricbeat-compose.yml + down -v + + test-swarm: + name: 'Test suite: Swarm' + runs-on: ubuntu-latest + + env: + MODE: swarm + + steps: + - uses: actions/checkout@v2 + + ##################################################### + # # + # Install all dependencies required by test suites. # + # # + ##################################################### + + - name: Prepare environment + run: | + + # Install Linux packages + + sudo apt install -y expect + + # Enable Swarm mode + + docker swarm init + + ######################################################## + # # + # Ensure §"Initial setup" of the README remains valid. # + # # + ######################################################## + + - name: Set password of every built-in user to 'testpasswd' + run: | + + # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files + + sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml + sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf + sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml + + # Run Elasticsearch and wait for its availability + + docker stack deploy -c ./docker-stack.yml elk + docker service scale elk_logstash=0 elk_kibana=0 + source .github/workflows/scripts/lib/testing.sh + poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme' + + # Set passwords + + .github/workflows/scripts/elasticsearch-setup-passwords.exp swarm + + ########################################################## + # # + # Test core components: Elasticsearch, Logstash, Kibana. # + # # + ########################################################## + + - name: Run the stack + run: docker service scale elk_logstash=1 elk_kibana=1 + + - name: Execute core test suite + run: .github/workflows/scripts/run-tests-core.sh swarm + + - name: 'debug: Display state and logs (core)' + if: always() + run: | + docker stack services elk + docker service logs elk_elasticsearch + docker service logs elk_kibana + docker service logs elk_logstash + + ############## + # # + # Tear down. 
# + # # + ############## + + - name: Terminate all components + if: always() + run: docker stack rm elk diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..c50939f --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,29 @@ +name: Documentation + +on: + schedule: + - cron: '0 0 * * 0' # At 00:00 every Sunday + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + + markdown-check: + name: Check Markdown + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Check links + uses: gaurav-nelson/github-action-markdown-link-check@v1 + with: + config-file: .github/workflows/mlc_config.json + + - name: Lint + uses: avto-dev/markdown-lint@v1 + with: + args: '**/*.md' + config: .github/workflows/lint/markdown.yaml diff --git a/.github/workflows/lint/markdown.yaml b/.github/workflows/lint/markdown.yaml new file mode 100644 index 0000000..dd9e569 --- /dev/null +++ b/.github/workflows/lint/markdown.yaml @@ -0,0 +1,152 @@ +default: false # includes/excludes all rules by default + +# Heading levels should only increment by one level at a time +MD001: true + +# Heading style +MD003: true + +# Unordered list style +MD004: true + +# Inconsistent indentation for list items at the same level +MD005: true + +# Consider starting bulleted lists at the beginning of the line +MD006: true + +# Unordered list indentation +MD007: true + +# Trailing spaces +MD009: true + +# Hard tabs +MD010: true + +# Reversed link syntax +MD011: true + +# Multiple consecutive blank lines +MD012: true + +# Line length +MD013: + line_length: 120 + code_blocks: false + +# Dollar signs used before commands without showing output +MD014: false + +# No space after hash on atx style heading +MD018: true + +# Multiple spaces after hash on atx style heading +MD019: true + +# No space inside hashes on closed atx style heading +MD020: true + +# Multiple spaces inside hashes on closed atx style heading +MD021: true + +# Headings should be surrounded by blank lines +MD022: true + +# Headings must start at the beginning of the line +MD023: true + +# Multiple headings with the same content +MD024: + allow_different_nesting: true + +# Multiple top level headings in the same document +MD025: true + +# Trailing punctuation in heading +MD026: true + +# Multiple spaces after blockquote symbol +MD027: true + +# Blank line inside blockquote +MD028: false + +# Ordered list item prefix +MD029: + style: 'one' + +# Spaces after list markers +MD030: true + +# Fenced code blocks should be surrounded by blank lines +MD031: true + +# Lists should be surrounded by blank lines +MD032: true + +# Inline HTML +MD033: true + +# Bare URL used +MD034: true + +# Horizontal rule style +MD035: + style: '---' + +# Emphasis used instead of a heading +MD036: true + +# Spaces inside emphasis markers +MD037: true + +# Spaces inside code span elements +MD038: true + +# Spaces inside link text +MD039: true + +# Fenced code blocks should have a language specified +MD040: true + +# First line in file should be a top level heading +MD041: true + +# No empty links +MD042: true + +# Required heading structure +MD043: false + +# Proper names should have the correct capitalization +MD044: + names: + - docker-elk + - Elasticsearch + - Logstash + - Kibana + - Docker + - Compose + - macOS + code_blocks: false + +# Images should have alternate text (alt text) +MD045: true + +# Code block style +MD046: + style: fenced + +# Files should end with a single newline character +MD047: true + +# Code 
fence style +MD048: + style: 'backtick' + +# Custom rules: +CHANGELOG-RULE-001: true +CHANGELOG-RULE-002: true +CHANGELOG-RULE-003: true +CHANGELOG-RULE-004: true diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json new file mode 100644 index 0000000..6b37a94 --- /dev/null +++ b/.github/workflows/mlc_config.json @@ -0,0 +1,5 @@ +{ + "ignorePatterns": [ + { "pattern": "^http:\/\/localhost:" } + ] +} diff --git a/.github/workflows/scripts/elasticsearch-setup-passwords.exp b/.github/workflows/scripts/elasticsearch-setup-passwords.exp new file mode 100755 index 0000000..3061f92 --- /dev/null +++ b/.github/workflows/scripts/elasticsearch-setup-passwords.exp @@ -0,0 +1,28 @@ +#!/usr/bin/expect -f + +# List of expected users with dummy password +set user "(elastic|apm_system|kibana_system|logstash_system|beats_system|remote_monitoring_user)" +set password "testpasswd" + +# Find elasticsearch container id +set MODE [lindex $argv 0] +if { [string match "swarm" $MODE] } { + set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch] +} else { + set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch] +} + +set cmd "docker exec -it $cid bin/elasticsearch-setup-passwords interactive -s -b -u http://localhost:9200" + +spawn {*}$cmd + +expect { + -re "(E|Ree)nter password for \\\[$user\\\]: " { + send "$password\r" + exp_continue + } + eof +} + +lassign [wait] pid spawnid os_error_flag value +exit $value diff --git a/.github/workflows/scripts/lib/testing.sh b/.github/workflows/scripts/lib/testing.sh new file mode 100755 index 0000000..913c9d4 --- /dev/null +++ b/.github/workflows/scripts/lib/testing.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash + +# Log a message. +function log { + echo -e "\n[+] $1\n" +} + +# Log an error. +function err { + echo -e "\n[x] $1\n" >&2 +} + +# Return the ID of the container running the given service. +function container_id { + local svc=$1 + + local label + if [[ "${MODE:-}" == "swarm" ]]; then + label="com.docker.swarm.service.name=elk_${svc}" + else + label="com.docker.compose.service=${svc}" + fi + + local cid + + # retry for max 60s (30*2s) + for _ in $(seq 1 30); do + cid="$(docker container ls -aq -f label="$label")" + if [ -n "$cid" ]; then + break + fi + + echo -n '.' >&2 + sleep 2 + done + + if [ -z "${cid:-}" ]; then + err "Timed out waiting for creation of container with label ${label}" + return 1 + fi + + echo "$cid" +} + +# Return the IP address at which a service can be reached. +# In Compose mode, returns the container's IP. +# In Swarm mode, returns the IP of the node to ensure traffic enters the routing mesh (ingress). 
+function service_ip { + local svc=$1 + + local ip + + if [[ "${MODE:-}" == "swarm" ]]; then + #ingress_net="$(docker network inspect ingress --format '{{ .Id }}')" + #ip="$(docker service inspect elk_"$svc" --format "{{ range .Endpoint.VirtualIPs }}{{ if eq .NetworkID \"${ingress_net}\" }}{{ .Addr }}{{ end }}{{ end }}" | cut -d/ -f1)" + node="$(docker node ls --format '{{ .ID }}')" + ip="$(docker node inspect "$node" --format '{{ .Status.Addr }}')" + if [ -z "${ip:-}" ]; then + err "Node ${node} has no IP address" + return 1 + fi + + echo "$ip" + return + fi + + local cid + cid="$(container_id "$svc")" + + ip="$(docker container inspect "$cid" --format '{{ (index .NetworkSettings.Networks "docker-elk_elk").IPAddress }}')" + if [ -z "${ip:-}" ]; then + err "Container ${cid} has no IP address" + return 1 + fi + + echo "$ip" +} + +# Poll the given service at the given port:/path until it responds with HTTP code 200. +function poll_ready { + local cid=$1 + local url=$2 + + local -a args=( '-s' '-D-' '-m3' '-w' '%{http_code}' "$url" ) + if [ "$#" -ge 3 ]; then + args+=( ${@:3} ) + fi + + echo "curl arguments: ${args[*]}" + + local -i result=1 + local output + + # retry for max 180s (36*5s) + for _ in $(seq 1 36); do + if [[ $(docker container inspect "$cid" --format '{{ .State.Status}}') == 'exited' ]]; then + err "Container exited ($(docker container inspect "$cid" --format '{{ .Name }}'))" + return 1 + fi + + output="$(curl "${args[@]}" || true)" + if [ "${output: -3}" -eq 200 ]; then + result=0 + break + fi + + echo -n 'x' >&2 + sleep 5 + done + + echo -e "\n${output::-3}" + + return $result +} diff --git a/.github/workflows/scripts/run-tests-apm-server.sh b/.github/workflows/scripts/run-tests-apm-server.sh new file mode 100755 index 0000000..f3d135d --- /dev/null +++ b/.github/workflows/scripts/run-tests-apm-server.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" + + +cid="$(container_id apm-server)" +ip="$(service_ip apm-server)" + +log 'Waiting for readiness of APM Server' +poll_ready "$cid" "http://${ip}:8200/" diff --git a/.github/workflows/scripts/run-tests-core.sh b/.github/workflows/scripts/run-tests-core.sh new file mode 100755 index 0000000..d4172a3 --- /dev/null +++ b/.github/workflows/scripts/run-tests-core.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" + + +cid_es="$(container_id elasticsearch)" +cid_ls="$(container_id logstash)" +cid_kb="$(container_id kibana)" + +ip_es="$(service_ip elasticsearch)" +ip_ls="$(service_ip logstash)" +ip_kb="$(service_ip kibana)" + +log 'Waiting for readiness of Elasticsearch' +poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' + +log 'Waiting for readiness of Logstash' +poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty" + +log 'Waiting for readiness of Kibana' +poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" -u 'kibana_system:testpasswd' + +log 'Creating Logstash index pattern in Kibana' +source .env +curl -X POST -D- "http://${ip_kb}:5601/api/saved_objects/index-pattern" \ + -s -w '\n' \ + -H 'Content-Type: application/json' \ + -H "kbn-version: ${ELK_VERSION}" \ + -u elastic:testpasswd \ + -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}' + +log 'Searching index pattern via Kibana API' +response="$(curl "http://${ip_kb}:5601/api/saved_objects/_find?type=index-pattern" -s -u elastic:testpasswd)" +echo "$response" 
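+# Extract the total number of matching saved objects ('total' field) from the JSON response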
+count="$(jq -rn --argjson data "${response}" '$data.total')" +if [[ $count -ne 1 ]]; then + echo "Expected 1 index pattern, got ${count}" + exit 1 +fi + +log 'Sending message to Logstash TCP input' +echo 'dockerelk' | nc -q0 "$ip_ls" 5000 + +sleep 1 +curl -X POST "http://${ip_es}:9200/_refresh" -u elastic:testpasswd \ + -s -w '\n' + +log 'Searching message in Elasticsearch' +response="$(curl "http://${ip_es}:9200/_count?q=message:dockerelk&pretty" -s -u elastic:testpasswd)" +echo "$response" +count="$(jq -rn --argjson data "${response}" '$data.count')" +if [[ $count -ne 1 ]]; then + echo "Expected 1 document, got ${count}" + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-enterprise-search.sh b/.github/workflows/scripts/run-tests-enterprise-search.sh new file mode 100755 index 0000000..fb1ab99 --- /dev/null +++ b/.github/workflows/scripts/run-tests-enterprise-search.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" + + +cid_es="$(container_id elasticsearch)" +cid_en="$(container_id enterprise-search)" + +ip_es="$(service_ip elasticsearch)" +ip_en="$(service_ip enterprise-search)" + +log 'Waiting for readiness of Elasticsearch' +poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' + +log 'Waiting for readiness of Enterprise Search' +poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" -u 'elastic:testpasswd' + +log 'Ensuring that App Search API keys were created in Elasticsearch' +response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v3/_count?pretty" -s -u elastic:testpasswd)" +echo "$response" +declare -i count +count="$(jq -rn --argjson data "${response}" '$data.count')" +if (( count != 2)); then + echo "Expected search and private keys, got ${count} result(s)" + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-logspout.sh b/.github/workflows/scripts/run-tests-logspout.sh new file mode 100755 index 0000000..f2e9dde --- /dev/null +++ b/.github/workflows/scripts/run-tests-logspout.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" + + +cid_es="$(container_id elasticsearch)" +cid_ls="$(container_id logspout)" + +ip_es="$(service_ip elasticsearch)" +ip_ls="$(service_ip logspout)" + +log 'Waiting for readiness of Elasticsearch' +poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' + +log 'Waiting for readiness of Logspout' +poll_ready "$cid_ls" "http://${ip_ls}/health" + +# When Logspout starts, it prints the following log line: +# 2021/01/07 16:14:52 # logspout v3.2.13-custom by gliderlabs +# +# which we expect to find by querying: +# docker.image:"docker-elk_logspout" AND message:"logspout gliderlabs"~3 +# +log 'Searching a log entry forwarded by Logspout' + +declare response +declare -i count + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl "http://${ip_es}:9200/_count?q=docker.image:%22docker-elk_logspout%22%20AND%20message:%22logspout%20gliderlabs%22~3&pretty" -s -u elastic:testpasswd)" + count="$(jq -rn --argjson data "${response}" '$data.count')" + if [[ $count -gt 0 ]]; then + break + fi + + echo -n 'x' >&2 + sleep 2 +done +echo -e '\n' >&2 + +echo "$response" +# Logspout may restart if Logstash isn't ready yet, so we tolerate multiple +# results +if [[ $count -lt 1 ]]; then + echo "Expected at least 1 document, got ${count}" + exit 1 +fi diff --git a/.github/workflows/scripts/run-tests-metricbeat.sh 
b/.github/workflows/scripts/run-tests-metricbeat.sh new file mode 100755 index 0000000..6dc99c0 --- /dev/null +++ b/.github/workflows/scripts/run-tests-metricbeat.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" + + +cid_es="$(container_id elasticsearch)" +cid_mb="$(container_id metricbeat)" + +ip_es="$(service_ip elasticsearch)" +ip_mb="$(service_ip metricbeat)" + +log 'Waiting for readiness of Elasticsearch' +poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' + +log 'Waiting for readiness of Metricbeat' +poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty" + +# We expect to find one monitoring entry for the 'elasticsearch' Compose +# service using the following query: +# +# agent.type:"metricbeat" +# AND event.module:"docker" +# AND event.dataset:"docker.container" +# AND container.name:"docker-elk_elasticsearch_1" +# +log 'Searching a document generated by Metricbeat' + +declare response +declare -i count + +# retry for max 60s (30*2s) +for _ in $(seq 1 30); do + response="$(curl "http://${ip_es}:9200/metricbeat-*/_search?q=agent.type:%22metricbeat%22%20AND%20event.module:%22docker%22%20AND%20event.dataset:%22docker.container%22%20AND%20container.name:%22docker-elk_elasticsearch_1%22&pretty" -s -u elastic:testpasswd)" + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + if (( count > 0 )); then + break + fi + + echo -n 'x' >&2 + sleep 2 +done +echo -e '\n' >&2 + +echo "$response" +if (( count != 1 )); then + echo "Expected 1 document, got ${count}" + exit 1 +fi diff --git a/.github/workflows/update.yml b/.github/workflows/update.yml new file mode 100644 index 0000000..d9b2480 --- /dev/null +++ b/.github/workflows/update.yml @@ -0,0 +1,45 @@ +name: Update Elastic release + +on: + schedule: + - cron: '0 0 * * 0' # At 00:00 every Sunday + +jobs: + + check-and-update: + name: Check and update Elastic release + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Get latest release version + uses: octokit/request-action@v2.x + id: get_latest_release + with: + route: GET /repos/:repository/releases/latest + repository: elastic/elasticsearch + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Update stack version + run: | + source .env + cur_ver="$ELK_VERSION" + new_ver=${{ fromJson(steps.get_latest_release.outputs.data).tag_name }} + + # Escape dot characters so sed interprets them as literal dots + cur_ver="$(echo $cur_ver | sed 's/\./\\./g')" + # Trim leading 'v' in semantic version + new_ver="${new_ver:1}" + + for f in .env docker-stack.yml README.md; do + sed -i "s/${cur_ver}/${new_ver}/g" "$f" + done + + - name: Send pull request to update to new version + uses: peter-evans/create-pull-request@v3 + with: + commit-message: Update to ${{ fromJson(steps.get_latest_release.outputs.data).tag_name }} + title: Update to ${{ fromJson(steps.get_latest_release.outputs.data).tag_name }} + delete-branch: true diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0dbd69f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Anthony Lapenna + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the 
Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..45afe09
--- /dev/null
+++ b/README.md
@@ -0,0 +1,456 @@
+# Elastic stack (ELK) on Docker
+
+[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-7.11.0-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases)
+[![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg?branch=main)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI+branch%3Amain)
+[![Join the chat at https://gitter.im/deviantony/docker-elk](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/deviantony/docker-elk?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose.
+
+It gives you the ability to analyze any data set by using the searching/aggregation capabilities of Elasticsearch and
+the visualization power of Kibana.
+
+*:information_source: The Docker images backing this stack include [X-Pack][xpack] with [paid features][paid-features]
+enabled by default (see [How to disable paid features](#how-to-disable-paid-features) to disable them). **The [trial
+license][trial-license] is valid for 30 days**. After this license expires, you can continue using the free features
+seamlessly, without losing any data.*
+
+Based on the official Docker images from Elastic:
+
+* [Elasticsearch](https://github.com/elastic/elasticsearch/tree/master/distribution/docker)
+* [Logstash](https://github.com/elastic/logstash/tree/master/docker)
+* [Kibana](https://github.com/elastic/kibana/tree/master/src/dev/build/tasks/os_packages/docker_generator)
+
+Other available stack variants:
+
+* [`tls`](https://github.com/deviantony/docker-elk/tree/tls): TLS encryption enabled in Elasticsearch.
+* [`searchguard`](https://github.com/deviantony/docker-elk/tree/searchguard): Search Guard support.
+
+---
+
+## Philosophy
+
+We aim to provide the simplest possible entry into the Elastic stack for anybody who feels like experimenting with
+this powerful combo of technologies. This project's default configuration is purposely minimal and unopinionated. It
+does not rely on any external dependency or custom automation to get things up and running.
+
+Instead, we believe in good documentation so that you can use this repository as a template, tweak it, and make it _your
+own_. [sherifabdlnaby/elastdocker][elastdocker] is one example, among others, of a project that builds upon this idea.
+
+---
+
+## Contents
+
+1. [Requirements](#requirements)
+   * [Host setup](#host-setup)
+   * [SELinux](#selinux)
+   * [Docker for Desktop](#docker-for-desktop)
+     * [Windows](#windows)
+     * [macOS](#macos)
+1. 
[Usage](#usage)
+   * [Version selection](#version-selection)
+   * [Bringing up the stack](#bringing-up-the-stack)
+   * [Cleanup](#cleanup)
+1. [Initial setup](#initial-setup)
+   * [Setting up user authentication](#setting-up-user-authentication)
+   * [Injecting data](#injecting-data)
+   * [Default Kibana index pattern creation](#default-kibana-index-pattern-creation)
+1. [Configuration](#configuration)
+   * [How to configure Elasticsearch](#how-to-configure-elasticsearch)
+   * [How to configure Kibana](#how-to-configure-kibana)
+   * [How to configure Logstash](#how-to-configure-logstash)
+   * [How to disable paid features](#how-to-disable-paid-features)
+   * [How to scale out the Elasticsearch cluster](#how-to-scale-out-the-elasticsearch-cluster)
+   * [How to reset a password programmatically](#how-to-reset-a-password-programmatically)
+1. [Extensibility](#extensibility)
+   * [How to add plugins](#how-to-add-plugins)
+   * [How to enable the provided extensions](#how-to-enable-the-provided-extensions)
+1. [JVM tuning](#jvm-tuning)
+   * [How to specify the amount of memory used by a service](#how-to-specify-the-amount-of-memory-used-by-a-service)
+   * [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service)
+1. [Going further](#going-further)
+   * [Plugins and integrations](#plugins-and-integrations)
+   * [Swarm mode](#swarm-mode)
+
+## Requirements
+
+### Host setup
+
+* [Docker Engine](https://docs.docker.com/install/) version **17.05** or newer
+* [Docker Compose](https://docs.docker.com/compose/install/) version **1.20.0** or newer
+* 1.5 GB of RAM
+
+*:information_source: Especially on Linux, make sure your user has the [required permissions][linux-postinstall] to
+interact with the Docker daemon.*
+
+By default, the stack exposes the following ports:
+
+* 5044: Logstash Beats input
+* 5000: Logstash TCP input
+* 9600: Logstash monitoring API
+* 9200: Elasticsearch HTTP
+* 9300: Elasticsearch TCP transport
+* 5601: Kibana
+
+**:warning: Elasticsearch's [bootstrap checks][booststap-checks] were purposely disabled to facilitate the setup of the
+Elastic stack in development environments. For production setups, we recommend that users set up their host according
+to the instructions from the Elasticsearch documentation: [Important System Configuration][es-sys-config].**
+
+### SELinux
+
+On distributions which have SELinux enabled out of the box, you will need to either re-context the files or set SELinux
+into Permissive mode in order for docker-elk to start properly. For example, on Red Hat and CentOS, the following will
+apply the proper context:
+
+```console
+$ chcon -R system_u:object_r:admin_home_t:s0 docker-elk/
+```
+
+### Docker for Desktop
+
+#### Windows
+
+Ensure the [Shared Drives][win-shareddrives] feature is enabled for the `C:` drive.
+
+#### macOS
+
+The default Docker for Mac configuration allows mounting files from `/Users/`, `/Volumes/`, `/private/`, and `/tmp`
+exclusively. Make sure the repository is cloned in one of those locations or follow the instructions from the
+[documentation][mac-mounts] to add more locations.
+
+## Usage
+
+### Version selection
+
+This repository tries to stay aligned with the latest version of the Elastic stack. The `main` branch tracks the current
+major version (7.x).
+
+To use a different version of the core Elastic components, simply change the version number inside the `.env` file. If
+you are upgrading an existing stack, please carefully read the note in the next section.
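+
+For example, to run the stack with a different 7.x release (version number purely illustrative), change the single line
+in `.env` and rebuild the images:
+
+```console
+$ echo 'ELK_VERSION=7.10.2' > .env
+$ docker-compose build
+```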
+
+**:warning: Always pay attention to the [official upgrade instructions][upgrade] for each individual component before
+performing a stack upgrade.**
+
+Older major versions are also supported on separate branches:
+
+* [`release-6.x`](https://github.com/deviantony/docker-elk/tree/release-6.x): 6.x series
+* [`release-5.x`](https://github.com/deviantony/docker-elk/tree/release-5.x): 5.x series (End-Of-Life)
+
+### Bringing up the stack
+
+Clone this repository onto the Docker host that will run the stack, then start services locally using Docker Compose:
+
+```console
+$ docker-compose up
+```
+
+You can also run all services in the background (detached mode) by adding the `-d` flag to the above command.
+
+**:warning: You must rebuild the stack images with `docker-compose build` whenever you switch branch or update the
+version of an already existing stack.**
+
+If you are starting the stack for the very first time, please read the section below attentively.
+
+### Cleanup
+
+Elasticsearch data is persisted inside a volume by default.
+
+In order to entirely shut down the stack and remove all persisted data, use the following Docker Compose command:
+
+```console
+$ docker-compose down -v
+```
+
+## Initial setup
+
+### Setting up user authentication
+
+*:information_source: Refer to [How to disable paid features](#how-to-disable-paid-features) to disable authentication.*
+
+The stack is pre-configured with the following **privileged** bootstrap user:
+
+* user: *elastic*
+* password: *changeme*
+
+Although all stack components work out of the box with this user, we strongly recommend using the unprivileged [built-in
+users][builtin-users] instead for increased security.
+
+1. Initialize passwords for built-in users
+
+   ```console
+   $ docker-compose exec -T elasticsearch bin/elasticsearch-setup-passwords auto --batch
+   ```
+
+   Passwords for all 6 built-in users will be randomly generated. Take note of them.
+
+1. Unset the bootstrap password (_optional_)
+
+   Remove the `ELASTIC_PASSWORD` environment variable from the `elasticsearch` service inside the Compose file
+   (`docker-compose.yml`). It is only used to initialize the keystore during the initial startup of Elasticsearch.
+
+1. Replace usernames and passwords in configuration files
+
+   Use the `kibana_system` user (`kibana` for releases <7.8.0) inside the Kibana configuration file
+   (`kibana/config/kibana.yml`) and the `logstash_system` user inside the Logstash configuration file
+   (`logstash/config/logstash.yml`) in place of the existing `elastic` user.
+
+   Replace the password for the `elastic` user inside the Logstash pipeline file (`logstash/pipeline/logstash.conf`).
+
+   *:information_source: Do not use the `logstash_system` user inside the Logstash **pipeline** file; it does not have
+   sufficient permissions to create indices. Follow the instructions at [Configuring Security in Logstash][ls-security]
+   to create a user with suitable roles.*
+
+   See also the [Configuration](#configuration) section below.
+
+1. 
Restart Kibana and Logstash to apply changes
+
+   ```console
+   $ docker-compose restart kibana logstash
+   ```
+
+   *:information_source: Learn more about the security of the Elastic stack at [Tutorial: Getting started with
+   security][sec-tutorial].*
+
+### Injecting data
+
+Give Kibana about a minute to initialize, then access the Kibana web UI by opening <http://localhost:5601> in a web
+browser and use the following credentials to log in:
+
+* user: *elastic*
+* password: *\<your generated elastic password\>*
+
+Now that the stack is running, you can go ahead and inject some log entries. The shipped Logstash configuration allows
+you to send content via TCP:
+
+```console
+# Using BSD netcat (Debian, Ubuntu, macOS system, ...)
+$ cat /path/to/logfile.log | nc -q0 localhost 5000
+```
+
+```console
+# Using GNU netcat (CentOS, Fedora, macOS Homebrew, ...)
+$ cat /path/to/logfile.log | nc -c localhost 5000
+```
+
+You can also load the sample data provided by your Kibana installation.
+
+### Default Kibana index pattern creation
+
+When Kibana launches for the first time, it is not configured with any index pattern.
+
+#### Via the Kibana web UI
+
+*:information_source: You need to inject data into Logstash before being able to configure a Logstash index pattern via
+the Kibana web UI.*
+
+Navigate to the _Discover_ view of Kibana from the left sidebar. You will be prompted to create an index pattern. Enter
+`logstash-*` to match Logstash indices; then, on the next page, select `@timestamp` as the time filter field. Finally,
+click _Create index pattern_ and return to the _Discover_ view to inspect your log entries.
+
+Refer to [Connect Kibana with Elasticsearch][connect-kibana] and [Creating an index pattern][index-pattern] for detailed
+instructions about the index pattern configuration.
+
+#### On the command line
+
+Create an index pattern via the Kibana API:
+
+```console
+$ curl -XPOST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
+    -H 'Content-Type: application/json' \
+    -H 'kbn-version: 7.11.0' \
+    -u elastic:<your generated elastic password> \
+    -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
+```
+
+The created pattern will automatically be marked as the default index pattern as soon as the Kibana UI is opened for the
+first time.
+
+## Configuration
+
+*:information_source: Configuration is not dynamically reloaded; you will need to restart individual components after
+any configuration change.*
+
+### How to configure Elasticsearch
+
+The Elasticsearch configuration is stored in [`elasticsearch/config/elasticsearch.yml`][config-es].
+
+You can also specify the options you want to override by setting environment variables inside the Compose file:
+
+```yml
+elasticsearch:
+
+  environment:
+    network.host: _non_loopback_
+    cluster.name: my-cluster
+```
+
+Please refer to the following documentation page for more details about how to configure Elasticsearch inside Docker
+containers: [Install Elasticsearch with Docker][es-docker].
+
+### How to configure Kibana
+
+The Kibana default configuration is stored in [`kibana/config/kibana.yml`][config-kbn].
+
+It is also possible to map the entire `config` directory instead of a single file.
+
+Please refer to the following documentation page for more details about how to configure Kibana inside Docker
+containers: [Install Kibana with Docker][kbn-docker].
+
+### How to configure Logstash
+
+The Logstash configuration is stored in [`logstash/config/logstash.yml`][config-ls].
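+
+As with Elasticsearch above, individual settings can also be overridden from the Compose file by setting environment
+variables; a sketch (the setting name is illustrative, see the Docker documentation linked below for supported options):
+
+```yml
+logstash:
+
+  environment:
+    LOG_LEVEL: debug
+```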
+
+It is also possible to map the entire `config` directory instead of a single file; however, you must be aware that
+Logstash will be expecting a [`log4j2.properties`][log4j-props] file for its own logging.
+
+Please refer to the following documentation page for more details about how to configure Logstash inside Docker
+containers: [Configuring Logstash for Docker][ls-docker].
+
+### How to disable paid features
+
+Switch the value of Elasticsearch's `xpack.license.self_generated.type` option from `trial` to `basic` (see [License
+settings][trial-license]).
+
+### How to scale out the Elasticsearch cluster
+
+Follow the instructions from the Wiki: [Scaling out Elasticsearch](https://github.com/deviantony/docker-elk/wiki/Elasticsearch-cluster)
+
+### How to reset a password programmatically
+
+If for any reason you are unable to use Kibana to change the password of your users (including [built-in
+users][builtin-users]), you can use the Elasticsearch API instead and achieve the same result.
+
+In the example below, we reset the password of the `elastic` user (notice "/user/elastic" in the URL):
+
+```console
+$ curl -XPOST -D- 'http://localhost:9200/_security/user/elastic/_password' \
+    -H 'Content-Type: application/json' \
+    -u elastic:<your current elastic password> \
+    -d '{"password" : "<your new password>"}'
+```
+
+## Extensibility
+
+### How to add plugins
+
+To add plugins to any ELK component you have to:
+
+1. Add a `RUN` statement to the corresponding `Dockerfile` (e.g. `RUN logstash-plugin install logstash-filter-json`)
+1. Add the associated plugin code configuration to the service configuration (e.g. Logstash input/output)
+1. Rebuild the images using the `docker-compose build` command
+
+### How to enable the provided extensions
+
+A few extensions are available inside the [`extensions`](extensions) directory. These extensions provide features which
+are not part of the standard Elastic stack, but can be used to enrich it with extra integrations.
+
+The documentation for these extensions is provided inside each individual subdirectory, on a per-extension basis. Some
+of them require manual changes to the default ELK configuration.
+
+## JVM tuning
+
+### How to specify the amount of memory used by a service
+
+By default, both Elasticsearch and Logstash start with [1/4 of the total host
+memory](https://docs.oracle.com/javase/8/docs/technotes/guides/vm/gctuning/parallel.html#default_heap_size) allocated to
+the JVM Heap Size.
+
+The startup scripts for Elasticsearch and Logstash can append extra JVM options from the value of an environment
+variable, allowing the user to adjust the amount of memory that can be used by each component:
+
+| Service       | Environment variable |
+|---------------|----------------------|
+| Elasticsearch | ES_JAVA_OPTS         |
+| Logstash      | LS_JAVA_OPTS         |
+
+To accommodate environments where memory is scarce (Docker for Mac has only 2 GB available by default), the Heap Size
+allocation is capped by default to 256MB per service in the `docker-compose.yml` file. If you want to override the
+default JVM configuration, edit the matching environment variable(s) in the `docker-compose.yml` file.
+
+For example, to increase the maximum JVM Heap Size for Logstash:
+
+```yml
+logstash:
+
+  environment:
+    LS_JAVA_OPTS: -Xmx1g -Xms1g
+```
+
+### How to enable a remote JMX connection to a service
+
+As for the Java Heap memory (see above), you can specify JVM options to enable JMX and map the JMX port on the Docker
+host.
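+
+For the JMX connection to work from outside the container, the JMX port must also be published in the Compose file; a
+minimal sketch, assuming the port 18080 used in the example below:
+
+```yml
+logstash:
+
+  ports:
+    - "18080:18080"
+```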
+ +Update the `{ES,LS}_JAVA_OPTS` environment variable with the following content (I've mapped the JMX service on the port +18080, you can change that). Do not forget to update the `-Djava.rmi.server.hostname` option with the IP address of your +Docker host (replace **DOCKER_HOST_IP**): + +```yml +logstash: + + environment: + LS_JAVA_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=18080 -Dcom.sun.management.jmxremote.rmi.port=18080 -Djava.rmi.server.hostname=DOCKER_HOST_IP -Dcom.sun.management.jmxremote.local.only=false +``` + +## Going further + +### Plugins and integrations + +See the following Wiki pages: + +* [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications) +* [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations) + +### Swarm mode + +Experimental support for Docker [Swarm mode][swarm-mode] is provided in the form of a `docker-stack.yml` file, which can +be deployed in an existing Swarm cluster using the following command: + +```console +$ docker stack deploy -c docker-stack.yml elk +``` + +If all components get deployed without any error, the following command will show 3 running services: + +```console +$ docker stack services elk +``` + +*:information_source: To scale Elasticsearch in Swarm mode, configure seed hosts with the DNS name `tasks.elasticsearch` +instead of `elasticsearch`.* + +[elk-stack]: https://www.elastic.co/what-is/elk-stack +[xpack]: https://www.elastic.co/what-is/open-x-pack +[paid-features]: https://www.elastic.co/subscriptions +[trial-license]: https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html + +[elastdocker]: https://github.com/sherifabdlnaby/elastdocker + +[linux-postinstall]: https://docs.docker.com/install/linux/linux-postinstall/ + +[booststap-checks]: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html +[es-sys-config]: https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html + +[win-shareddrives]: https://docs.docker.com/docker-for-windows/#shared-drives +[mac-mounts]: https://docs.docker.com/docker-for-mac/osxfs/ + +[builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html +[ls-security]: https://www.elastic.co/guide/en/logstash/current/ls-security.html +[sec-tutorial]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-getting-started.html + +[connect-kibana]: https://www.elastic.co/guide/en/kibana/current/connect-to-elasticsearch.html +[index-pattern]: https://www.elastic.co/guide/en/kibana/current/index-patterns.html + +[config-es]: ./elasticsearch/config/elasticsearch.yml +[config-kbn]: ./kibana/config/kibana.yml +[config-ls]: ./logstash/config/logstash.yml + +[es-docker]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html +[kbn-docker]: https://www.elastic.co/guide/en/kibana/current/docker.html +[ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html + +[log4j-props]: https://github.com/elastic/logstash/tree/7.6/docker/data/logstash/config +[esuser]: https://github.com/elastic/elasticsearch/blob/7.6/distribution/docker/src/docker/Dockerfile#L23-L24 + +[upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html + +[swarm-mode]: https://docs.docker.com/engine/swarm/ diff --git a/docker-compose.yml b/docker-compose.yml 
new file mode 100644 index 0000000..669e337 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,77 @@ +version: '3.2' + +services: + elasticsearch: + build: + context: elasticsearch/ + args: + ELK_VERSION: $ELK_VERSION + volumes: + - type: bind + source: ./elasticsearch/config/elasticsearch.yml + target: /usr/share/elasticsearch/config/elasticsearch.yml + read_only: true + - type: volume + source: elasticsearch + target: /usr/share/elasticsearch/data + ports: + - "9200:9200" + - "9300:9300" + environment: + ES_JAVA_OPTS: "-Xmx256m -Xms256m" + ELASTIC_PASSWORD: changeme + # Use single node discovery in order to disable production mode and avoid bootstrap checks. + # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html + discovery.type: single-node + networks: + - elk + + logstash: + build: + context: logstash/ + args: + ELK_VERSION: $ELK_VERSION + volumes: + - type: bind + source: ./logstash/config/logstash.yml + target: /usr/share/logstash/config/logstash.yml + read_only: true + - type: bind + source: ./logstash/pipeline + target: /usr/share/logstash/pipeline + read_only: true + ports: + - "5044:5044" + - "5000:5000/tcp" + - "5000:5000/udp" + - "9600:9600" + environment: + LS_JAVA_OPTS: "-Xmx256m -Xms256m" + networks: + - elk + depends_on: + - elasticsearch + + kibana: + build: + context: kibana/ + args: + ELK_VERSION: $ELK_VERSION + volumes: + - type: bind + source: ./kibana/config/kibana.yml + target: /usr/share/kibana/config/kibana.yml + read_only: true + ports: + - "5601:5601" + networks: + - elk + depends_on: + - elasticsearch + +networks: + elk: + driver: bridge + +volumes: + elasticsearch: diff --git a/docker-stack.yml b/docker-stack.yml new file mode 100644 index 0000000..1a5e55e --- /dev/null +++ b/docker-stack.yml @@ -0,0 +1,72 @@ +version: '3.3' + +services: + + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0 + ports: + - "9200:9200" + - "9300:9300" + configs: + - source: elastic_config + target: /usr/share/elasticsearch/config/elasticsearch.yml + environment: + ES_JAVA_OPTS: "-Xmx256m -Xms256m" + ELASTIC_PASSWORD: changeme + # Use single node discovery in order to disable production mode and avoid bootstrap checks. + # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html + discovery.type: single-node + # Force publishing on the 'elk' overlay. 
+      network.publish_host: _eth0_
+    networks:
+      - elk
+    deploy:
+      mode: replicated
+      replicas: 1
+
+  logstash:
+    image: docker.elastic.co/logstash/logstash:7.11.0
+    ports:
+      - "5044:5044"
+      - "5000:5000"
+      - "9600:9600"
+    configs:
+      - source: logstash_config
+        target: /usr/share/logstash/config/logstash.yml
+      - source: logstash_pipeline
+        target: /usr/share/logstash/pipeline/logstash.conf
+    environment:
+      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
+    networks:
+      - elk
+    deploy:
+      mode: replicated
+      replicas: 1
+
+  kibana:
+    image: docker.elastic.co/kibana/kibana:7.11.0
+    ports:
+      - "5601:5601"
+    configs:
+      - source: kibana_config
+        target: /usr/share/kibana/config/kibana.yml
+    networks:
+      - elk
+    deploy:
+      mode: replicated
+      replicas: 1
+
+configs:
+
+  elastic_config:
+    file: ./elasticsearch/config/elasticsearch.yml
+  logstash_config:
+    file: ./logstash/config/logstash.yml
+  logstash_pipeline:
+    file: ./logstash/pipeline/logstash.conf
+  kibana_config:
+    file: ./kibana/config/kibana.yml
+
+networks:
+  elk:
+    driver: overlay
diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile
new file mode 100644
index 0000000..3928544
--- /dev/null
+++ b/elasticsearch/Dockerfile
@@ -0,0 +1,7 @@
+ARG ELK_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
+
+# Add your elasticsearch plugins setup here
+# Example: RUN elasticsearch-plugin install analysis-icu
diff --git a/elasticsearch/config/elasticsearch.yml b/elasticsearch/config/elasticsearch.yml
new file mode 100644
index 0000000..86822dd
--- /dev/null
+++ b/elasticsearch/config/elasticsearch.yml
@@ -0,0 +1,13 @@
+---
+## Default Elasticsearch configuration from Elasticsearch base image.
+## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
+#
+cluster.name: "docker-cluster"
+network.host: 0.0.0.0
+
+## X-Pack settings
+## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
+#
+xpack.license.self_generated.type: trial
+xpack.security.enabled: true
+xpack.monitoring.collection.enabled: true
diff --git a/extensions/README.md b/extensions/README.md
new file mode 100644
index 0000000..50016fb
--- /dev/null
+++ b/extensions/README.md
@@ -0,0 +1,3 @@
+# Extensions
+
+Third-party extensions that enable extra integrations with the Elastic stack.
diff --git a/extensions/apm-server/Dockerfile b/extensions/apm-server/Dockerfile
new file mode 100644
index 0000000..2c611df
--- /dev/null
+++ b/extensions/apm-server/Dockerfile
@@ -0,0 +1,3 @@
+ARG ELK_VERSION
+
+FROM docker.elastic.co/apm/apm-server:${ELK_VERSION}
diff --git a/extensions/apm-server/README.md b/extensions/apm-server/README.md
new file mode 100644
index 0000000..55df2d5
--- /dev/null
+++ b/extensions/apm-server/README.md
@@ -0,0 +1,56 @@
+# APM Server extension
+
+The APM Server receives data from APM agents and transforms them into Elasticsearch documents that can be visualised in
+Kibana.
+
+## Usage
+
+To include APM Server in the stack, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `apm-server-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
+```
+
+Meanwhile, you can navigate to the **APM** application in Kibana and follow the setup instructions to get started.
+
+## Connecting an agent to APM Server
+
+The most basic configuration to send traces to APM Server is to specify the `SERVICE_NAME` and `SERVER_URL`.
Here is an
+example Python Flask configuration:
+
+```python
+import elasticapm
+from elasticapm.contrib.flask import ElasticAPM
+
+from flask import Flask
+
+app = Flask(__name__)
+app.config['ELASTIC_APM'] = {
+    # Set required service name. Allowed characters:
+    # a-z, A-Z, 0-9, -, _, and space
+    'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',
+
+    # Set custom APM Server URL (default: http://localhost:8200)
+    'SERVER_URL': 'http://apm-server:8200',
+
+    'DEBUG': True,
+}
+
+# Attach the agent to the Flask application
+apm = ElasticAPM(app)
+```
+
+Configuration settings for each supported language are available in the APM documentation: [APM Agents][apm-agents].
+
+## Checking connectivity and importing default APM dashboards
+
+1. On the Kibana home page, click `Add APM` under the _Observability_ panel.
+1. Click `Check APM Server status` to confirm the server is up and running.
+1. Click `Check agent status` to verify your agent has registered properly.
+1. Click `Load Kibana objects` to create an index pattern for APM.
+1. Click `Launch APM` to be taken to the APM dashboard.
+
+## See also
+
+[Running APM Server on Docker][apm-docker]
+
+[apm-agents]: https://www.elastic.co/guide/en/apm/get-started/current/components.html#_apm_agents
+[apm-docker]: https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html
diff --git a/extensions/apm-server/apm-server-compose.yml b/extensions/apm-server/apm-server-compose.yml
new file mode 100644
index 0000000..bd21a2d
--- /dev/null
+++ b/extensions/apm-server/apm-server-compose.yml
@@ -0,0 +1,23 @@
+version: '3.2'
+
+services:
+  apm-server:
+    build:
+      context: extensions/apm-server/
+      args:
+        ELK_VERSION: $ELK_VERSION
+    command:
+      # Disable strict permission checking on 'apm-server.yml' configuration file
+      # https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
+      - --strict.perms=false
+    volumes:
+      - type: bind
+        source: ./extensions/apm-server/config/apm-server.yml
+        target: /usr/share/apm-server/apm-server.yml
+        read_only: true
+    ports:
+      - '8200:8200'
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
diff --git a/extensions/apm-server/config/apm-server.yml b/extensions/apm-server/config/apm-server.yml
new file mode 100644
index 0000000..493a49e
--- /dev/null
+++ b/extensions/apm-server/config/apm-server.yml
@@ -0,0 +1,8 @@
+apm-server:
+  host: 0.0.0.0:8200
+
+output:
+  elasticsearch:
+    hosts: ['http://elasticsearch:9200']
+    username: elastic
+    password: changeme
diff --git a/extensions/curator/Dockerfile b/extensions/curator/Dockerfile
new file mode 100644
index 0000000..84ff5ad
--- /dev/null
+++ b/extensions/curator/Dockerfile
@@ -0,0 +1,17 @@
+FROM bitnami/elasticsearch-curator:5.8.1
+
+USER root
+
+RUN install_packages cron && \
+    echo \
+      '* * * * *' \
+        root \
+        LC_ALL=C.UTF-8 LANG=C.UTF-8 \
+        /opt/bitnami/python/bin/curator \
+        --config=/usr/share/curator/config/curator.yml \
+        /usr/share/curator/config/delete_log_files_curator.yml \
+        '>/proc/1/fd/1' '2>/proc/1/fd/2' \
+      >>/etc/crontab
+
+ENTRYPOINT ["cron"]
+CMD ["-f", "-L8"]
diff --git a/extensions/curator/README.md b/extensions/curator/README.md
new file mode 100644
index 0000000..5c38786
--- /dev/null
+++ b/extensions/curator/README.md
@@ -0,0 +1,20 @@
+# Curator
+
+Elasticsearch Curator helps you curate or manage your indices.
+
+## Usage
+
+To include the Curator extension, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `curator-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
+```
+
+This sample setup demonstrates how to run `curator` every minute using `cron`.
+
+All configuration files are available in the `config/` directory.
+
+## Documentation
+
+[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)
diff --git a/extensions/curator/config/curator.yml b/extensions/curator/config/curator.yml
new file mode 100644
index 0000000..8b06971
--- /dev/null
+++ b/extensions/curator/config/curator.yml
@@ -0,0 +1,12 @@
+# Curator configuration
+# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html
+
+client:
+  hosts:
+    - elasticsearch
+  port: 9200
+  http_auth: elastic:changeme
+
+logging:
+  loglevel: INFO
+  logformat: default
diff --git a/extensions/curator/config/delete_log_files_curator.yml b/extensions/curator/config/delete_log_files_curator.yml
new file mode 100644
index 0000000..779c67a
--- /dev/null
+++ b/extensions/curator/config/delete_log_files_curator.yml
@@ -0,0 +1,21 @@
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete indices. Find which to delete by first limiting the list to
+      logstash- prefixed indices. Then further filter those to prevent deletion
+      of anything newer than the number of days specified by unit_count.
+      Ignore the error if the filter does not result in an actionable list of
+      indices (ignore_empty_list) and exit cleanly.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: logstash-
+    - filtertype: age
+      source: creation_date
+      direction: older
+      unit: days
+      unit_count: 2
diff --git a/extensions/curator/curator-compose.yml b/extensions/curator/curator-compose.yml
new file mode 100644
index 0000000..96dae3c
--- /dev/null
+++ b/extensions/curator/curator-compose.yml
@@ -0,0 +1,20 @@
+version: '3.2'
+
+services:
+  curator:
+    build:
+      context: extensions/curator/
+    init: true
+    volumes:
+      - type: bind
+        source: ./extensions/curator/config/curator.yml
+        target: /usr/share/curator/config/curator.yml
+        read_only: true
+      - type: bind
+        source: ./extensions/curator/config/delete_log_files_curator.yml
+        target: /usr/share/curator/config/delete_log_files_curator.yml
+        read_only: true
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
diff --git a/extensions/enterprise-search/Dockerfile b/extensions/enterprise-search/Dockerfile
new file mode 100644
index 0000000..7edd720
--- /dev/null
+++ b/extensions/enterprise-search/Dockerfile
@@ -0,0 +1,4 @@
+ARG ELK_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/enterprise-search/enterprise-search:${ELK_VERSION}
diff --git a/extensions/enterprise-search/README.md b/extensions/enterprise-search/README.md
new file mode 100644
index 0000000..d055c88
--- /dev/null
+++ b/extensions/enterprise-search/README.md
@@ -0,0 +1,134 @@
+# Enterprise Search extension
+
+Elastic Enterprise Search is a suite of products for search applications backed by the Elastic Stack.
+
+## Requirements
+
+* 2 GB of free RAM, on top of the resources required by the other stack components and extensions.
+
+Enterprise Search exposes the TCP port `3002` for its Web UI and API.
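+
+For example, once the server is up (see _Usage_ below), you can check from the Docker host that the API answers. The
+health endpoint shown here is an assumption based on the Enterprise Search monitoring APIs; the credentials are the
+defaults described further down in this document:
+
+```console
+$ curl -u enterprise_search:changeme http://localhost:3002/api/ent/v1/internal/health
+```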
+
+## Usage
+
+### Generate an encryption key
+
+Enterprise Search requires one or more [encryption keys][enterprisesearch-encryption] to be configured before the
+initial startup. Failing to do so prevents the server from starting.
+
+Encryption keys can contain any series of characters. Elastic recommends using 256-bit keys for optimal security.
+
+Those encryption keys must be added manually to the [`config/enterprise-search.yml`][config-enterprisesearch] file. By
+default, the list of encryption keys is empty and must be populated using one of the following formats:
+
+```yaml
+secret_management.encryption_keys:
+  - my_first_encryption_key
+  - my_second_encryption_key
+  - ...
+```
+
+```yaml
+secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...]
+```
+
+> :information_source: To generate a strong encryption key, for example using the AES-256 cipher, you can use the
+> OpenSSL utility or any other online/offline tool of your choice:
+>
+> ```console
+> $ openssl enc -aes-256 -P
+>
+> enter aes-256-cbc encryption password:
+> Verifying - enter aes-256-cbc encryption password:
+> ...
+>
+> key=<generated encryption key>
+> ```
+
+### Enable Elasticsearch's API key service
+
+Enterprise Search requires Elasticsearch's built-in [API key service][es-security] to be enabled in order to start.
+Unless TLS is enabled on the HTTP interface (it is not, in the default configuration of this stack), the API key
+service is disabled and must be enabled explicitly.
+
+To enable it, modify the Elasticsearch configuration file in [`elasticsearch/config/elasticsearch.yml`][config-es] and
+add the following setting:
+
+```yaml
+xpack.security.authc.api_key.enabled: true
+```
+
+### Start the server
+
+To include Enterprise Search in the stack, run Docker Compose from the root of the repository with an additional
+command line argument referencing the `enterprise-search-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up
+```
+
+Allow a few minutes for the stack to start, then open your web browser at the address <http://localhost:3002> to see
+the Enterprise Search home page.
+
+Enterprise Search is configured on first boot with the following default credentials:
+
+* user: *enterprise_search*
+* password: *changeme*
+
+## Security
+
+The Enterprise Search password is defined inside the Compose file via the `ENT_SEARCH_DEFAULT_PASSWORD` environment
+variable. We highly recommend choosing a more secure password than the default one.
+
+To do so, change the value of the `ENT_SEARCH_DEFAULT_PASSWORD` environment variable inside the Compose file **before
+the first boot**:
+
+```yaml
+enterprise-search:
+
+  environment:
+    ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}}
+```
+
+> :warning: The default Enterprise Search password can only be set during the initial boot. Once the password is
+> persisted in Elasticsearch, it can only be changed via the Elasticsearch API.
+
+For more information, please refer to [User Management and Security][enterprisesearch-security].
+
+## Configuring Enterprise Search
+
+The Enterprise Search configuration is stored in [`config/enterprise-search.yml`][config-enterprisesearch]. You can
+modify this file using the [Default Enterprise Search configuration][enterprisesearch-config] as a reference.
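+
+For example, to make Enterprise Search advertise itself under a different public URL, you could override the
+`ent_search.external_url` setting in that file (the host name below is a placeholder):
+
+```yaml
+ent_search.external_url: http://search.example.com:3002
+```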
+ +You can also specify the options you want to override by setting environment variables inside the Compose file: + +```yaml +enterprise-search: + + environment: + ent_search.auth.source: standard + worker.threads: '6' +``` + +Any change to the Enterprise Search configuration requires a restart of the Enterprise Search container: + +```console +$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml restart enterprise-search +``` + +Please refer to the following documentation page for more details about how to configure Enterprise Search inside a +Docker container: [Running Enterprise Search Using Docker][enterprisesearch-docker]. + +## See also + +[Enterprise Search documentation][enterprisesearch-docs] + +[config-enterprisesearch]: ./config/enterprise-search.yml + +[enterprisesearch-encryption]: https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html +[enterprisesearch-security]: https://www.elastic.co/guide/en/workplace-search/current/workplace-search-security.html +[enterprisesearch-config]: https://www.elastic.co/guide/en/enterprise-search/current/configuration.html +[enterprisesearch-docker]: https://www.elastic.co/guide/en/enterprise-search/current/docker.html +[enterprisesearch-docs]: https://www.elastic.co/guide/en/enterprise-search/current/index.html + +[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings +[config-es]: ../../elasticsearch/config/elasticsearch.yml diff --git a/extensions/enterprise-search/config/enterprise-search.yml b/extensions/enterprise-search/config/enterprise-search.yml new file mode 100644 index 0000000..891b510 --- /dev/null +++ b/extensions/enterprise-search/config/enterprise-search.yml @@ -0,0 +1,27 @@ +--- +## Enterprise Search core configuration +## https://www.elastic.co/guide/en/enterprise-search/current/configuration.html +# + +## --------------------- REQUIRED --------------------- + +# Encryption keys to protect application secrets. +secret_management.encryption_keys: + # add encryption keys below + #- add encryption keys here + +## ---------------------------------------------------- + +# IP address Enterprise Search listens on +ent_search.listen_host: 0.0.0.0 + +# URL at which users reach Enterprise Search +ent_search.external_url: http://localhost:3002 + +# Elasticsearch URL and credentials +elasticsearch.host: http://elasticsearch:9200 +elasticsearch.username: elastic +elasticsearch.password: changeme + +# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes. 
+allow_es_settings_modification: true
diff --git a/extensions/enterprise-search/enterprise-search-compose.yml b/extensions/enterprise-search/enterprise-search-compose.yml
new file mode 100644
index 0000000..cd58caa
--- /dev/null
+++ b/extensions/enterprise-search/enterprise-search-compose.yml
@@ -0,0 +1,22 @@
+version: '3.2'
+
+services:
+  enterprise-search:
+    build:
+      context: extensions/enterprise-search/
+      args:
+        ELK_VERSION: $ELK_VERSION
+    volumes:
+      - type: bind
+        source: ./extensions/enterprise-search/config/enterprise-search.yml
+        target: /usr/share/enterprise-search/config/enterprise-search.yml
+        read_only: true
+    environment:
+      JAVA_OPTS: -Xmx2g -Xms2g
+      ENT_SEARCH_DEFAULT_PASSWORD: changeme
+    ports:
+      - '3002:3002'
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
diff --git a/extensions/logspout/Dockerfile b/extensions/logspout/Dockerfile
new file mode 100644
index 0000000..9591df5
--- /dev/null
+++ b/extensions/logspout/Dockerfile
@@ -0,0 +1,5 @@
+# uses ONBUILD instructions described here:
+# https://github.com/gliderlabs/logspout/tree/master/custom
+
+FROM gliderlabs/logspout:master
+ENV SYSLOG_FORMAT rfc3164
diff --git a/extensions/logspout/README.md b/extensions/logspout/README.md
new file mode 100644
index 0000000..f6a4d2b
--- /dev/null
+++ b/extensions/logspout/README.md
@@ -0,0 +1,28 @@
+# Logspout extension
+
+Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
+configuration.
+
+## Usage
+
+To include the Logspout extension, run Docker Compose from the root of the repository with an additional command line
+argument referencing the `logspout-compose.yml` file:
+
+```console
+$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
+```
+
+In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
+
+```logstash
+input {
+  udp {
+    port => 5000
+    codec => json
+  }
+}
+```
+
+## Documentation
+
+<https://github.com/gliderlabs/logspout>
diff --git a/extensions/logspout/build.sh b/extensions/logspout/build.sh
new file mode 100755
index 0000000..c3ff938
--- /dev/null
+++ b/extensions/logspout/build.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# source: https://github.com/gliderlabs/logspout/blob/621524e/custom/build.sh
+
+set -e
+apk add --update go build-base git mercurial ca-certificates
+cd /src
+go build -ldflags "-X main.Version=$1" -o /bin/logspout
+apk del go git mercurial build-base
+rm -rf /root/go /var/cache/apk/*
+
+# backwards compatibility
+ln -fs /tmp/docker.sock /var/run/docker.sock
diff --git a/extensions/logspout/logspout-compose.yml b/extensions/logspout/logspout-compose.yml
new file mode 100644
index 0000000..1c349f2
--- /dev/null
+++ b/extensions/logspout/logspout-compose.yml
@@ -0,0 +1,16 @@
+version: '3.2'
+
+services:
+  logspout:
+    build:
+      context: extensions/logspout
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+    environment:
+      ROUTE_URIS: logstash://logstash:5000
+      LOGSTASH_TAGS: docker-elk
+    networks:
+      - elk
+    depends_on:
+      - logstash
+    restart: on-failure
diff --git a/extensions/logspout/modules.go b/extensions/logspout/modules.go
new file mode 100644
index 0000000..f1a2258
--- /dev/null
+++ b/extensions/logspout/modules.go
@@ -0,0 +1,10 @@
+package main
+
+// installs the Logstash adapter for Logspout, and required dependencies
+// https://github.com/looplab/logspout-logstash
+import (
+	_ "github.com/gliderlabs/logspout/healthcheck"
+	_ "github.com/gliderlabs/logspout/transports/tcp"
+	_ "github.com/gliderlabs/logspout/transports/udp"
+	_ "github.com/looplab/logspout-logstash"
"github.com/looplab/logspout-logstash" +) diff --git a/extensions/metricbeat/Dockerfile b/extensions/metricbeat/Dockerfile new file mode 100644 index 0000000..e55f625 --- /dev/null +++ b/extensions/metricbeat/Dockerfile @@ -0,0 +1,3 @@ +ARG ELK_VERSION + +FROM docker.elastic.co/beats/metricbeat:${ELK_VERSION} diff --git a/extensions/metricbeat/README.md b/extensions/metricbeat/README.md new file mode 100644 index 0000000..4da282a --- /dev/null +++ b/extensions/metricbeat/README.md @@ -0,0 +1,36 @@ +# Metricbeat + +Metricbeat is a lightweight shipper that you can install on your servers to periodically collect metrics from the +operating system and from services running on the server. Metricbeat takes the metrics and statistics that it collects +and ships them to the output that you specify, such as Elasticsearch or Logstash. + +## Usage + +To include Metricbeat in the stack, run Docker Compose from the root of the repository with an additional command line +argument referencing the `metricbeat-compose.yml` file: + +```console +$ docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up +``` + +## Configuring Metricbeat + +The Metricbeat configuration is stored in [`config/metricbeat.yml`](./config/metricbeat.yml). You can modify this file +with the help of the [Configuration reference][metricbeat-config]. + +Any change to the Metricbeat configuration requires a restart of the Metricbeat container: + +```console +$ docker-compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml restart metricbeat +``` + +Please refer to the following documentation page for more details about how to configure Metricbeat inside a +Docker container: [Run Metricbeat on Docker][metricbeat-docker]. + +## See also + +[Metricbeat documentation][metricbeat-doc] + +[metricbeat-config]: https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-reference-yml.html +[metricbeat-docker]: https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-docker.html +[metricbeat-doc]: https://www.elastic.co/guide/en/beats/metricbeat/current/index.html diff --git a/extensions/metricbeat/config/metricbeat.yml b/extensions/metricbeat/config/metricbeat.yml new file mode 100644 index 0000000..eac94d6 --- /dev/null +++ b/extensions/metricbeat/config/metricbeat.yml @@ -0,0 +1,44 @@ +## Metricbeat configuration +## https://github.com/elastic/beats/blob/master/deploy/docker/metricbeat.docker.yml +# + +metricbeat.config: + modules: + path: ${path.config}/modules.d/*.yml + # Reload module configs as they change: + reload.enabled: false + +metricbeat.autodiscover: + providers: + - type: docker + hints.enabled: true + +metricbeat.modules: +- module: docker + metricsets: + - container + - cpu + - diskio + - healthcheck + - info + #- image + - memory + - network + hosts: ['unix:///var/run/docker.sock'] + period: 10s + enabled: true + +processors: + - add_cloud_metadata: ~ + +output.elasticsearch: + hosts: ['http://elasticsearch:9200'] + username: elastic + password: changeme + +## HTTP endpoint for health checking +## https://www.elastic.co/guide/en/beats/metricbeat/current/http-endpoint.html +# + +http.enabled: true +http.host: 0.0.0.0 diff --git a/extensions/metricbeat/metricbeat-compose.yml b/extensions/metricbeat/metricbeat-compose.yml new file mode 100644 index 0000000..c8f9032 --- /dev/null +++ b/extensions/metricbeat/metricbeat-compose.yml @@ -0,0 +1,46 @@ +version: '3.2' + +services: + metricbeat: + build: + context: extensions/metricbeat/ + args: + ELK_VERSION: 
+    # Run as 'root' instead of 'metricbeat' (uid 1000) to allow reading
+    # 'docker.sock' and the host's filesystem.
+    user: root
+    command:
+      # Log to stderr.
+      - -e
+      # Disable config file permissions checks. Allows mounting
+      # 'config/metricbeat.yml' even if it's not owned by root.
+      # see: https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
+      - --strict.perms=false
+      # Mount point of the host's filesystem. Required to monitor the host
+      # from within a container.
+      - --system.hostfs=/hostfs
+    volumes:
+      - type: bind
+        source: ./extensions/metricbeat/config/metricbeat.yml
+        target: /usr/share/metricbeat/metricbeat.yml
+        read_only: true
+      - type: bind
+        source: /
+        target: /hostfs
+        read_only: true
+      - type: bind
+        source: /sys/fs/cgroup
+        target: /hostfs/sys/fs/cgroup
+        read_only: true
+      - type: bind
+        source: /proc
+        target: /hostfs/proc
+        read_only: true
+      - type: bind
+        source: /var/run/docker.sock
+        target: /var/run/docker.sock
+        read_only: true
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
diff --git a/kibana/Dockerfile b/kibana/Dockerfile
new file mode 100644
index 0000000..2fb3659
--- /dev/null
+++ b/kibana/Dockerfile
@@ -0,0 +1,7 @@
+ARG ELK_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
+
+# Add your kibana plugins setup here
+# Example: RUN kibana-plugin install <plugin-name>
diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml
new file mode 100644
index 0000000..0e1dc60
--- /dev/null
+++ b/kibana/config/kibana.yml
@@ -0,0 +1,13 @@
+---
+## Default Kibana configuration from Kibana base image.
+## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts
+#
+server.name: kibana
+server.host: 0.0.0.0
+elasticsearch.hosts: [ "http://elasticsearch:9200" ]
+monitoring.ui.container.elasticsearch.enabled: true
+
+## X-Pack security credentials
+#
+elasticsearch.username: elastic
+elasticsearch.password: changeme
diff --git a/logstash/Dockerfile b/logstash/Dockerfile
new file mode 100644
index 0000000..6a444e7
--- /dev/null
+++ b/logstash/Dockerfile
@@ -0,0 +1,7 @@
+ARG ELK_VERSION
+
+# https://www.docker.elastic.co/
+FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
+
+# Add your logstash plugins setup here
+# Example: RUN logstash-plugin install logstash-filter-json
diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml
new file mode 100644
index 0000000..a48c35f
--- /dev/null
+++ b/logstash/config/logstash.yml
@@ -0,0 +1,12 @@
+---
+## Default Logstash configuration from Logstash base image.
+## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
+#
+http.host: "0.0.0.0"
+xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
+
+## X-Pack security credentials
+#
+xpack.monitoring.enabled: true
+xpack.monitoring.elasticsearch.username: elastic
+xpack.monitoring.elasticsearch.password: changeme
diff --git a/logstash/pipeline/logstash.conf b/logstash/pipeline/logstash.conf
new file mode 100644
index 0000000..7d5918b
--- /dev/null
+++ b/logstash/pipeline/logstash.conf
@@ -0,0 +1,20 @@
+input {
+	beats {
+		port => 5044
+	}
+
+	tcp {
+		port => 5000
+	}
+}
+
+## Add your filters / logstash plugins configuration here
+
+output {
+	elasticsearch {
+		hosts => "elasticsearch:9200"
+		user => "elastic"
+		password => "changeme"
+		ecs_compatibility => disabled
+	}
+}
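+
+## Example (illustrative): send a test event through the TCP input above and
+## verify that it was indexed, e.g. from the Docker host:
+##
+##   $ echo 'hello docker-elk' | nc localhost 5000
+##   $ curl -u elastic:changeme 'http://localhost:9200/logstash-*/_search?q=hello'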