# docker-compose.yml (forked from confluentinc/cp-demo)
version: "2.1"
services:
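  # Single-node ZooKeeper used by the Kafka brokers, Schema Registry, Connect and Control Center.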
  zookeeper:
    image: confluentinc/cp-zookeeper:3.3.0
    restart: always
    environment:
      ZOOKEEPER_SERVER_ID: 1
      ZOOKEEPER_CLIENT_PORT: "2181"
      ZOOKEEPER_TICK_TIME: "2000"
      ZOOKEEPER_SERVERS: "zookeeper:22888:23888"
    ports:
      - "2181:2181"
  kafka1:
    image: confluentinc/cp-enterprise-kafka:3.3.0
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "29092:29092"
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:29092
      KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
      KAFKA_BROKER_ID: 1
      KAFKA_BROKER_RACK: "r1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka1:9092"
      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 2
      CONFLUENT_METRICS_REPORTER_MAX_REQUEST_SIZE: 10485760
      # To avoid a race condition with control-center
      CONFLUENT_METRICS_REPORTER_TOPIC_CREATE: "false"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      #KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
      KAFKA_JMX_PORT: 9991
  kafka2:
    image: confluentinc/cp-enterprise-kafka:3.3.0
    restart: always
    depends_on:
      - zookeeper
    ports:
      - "9093:9093"
      - "29093:29093"
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9093,PLAINTEXT_HOST://localhost:29093
      KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
      KAFKA_BROKER_ID: 2
      KAFKA_BROKER_RACK: "r1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka2:9093"
      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 2
      CONFLUENT_METRICS_REPORTER_MAX_REQUEST_SIZE: 10485760
      CONFLUENT_METRICS_REPORTER_TOPIC_CREATE: "false"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_JMX_PORT: 9992
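  # Kafka Connect worker; connector plugins and the null-filter SMT jar are
  # mounted from the local connect-plugins directory.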
  connect:
    image: confluentinc/cp-kafka-connect:3.3.0
    restart: always
    ports:
      - "8083:8083"
    depends_on:
      - zookeeper
      - kafka1
      - schemaregistry
    volumes:
      - $PWD/connect-plugins:/connect-plugins
      - $PWD/connect-plugins/kafka-connect-transform-nullfilter/null-filter-4.0.0-SNAPSHOT.jar:/usr/share/java/kafka-connect-elasticsearch/null-filter-4.0.0-SNAPSHOT.jar
    environment:
      CONNECT_BOOTSTRAP_SERVERS: kafka1:9092
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: "connect"
      CONNECT_CONFIG_STORAGE_TOPIC: connect-config
      CONNECT_OFFSET_STORAGE_TOPIC: connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: connect-status
      CONNECT_REPLICATION_FACTOR: 2
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 2
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 2
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 2
      CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.storage.StringConverter"
      CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_REST_ADVERTISED_HOST_NAME: "connect"
      CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181
      CONNECT_PLUGIN_PATH: /connect-plugins
      CONNECT_LOG4J_ROOT_LOGLEVEL: INFO
      CONNECT_LOG4J_LOGGERS: org.reflections=ERROR
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-3.3.0.jar
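  # Single-node Elasticsearch with X-Pack security disabled; the sink targeted
  # by the Kafka Connect Elasticsearch connector mounted above.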
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.6.0
    #restart: always
    depends_on:
      - connect
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      xpack.security.enabled: "false"
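  # Kibana UI on top of Elasticsearch, exposed on port 5601.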
  kibana:
    image: docker.elastic.co/kibana/kibana:5.5.2
    restart: always
    depends_on:
      - elasticsearch
    ports:
      - "5601:5601"
    environment:
      xpack.security.enabled: "false"
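  # Confluent Control Center UI, exposed on port 9021; monitors both brokers
  # and the Connect cluster.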
  control-center:
    image: confluentinc/cp-enterprise-control-center:3.3.1-alpha2
    restart: always
    depends_on:
      - zookeeper
      - kafka1
      - kafka2
      - connect
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: "kafka1:9092,kafka2:9093"
      CONTROL_CENTER_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONTROL_CENTER_REPLICATION_FACTOR: 2
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_REPLICATION: 2
      CONTROL_CENTER_INTERNAL_TOPICS_REPLICATION: 2
      CONTROL_CENTER_COMMAND_TOPIC_REPLICATION: 2
      CONTROL_CENTER_METRICS_TOPIC_REPLICATION: 2
      CONTROL_CENTER_STREAMS_NUM_STREAM_THREADS: 2
      CONTROL_CENTER_CONNECT_CLUSTER: "connect:8083"
      CONTROL_CENTER_STREAMS_CONSUMER_REQUEST_TIMEOUT_MS: "960032"
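  # Schema Registry, exposed on port 8081; stores schemas in Kafka via the
  # ZooKeeper connection configured below.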
  schemaregistry:
    image: confluentinc/cp-schema-registry:3.3.0
    restart: always
    depends_on:
      - zookeeper
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "zookeeper:2181"
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry
      SCHEMA_REGISTRY_LISTENERS: "http://0.0.0.0:8081"
    ports:
      - "8081:8081"
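  # Helper container: waits for kafka1 to be ready, pre-creates the demo topics,
  # then exits. It never runs a broker itself.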
  kafka-client:
    image: confluentinc/cp-enterprise-kafka:3.3.0
    depends_on:
      - kafka1
    hostname: kafka-client
    # We declare a dependency on "kafka1", but `depends_on` will NOT wait for the
    # dependency to be "ready" before starting the "kafka-client" container; it
    # waits only until the dependency has started. Hence we must control startup
    # order more explicitly.
    # See https://docs.docker.com/compose/startup-order/
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
                       cub kafka-ready -b kafka1:9092 1 60 && \
                       sleep 5 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic wikipedia.parsed --create --replication-factor 2 --partitions 2 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic wikipedia.failed --create --replication-factor 2 --partitions 2 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic WIKIPEDIABOT --create --replication-factor 2 --partitions 2 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic WIKIPEDIANOBOT --create --replication-factor 2 --partitions 2 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic EN_WIKIPEDIA_GT_1 --create --replication-factor 2 --partitions 2 && \
                       kafka-topics --zookeeper zookeeper:2181 --topic EN_WIKIPEDIA_GT_1_COUNTS --create --replication-factor 2 --partitions 2 && \
                       exit'"
    environment:
      # The following settings are listed here only to satisfy the image's requirements.
      # We override the image's `command` anyway, so this container will not start a broker.
      KAFKA_BROKER_ID: ignored
      KAFKA_ZOOKEEPER_CONNECT: ignored
    ports:
      - "7073:7073"
  ksql-cli:
    image: "confluentinc/ksql-cli:0.1-pre10"
    hostname: ksql-cli
    depends_on:
      - kafka1
    ports:
      - "9098:9098"
    volumes:
      - $PWD/monitoring-interceptors/monitoring-interceptors-3.3.0.jar:/usr/share/java/ksql-cli/monitoring-interceptors-3.3.0.jar
      - $PWD/scripts_ksql/ksqlproperties:/tmp/ksqlproperties
      - $PWD/scripts_ksql/ksqlcommands:/tmp/ksqlcommands
    command: "perl -e 'while(1){ sleep 99999 }'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties"
      STREAMS_BOOTSTRAP_SERVERS: kafka1:9092
      # Must match the "schemaregistry" service name above so the hostname resolves.
      STREAMS_SCHEMA_REGISTRY_HOST: schemaregistry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081
    extra_hosts:
      - "moby:127.0.0.1"