# config.properties.example — example Maxwell configuration
# (forked from zendesk/maxwell)
# tl;dr config
log_level=info
producer=kafka
kafka.bootstrap.servers=localhost:9092
# mysql login info
host=localhost
user=maxwell
password=maxwell
######### general stuff #############
# choose where to produce data to. currently this is one of: stdout|file|kafka|kinesis|pubsub|rabbitmq|redis
#producer=kafka
# set delivery timeout (including acknowledgement) in milliseconds for a COMMIT row message that has been sent by the producer,
# where producer is either kafka or kinesis
# maxwell may be terminated if the timeout has expired when the majority of the COMMIT row messages have been acknowledged
# set it to 0 to turn off this check
#producer_ack_timeout=120000 # default 0
# set the log level. note that you can configure things further in log4j2.xml
#log_level=DEBUG # [DEBUG, INFO, WARN, ERROR]
######### mysql stuff ###############
# mysql host to connect to
#host=hostname
# mysql port to connect to
#port=3306
# mysql user to connect as. This user must have REPLICATION SLAVE permissions,
# as well as full access to the `maxwell` (or schema_database) database
#user=maxwell
# mysql password
#password=maxwell
# options to pass into the jdbc connection, given as opt=val&opt2=val2
#jdbc_options=opt1=100&opt2=hello
# name of the mysql database where maxwell keeps its own state
#schema_database=maxwell
# maxwell can optionally replicate from a different server than where it stores
# schema and binlog position info. Specify that different server here:
#
#
#replication_host=other
#replication_user=username
#replication_password=password
#replication_port=3306
######### output format stuff ###############
# records include binlog position (default false)
#output_binlog_position=true
# records output null values (default true)
#output_nulls=true
# records include server_id (default false)
#output_server_id=true
# records include thread_id (default false)
#output_thread_id=true
# records include commit and xid (default true)
#output_commit_info=true
# produce DDL records to ddl_kafka_topic (default: false)
#output_ddl=true
######### kafka stuff ###############
# list of kafka brokers
#kafka.bootstrap.servers=hosta:9092,hostb:9092
# kafka topic to write to
# this can be static, e.g. 'maxwell', or dynamic, e.g. namespace_%{database}_%{table}
# in the latter case 'database' and 'table' will be replaced with the values for the row being processed
#kafka_topic=maxwell
# kafka topic to write DDL to
#ddl_kafka_topic=maxwell_ddl
# hash function to use. "default" is just the JVM's 'hashCode'
# function.
#kafka_partition_hash=default # [default, murmur3]
# how maxwell writes its kafka key.
#
# 'hash' looks like:
# {"database":"test","table":"tickets","pk.id":10001}
#
# 'array' looks like:
# ["test","tickets",[{"id":10001}]]
#
# currently the default is "hash"
#kafka_key_format=hash # [hash, array]
# other kafka options. Anything prefixed "kafka." will get
# passed directly into the kafka-producer's config.
#kafka.batch.size=16384
# a few defaults.
# These are 0.9-specific. They may or may not work with other versions.
kafka.compression.type=snappy
kafka.metadata.fetch.timeout.ms=5000
kafka.retries=0
kafka.acks=1
####### producer partitioning #######
# used by kafka and kinesis
# controls the input into the hash function. Note that this defines
# which transactions keep ordering with respect to each other. Generally
# here you're making a trade-off between inter-row consistency and balanced
# partitions.
#producer_partition_by=database # [database, table, primary_key, column]
# required when using producer_partition_by=column
# otherwise the partitioner will revert to table
# can be a single or multiple columns, e.g. aggregate_id or aggregate_id,event_type
#producer_partition_columns=
# required when using producer_partition_by=column
# the fallback partitioning behavior when the specified column(s) do not exist
# can be any one of [database, table, primary_key]
#producer_partition_by_fallback=database
######### kinesis ####################
kinesis_stream=maxwell
# AWS places a 256 unicode character limit on the max key length of a record
# http://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html
#
# Setting this option to true enables hashing the key with the md5 algorithm
# before we send it to kinesis so all the keys work within the key size limit.
# Values: true, false
# Default: false
#kinesis_md5_keys=true
######### pub/sub #####################
#pubsub_project_id=maxwell
#pubsub_topic=maxwell
#ddl_pubsub_topic=maxwell_ddl
######### rabbitmq ###################
#rabbitmq_host=rabbitmq_hostname
#rabbitmq_user=guest
#rabbitmq_pass=guest
#rabbitmq_virtual_host=/
#rabbitmq_exchange=maxwell
#rabbitmq_exchange_type=fanout
#rabbitmq_exchange_durable=false
#rabbitmq_exchange_autodelete=false
#rabbitmq_routing_key_template=%db%.%table%
#rabbitmq_message_persistent=false
######### redis ######################
#redis_host=redis_host
#redis_port=6379
#redis_auth=redis_auth
#redis_database=0
#redis_pub_channel=maxwell
######### filter stuff ###############
# filter rows out of Maxwell's output.
# all filters may be given either as literal names, or as java-style regular expressions.
# This is a literal name: "exclude_tables=tblname".
# This is a regexp: "exclude_tables=/tblname_\\d+/"
# include *only* these databases
#include_dbs=db1,/db\\d+/
# exclude these databases (will override an include)
#exclude_dbs=db3,/db\\d+/
# include *only* these tables
#include_tables=tbl1,/tbl\\d+/
# exclude these tables
#exclude_tables=tbl1,/tbl\\d+/
# exclude these columns
#exclude_columns=col1,/col\\d+/
# "blacklist" these dbs -- this means maxwell will ignore schema
# changes happening to these databases. Can be useful if you have a
# high-churn schema that you want to completely ignore, but it's
# a bit dangerous: once you set this option, you must leave it set, or
# else maxwell will likely blow up.
#
# All of this is to say: don't set this unless you know what you're doing.
#blacklist_dbs=db1,/db\\d+/
#blacklist_tables=table1,table_no
#
######### encryption ###############
# convert the data field to a json string and encrypt it
#encrypt_data=true
# convert the whole payload to a json string and encrypt it
#encrypt_all=true
# specify the encryption key and secret key
#encryption_key=aaaaaaaaaaaaaaaa
#secret_key=RandomInitVector
######## monitoring stuff ###########
# Maxwell collects metrics via dropwizard. These can be exposed through the
# base logging mechanism (slf4j), JMX, HTTP or pushed to Datadog.
# Options: [jmx, slf4j, http, datadog]
# Supplying multiple is allowed.
#metrics_type=jmx,slf4j
# The prefix maxwell will apply to all metrics
#metrics_prefix=MaxwellMetrics # default MaxwellMetrics
# When metrics_type includes slf4j this is the frequency metrics are emitted to the log, in seconds
#metrics_slf4j_interval=60
# When metrics_type includes http or diagnostic is enabled, this is the port the server will bind to.
#http_port=8080
# When metrics_type includes http or diagnostic is enabled, this is the http path prefix, default /.
#http_path_prefix=/some/path/
# ** The following are Datadog specific. **
# When metrics_type includes datadog this is the way metrics will be reported.
# Options: [udp, http]
# Supplying multiple is not allowed.
#metrics_datadog_type=udp
# datadog tags that should be supplied
#metrics_datadog_tags=tag1:value1,tag2:value2
# The frequency metrics are pushed to datadog, in seconds
#metrics_datadog_interval=60
# required if metrics_datadog_type = http
#metrics_datadog_apikey=API_KEY
# required if metrics_datadog_type = udp
#metrics_datadog_host=localhost # default localhost
#metrics_datadog_port=8125 # default 8125
# Maxwell exposes an http diagnostic endpoint that runs the checks below in parallel:
# 1. binlog replication lag
# 2. producer (currently kafka) lag
# To enable Maxwell diagnostic
#http_diagnostic=true # default false
# Diagnostic check timeout in milliseconds, required if http_diagnostic = true
#http_diagnostic_timeout=10000 # default 10000
######### misc stuff ###############
# maxwell's bootstrapping functionality has a couple of modes.
#
# In "async" mode, maxwell will output the replication stream while it
# simultaneously outputs the database to the topic. Note that it won't
# output replication data for any tables it is currently bootstrapping -- this
# data will be buffered and output after the bootstrap is complete.
#
# In "sync" mode, maxwell stops the replication stream while it
# outputs bootstrap data.
#
# async mode keeps ops live while bootstrapping, but carries the possibility of
# data loss (due to buffering transactions). sync mode is safer but you
# have to stop replication.
#bootstrapper=async [sync, async, none]
# output filename when using the "file" producer
#output_file=/path/to/file