Commit b63ae91

travis + conf file
1 parent dd1d39c commit b63ae91

2 files changed (+12, -12 lines)

.travis.yml

Lines changed: 1 addition & 1 deletion
@@ -6,5 +6,5 @@ go:
 before_install:
 - go get github.com/mattn/goveralls
 script:
-- go test -v ./... -coverprofile=psql-streamer.coverprofile
+- go test ./... -coverprofile=psql-streamer.coverprofile
 - $GOPATH/bin/goveralls -service=travis-ci -coverprofile psql-streamer.coverprofile

psql-streamer.toml

Lines changed: 11 additions & 11 deletions
@@ -19,12 +19,12 @@ dsn = "dbname=test sslmode=disable"
 publication = "pub1"

 # Replication slot represents a unique replication client in the database.
-# PostgreSQL stores the current client WAL positions in the slot (they're updated by the status messages)
+# PostgreSQL stores the current client WAL positions in the slot (they're updated by the status messages from the client)
 # Be aware that PostgreSQL has a configurable limit on the number of replication slots.
 # Only one client can occupy the replication slot at any given time.
 replicationSlot = "db1"

-# How frequently to retry sending to sinks in case of error
+# How frequently to retry sending to sinks in case of an error
 sendRetryInterval = "1s"

 # How frequently to try to restart replication if it dies for some reason
@@ -33,7 +33,7 @@ startRetryInterval = "2s"
 # How many messages to get from PostgreSQL before pushing them into sinks
 batchSize = 400

-# How long to wait for batchSize to fill before flushing the buffer
+# How long to wait for the batchSize to fill before flushing the buffer
 batchFlushInterval = "1s"

 # PostgreSQL connection timeout
@@ -43,7 +43,7 @@ timeout = "2s"
 # The source will ask PostgreSQL to start streaming from this position.
 walPositionOverride = 0

-# This affects for how long PostgreSQL retains it's WAL logs.
+# This affects for how long PostgreSQL will retain it's WAL logs.
 # Service will confirm current walPos minus walRetain bytes as flushed-to-disk to PostgreSQL.
 # IMPORTANT: the walRetain should be large enough to accomodate batchSize number of events,
 # or in case of crash you might lose some events (PostgreSQL will not allow to go backwards in log deep enough)
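
A rough sizing sketch for the walRetain warning above; the per-event WAL figure is an assumption for illustration only, not something this commit or the config specifies:

    # hypothetical sizing: assume ~1 KiB of WAL per event
    # batchSize = 400 events * 1 KiB ≈ 400 KiB of WAL that may still be unconfirmed
    batchSize = 400
    walRetain = 1048576  # 1 MiB in bytes, leaving headroom above the ~400 KiB minimum

If walRetain is smaller than one full batch, PostgreSQL may recycle WAL the service still needs after a crash, which is exactly the event-loss case described above.
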
@@ -68,10 +68,10 @@ topics = [ "topic1", "topic2" ]
 # the events will be distributed evenly between consumers
 groupID = "bar"

-# How frequently to retry sending events to sinks if they fail
+# How frequently to retry sending events to sinks in case of failure
 sendRetryInterval = "100ms"

-# How many messages to get from Kafka before pushing them into sinks
+# How many messages to get from Kafka before pushing them into sinks.
 # This also sets Kafka library internal queue size
 batchSize = 400
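
Pulled together, the Kafka source keys touched in this hunk read roughly as below; the section header is a guess for illustration, since the real table name sits outside the diff:

    [source.kafka.kafka1]            # hypothetical header, not shown in this hunk
    topics = [ "topic1", "topic2" ]
    groupID = "bar"                  # consumers sharing a groupID split the events between them
    sendRetryInterval = "100ms"
    batchSize = 400                  # also sizes the Kafka library's internal queue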

@@ -91,15 +91,15 @@ sources = [ "kafka1" ]
 # List of kafka brokers in host:port form
 hosts = [ "kafka1:9092", "kafka2:9092", "kafka3:9092" ]

-# A list of handlers to enable
+# A list of handlers to enable (currently only "passthrough" is available)
 handlers = [ "passthrough" ]

 # Mapping between table name of event and Kafka topic
 tableTopicMapping = { table1 = "topic1", table2 = "topic2" }

 # This specifies the topic where the messages for which the mapping wasn't found will go.
 # If it's empty or undefined then they will be discarded.
-# At least one of tableTopicMapping or fallbackTopic should be specified.
+# At least one of 'tableTopicMapping' or 'fallbackTopic' should be specified.
 topicFallback = "garbageBin"

 # Timeout for Kafka message sending
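
A small routing sketch for the mapping and fallback settings above (table3 is a made-up table with no mapping):

    tableTopicMapping = { table1 = "topic1", table2 = "topic2" }
    topicFallback = "garbageBin"
    # events from table1 go to topic1, events from table2 go to topic2
    # events from table3 have no mapping, so they go to garbageBin
    # with topicFallback empty or undefined, table3 events would be discarded instead
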
@@ -118,15 +118,15 @@ requiredAcks = -1

 # Enable asynchronous operation. The Kafka library will use batches to dispatch events more efficiently.
 # Be careful - in this mode you can lose events because Kafka library will not report back the errors
-# and we'll report success to the source as if the message was sent successfully.
+# and we'll always report success to the source as if the messages were sent successfully.
 async = false

 # Number of messages to buffer before sending to Kafka
 batchSize = 100

 # How frequently to flush the queue if batchSize is not yet exceeded.
-# Be advised that this parameter is also used with `async = false` and messages will be sent no faster than 1sec/batchTimeout.
-# Decreasing this parameter leads to more frequent polling of queue which, consequently, increases CPU usage even if idle.
+# Be advised that this parameter is also used with 'async = false' and messages will be sent no faster than 1sec/batchTimeout.
+# Decreasing this parameter leads to more frequent polling of queue which increases CPU usage even when idle.
 # With 5ms each sink uses around 10% of one core on a MacBook's 3.3Ghz Core i5 7287U.
 batchTimeout = "10ms"
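
The 1sec/batchTimeout remark works out as follows; the numbers below are plain arithmetic on the values shown, not new measurements:

    batchTimeout = "10ms"
    # 1s / 10ms = 100, so the queue is polled and flushed at most 100 times per second,
    # and an enqueued message can wait up to 10ms before it is sent
    # the 5ms figure quoted above polls twice as often, hence the ~10% of a core per sink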
