#### Some of the properties are part of the Debezium PostgreSQL Connector
#### https://debezium.io/documentation/reference/stable/connectors/postgresql.html
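# Configuration for the Debezium-embedded PostgreSQL to Clickhouse sink connector;
# properties not covered by the Debezium documentation are specific to the sink connector.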
# name: Unique name for the connector. Attempting to register again with the same name will fail.
name: "debezium-embedded-postgres"
auto.create.tables.replicated: "true"
# database.hostname: IP address or hostname of the PostgreSQL database server.
database.hostname: "postgres"
# database.port: Integer port number of the PostgreSQL database server listening for client connections.
database.port: "5432"
# database.user: Name of the PostgreSQL database user to be used when connecting to the database.
database.user: "root"
# database.password: Password of the PostgreSQL database user to be used when connecting to the database.
database.password: "root"
# database.server.name: Logical name that identifies and provides a namespace for the PostgreSQL server/cluster from which events are captured.
database.server.name: "ER54"
# schema.include.list: An optional list of regular expressions that match schema names to be monitored; schemas not included in the list are excluded from change capture.
schema.include.list: public,public2
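# slot.name: The name of the PostgreSQL logical decoding slot created for streaming changes from the server.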
slot.name: connector2
# plugin.name: The name of the PostgreSQL logical decoding plug-in installed on the PostgreSQL server. Supported values are decoderbufs, and pgoutput.
plugin.name: "pgoutput"
# table.include.list: An optional list of regular expressions that match fully-qualified table identifiers for tables to be monitored; tables not included in the list are excluded from change capture.
#table.include.list: "public.tm,public.tm2"
# clickhouse.server.url: Specify only the hostname of the Clickhouse Server.
clickhouse.server.url: "clickhouse"
# clickhouse.server.user: Clickhouse Server User
clickhouse.server.user: "root"
# clickhouse.server.password: Clickhouse Server Password
clickhouse.server.password: "root"
# clickhouse.server.port: Clickhouse Server Port
clickhouse.server.port: "8123"
# database.allowPublicKeyRetrieval: See https://rmoff.net/2019/10/23/debezium-mysql-v8-public-key-retrieval-is-not-allowed/
database.allowPublicKeyRetrieval: "true"
# snapshot.mode: Determines how Debezium runs the initial snapshot; "initial" performs a snapshot only when no offsets have been recorded for the connector.
snapshot.mode: "initial"
# offset.flush.interval.ms: The number of milliseconds to wait before flushing recent offsets to offset storage. This ensures that offsets are committed within the specified time interval.
offset.flush.interval.ms: 5000
# connector.class: The Java class for the connector. This must be set to io.debezium.connector.postgresql.PostgresConnector.
connector.class: "io.debezium.connector.postgresql.PostgresConnector"
# offset.storage: The Java class that implements the offset storage strategy. This must be set to io.debezium.storage.jdbc.offset.JdbcOffsetBackingStore.
offset.storage: "io.debezium.storage.jdbc.offset.JdbcOffsetBackingStore"
# offset.storage.jdbc.offset.table.name: The name of the database table where connector offsets are to be stored.
offset.storage.jdbc.offset.table.name: "altinity_sink_connector.replica_source_info"
# offset.storage.jdbc.url: The JDBC URL for the database where connector offsets are to be stored.
offset.storage.jdbc.url: "jdbc:clickhouse://clickhouse:8123/altinity_sink_connector"
# offset.storage.jdbc.user: The name of the database user to be used when connecting to the database where connector offsets are to be stored.
offset.storage.jdbc.user: "root"
# offset.storage.jdbc.password: The password of the database user to be used when connecting to the database where connector offsets are to be stored.
offset.storage.jdbc.password: "root"
# offset.storage.jdbc.offset.table.ddl: The DDL statement used to create the database table where connector offsets are to be stored.
offset.storage.jdbc.offset.table.ddl: "CREATE TABLE if not exists %s
(
`id` String,
`offset_key` String,
`offset_val` String,
`record_insert_ts` DateTime,
`record_insert_seq` UInt64,
`_version` UInt64 MATERIALIZED toUnixTimestamp64Nano(now64(9))
)
ENGINE = ReplacingMergeTree(_version)
ORDER BY id
SETTINGS index_granularity = 8192"
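# offset.storage.jdbc.offset.table.delete: The DML statement used to clear existing rows from the offset table.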
offset.storage.jdbc.offset.table.delete: "delete from %s where 1=1"
# enable.snapshot.ddl: If set to true, the connector will parse the DDL statements as part of the initial load.
enable.snapshot.ddl: "true"
# auto.create.tables: If set to true, the connector will create the destination tables in Clickhouse if they do not already exist.
auto.create.tables: "true"
# database.dbname: The name of the PostgreSQL database from which to stream the changes.
database.dbname: "public"
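# clickhouse.jdbc.params: Additional connection parameters passed to the Clickhouse JDBC client (comma-separated key=value pairs).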
clickhouse.jdbc.params: "keepalive.timeout=3,max_buffer_size=1000000,socket_timeout=30000,connection_timeout=30000"
# clickhouse.datetime.timezone: This timezone will override the default timezone of ClickHouse server. Timezone columns will be set to this timezone.
#clickhouse.datetime.timezone: "UTC"
# skip_replica_start: If set to true, the connector will skip replication on startup. sink-connector-client start_replica will start replication.
#skip_replica_start: "false"
# binary.handling.mode: The mode for handling binary column values. Possible values are bytes, base64, base64-url-safe, and hex. The default is bytes.
#binary.handling.mode: "base64"
# ignore_delete: If set to true, the connector will ignore delete events. The default is false.
#ignore_delete: "true"
# metrics.enable: Expose metrics as a Prometheus target; required for the Grafana dashboard.
metrics.enable: "true"
# disable.ddl: If set to true, the connector will ignore DDL events. The default is false.
#disable.ddl: "false"
# disable.drop.truncate: If set to true, the connector will ignore drop and truncate events. The default is false.
#disable.drop.truncate: "false"
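# replica.status.view: DDL for the show_replica_status view, which reports how far the replica lags behind the source based on the stored offsets.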
replica.status.view: "CREATE VIEW %s.show_replica_status
(
`seconds_behind_source` Int32,
`utc_time` DateTime('UTC'),
`local_time` DateTime
) AS
SELECT
now() - fromUnixTimestamp(toInt32(JSONExtractUInt(offset_val, 'ts_usec') / 1000000)) AS seconds_behind_source,
toDateTime(fromUnixTimestamp(toInt32(JSONExtractUInt(offset_val, 'ts_usec') / 1000000)), 'UTC') AS utc_time,
fromUnixTimestamp(toInt32(JSONExtractUInt(offset_val, 'ts_usec') / 1000000)) AS local_time
FROM altinity_sink_connector.replica_source_info
SETTINGS final = 1"
# Required to work around a bug that causes a freeze in the Debezium polling loop.
use.nongraceful.disconnect: "true"
database.keep.alive.interval.ms: "30000" # Send a keepalive message every 30 seconds
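# database.connection.reconnect.backoff.ms / database.connection.reconnect.backoff.max.ms: Initial and maximum backoff (in milliseconds) between reconnection attempts to the source database.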
database.connection.reconnect.backoff.ms: "1000"
database.connection.reconnect.backoff.max.ms: "10000"
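# database.ssl.mode: SSL mode for the connection to the PostgreSQL server ("disabled" turns SSL off).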
database.ssl.mode: "disabled"