diff --git a/.github/workflows/testflow_tests.yml b/.github/workflows/testflow_tests.yml
new file mode 100644
index 000000000..64e66ff90
--- /dev/null
+++ b/.github/workflows/testflow_tests.yml
@@ -0,0 +1,73 @@
+name: TestFlows Tests
+
+on:
+  push:
+    branches: [main, develop]
+  pull_request:
+    branches: [main, develop]
+
+env:
+  SINK_CONNECTOR_VERSION: "2023-02-07"
+
+jobs:
+  testflows:
+    runs-on: [self-hosted, linux, x64]
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install docker-compose
+        working-directory: tests/integration
+        run: pip3 install docker-compose==1.29.2
+
+      - name: Install testflows
+        working-directory: tests/integration
+        run: pip3 install testflows
+
+      - name: Install awscli
+        working-directory: tests/integration
+        run: pip3 install awscli
+
+      - name: Get current date
+        id: date
+        run: echo "date=$(date +'%Y-%m-%d_%H%M%S')" >> $GITHUB_OUTPUT
+
+      - name: Add ~/.local/bin to the PATH
+        if: always()
+        working-directory: tests/integration
+        run: echo ~/.local/bin >> $GITHUB_PATH
+
+      - name: Run testflows tests
+        working-directory: tests/integration
+        run: python3 -u regression.py --only "/mysql to clickhouse replication/*" --clickhouse-binary-path=docker://clickhouse/clickhouse-server:22.8 --test-to-end -o classic --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="altinity/clickhouse-sink-connector:${SINK_CONNECTOR_VERSION}" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{ github.run_number }}/" --log logs/raw.log
+
+      - name: Create tfs results report
+        if: always()
+        working-directory: tests/integration/logs
+        run: cat raw.log | tfs report results | tfs document convert > report.html
+
+      - name: Create tfs coverage report
+        if: always()
+        working-directory: tests/integration/logs
+        run: cat raw.log | tfs report coverage ../requirements/requirements.py | tfs document convert > coverage.html
+
+      - name: Upload artifacts to Altinity Test Reports S3 bucket
+        if: always()
+        working-directory: tests/integration/logs
+        run: aws s3 cp . s3://altinity-test-reports/altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{ github.run_number }}/ --recursive --exclude "*" --include "*.log" --include "*.html"
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: 'eu-west-2'
+
+      - uses: actions/upload-artifact@v3
+        if: always()
+        with:
+          name: testflows-artifacts-${{ steps.date.outputs.date }}
+          path: |
+            tests/integration/logs/*.log
+          if-no-files-found: error
+          retention-days: 60
+
+
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/regression.py b/tests/Testflows/mysql_to_clickhouse_replication/regression.py
deleted file mode 100755
index 129d7a0d8..000000000
--- a/tests/Testflows/mysql_to_clickhouse_replication/regression.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-import os
-import sys
-
-from testflows.core import *
-
-append_path(sys.path, "..")
-
-from helpers.argparser import argparser
-from helpers.common import check_clickhouse_version
-from helpers.common import create_cluster
-from mysql_to_clickhouse_replication.requirements import *
-from mysql_to_clickhouse_replication.tests.steps import *
-from mysql_to_clickhouse_replication.tests.steps_global import *
-
-xfails = {
-    # "data types/date time/*": [
-    #     (Fail, "https://github.com/Altinity/clickhouse-sink-connector/issues/8")
-    # ],
-    # "data types ac/date time/*": [
-    #     (Fail, "https://github.com/Altinity/clickhouse-sink-connector/issues/8")
-    # ],
-    "primary keys/*": [
-        (Fail, "fails only in GitLab CI/CD")
-    ],
-    # "data consistency/*": [
-    #     (Fail, "doesn't finish")
-    # ]
-}
-xflags = {}
-
-
-@TestModule
-@ArgumentParser(argparser)
-@XFails(xfails)
-@XFlags(xflags)
-@Name("mysql to clickhouse replication")
-@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication("1.0"))
-@Specifications()
-def regression(
-    self,
-    local,
-    clickhouse_binary_path,
-    clickhouse_version,
-    stress=None,
-    thread_fuzzer=None,
-    collect_service_logs=None
-):
-    """ClickHouse regression for MySQL to ClickHouse replication."""
-    nodes = {
-        "debezium": ("debezium",),
-        "mysql-master": ("mysql-master",),
-        "clickhouse": ("clickhouse",),
-        "bash-tools": ("bash-tools",),
-        "schemaregistry": ("schemaregistry",),
-        "sink": ("sink",),
-    }
-
-    self.context.clickhouse_version = clickhouse_version
-
-    if check_clickhouse_version("<21.4")(self):
-        skip(reason="only supported on ClickHouse version >= 21.4")
-
-    if stress is not None:
-        self.context.stress = stress
-
-    if collect_service_logs is not None:
-        self.context.collect_service_logs = collect_service_logs
-
-    env = "mysql_to_clickhouse_replication_env"
-
-    with Given("docker-compose cluster"):
-        cluster = create_cluster(
-            local=local,
-            clickhouse_binary_path=clickhouse_binary_path,
-            thread_fuzzer=thread_fuzzer,
-            collect_service_logs=collect_service_logs,
-            stress=stress,
-            nodes=nodes,
-            docker_compose_project_dir=os.path.join(current_dir(), env),
-            caller_dir=os.path.join(current_dir()),
-        )
-
-    self.context.cluster = cluster
-
-    with And("I create test database in ClickHouse"):
-        create_database(name="test")
-
-    Feature(run=load("mysql_to_clickhouse_replication.tests.sanity", "feature"))
-    Feature(run=load("mysql_to_clickhouse_replication.tests.data_types", "feature"))
-    Feature(run=load("mysql_to_clickhouse_replication.tests.data_types_ac", "feature"))
-    Feature(run=load("mysql_to_clickhouse_replication.tests.deduplication", "feature"))
-    Feature(run=load("mysql_to_clickhouse_replication.tests.primary_keys", "feature"))
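    # The features below are loaded and executed sequentially in the order
    # listed; the sysbench and manual_section features at the very end are
    # commented out and are only run by hand.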
Feature(run=load("mysql_to_clickhouse_replication.tests.autocreate", "feature")) - Feature( - run=load( - "mysql_to_clickhouse_replication.tests.table_schema_changes", "feature" - ) - ) - Feature(run=load("mysql_to_clickhouse_replication.tests.insert", "feature")) - Feature(run=load("mysql_to_clickhouse_replication.tests.data_consistency", "feature")) - Feature(run=load("mysql_to_clickhouse_replication.tests.multiple_tables", "feature")) - Feature(run=load("mysql_to_clickhouse_replication.tests.virtual_columns", "feature")) - Feature(run=load("mysql_to_clickhouse_replication.tests.delete", "feature")) - Feature(run=load("mysql_to_clickhouse_replication.tests.update", "feature")) - # Feature(run=load("mysql_to_clickhouse_replication.tests.sysbench", "feature")) - # Feature(run=load("mysql_to_clickhouse_replication.tests.manual_section", "feature")) - - -if __name__ == "__main__": - regression() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/requirements/__init__.py b/tests/Testflows/mysql_to_clickhouse_replication/requirements/__init__.py deleted file mode 100644 index 02f7d4301..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/requirements/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .requirements import * diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/autocreate.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/autocreate.py deleted file mode 100644 index dd464bee7..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/autocreate.py +++ /dev/null @@ -1,87 +0,0 @@ -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from helpers.common import * - - -@TestScenario -def create_all_data_types_not_null_table(self): - """Check auto-creation of replicated MySQL table - which contains all supported "NOT NULL" data types. - """ - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given( - f"I create MySQL table {table_name} with all supported NOT NULL data types" - ): - create_all_data_types_table(table_name=table_name, manual_ch_table_create=False) - - with When(f"I check MySql table {table_name} was created"): - mysql.query(f"SHOW CREATE TABLE {table_name};", message=f"{table_name}") - - with Then(f"I make insert to create ClickHouse table"): - mysql.query( - f"INSERT INTO {table_name} VALUES (1,2/3,1.23,999.00009,'2012-12-12','2018-09-08 17:51:04.777','17:51:04.777','17:51:04.777',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,'x','some_text','IVAN','some_blob','x_Mediumblob','some_Longblobblobblob','a','IVAN')" - ) - - with Then(f"I check that corresponding ClickHouse table was created"): - retry(clickhouse.query, timeout=30, delay=5)( - f"SHOW CREATE TABLE test.{table_name};", message=f"{table_name}" - ) - - -@TestScenario -def create_all_data_types_null_table(self): - """Check auto-creation of replicated MySQL table that - contains all supported "NULL" data types. 
- """ - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given( - f"I create MySQL table {table_name} with all data types and ClickHouse replica to it)" - ): - create_all_data_types_table_nullable(table_name=table_name, manual_ch_table_create=False) - - with When(f"I check MySql table {table_name} was created"): - mysql.query(f"SHOW CREATE TABLE {table_name};", message=f"{table_name}") - - with Then(f"I make insert to create ClickHouse table"): - mysql.query( - f"INSERT INTO {table_name} VALUES (1,2/3,1.23,999.00009,'2012-12-12','2018-09-08 17:51:04.777','17:51:04.777','17:51:04.777',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,'x','some_text','IVAN','some_blob','x_Mediumblob','some_Longblobblobblob','a','IVAN')" - ) - - with Then(f"I make check that clickhouse table was created too"): - retry(clickhouse.query, timeout=30, delay=5)( - f"SHOW CREATE TABLE test.{table_name};", message=f"{table_name}" - ) - - -@TestFeature -@Requirements() -@Name("autocreate") -def feature(self): - """Verify correct replication of all supported MySQL data types.""" - - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_consistency.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/data_consistency.py deleted file mode 100644 index c08846e3f..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_consistency.py +++ /dev/null @@ -1,179 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from testflows.connect import Shell -from helpers.common import * -from itertools import combinations - - -@TestOutline -def unstable_network_connection(self, service): - """Check for data consistency with unstable network connection to some service.""" - - with Given("Receive UID"): - time.sleep(25) - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} (col1 int4, col2 int4 NOT NULL, col3 int4 default 777)" - f" ENGINE = InnoDB;", - ) - - with When("I insert data in MySql table with concurrent network fault"): - with Shell() as bash: - bash("docker network disconnect ", timeout=100) - mysql.query( - f"INSERT INTO {table_name} (col1,col2,col3) VALUES (5,6,777);" - ) - - with And("Enable network"): - self.context.cluster.node(f"{service}").start() - - with Then("I wait unique values from CLickHouse table equal to MySQL table"): - select(insert="5,6,777", table_name=table_name, statement="col1,col2,col3", - with_final=True, timeout=100) - - -@TestOutline -def restart(self, services, query=None): - """Check for data consistency with concurrently service restart.""" - - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = 
f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} " - "(id int(11) NOT NULL," - "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," - f"pad char(60) NOT NULL DEFAULT '', PRIMARY KEY (id)) ENGINE = InnoDB;" - ) - - with When("I insert data in MySql table with concurrently service restart"): - for node in services: - self.context.cluster.node(f"{node}").stop() - - with Step(f"I insert data in MySql table"): - mysql.query( - f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');" - ) - - if query == "update": - with Then(f"I update data in MySql table"): - mysql.query( - f"UPDATE {table_name} SET k=k+5 WHERE id=1;" - ) - elif query == "delete": - with Then(f"I delete data in MySql table"): - mysql.query( - f"DELETE FROM {table_name} WHERE id=1;" - ) - - with And(f"Enable all services {services}"): - for node in services: - self.context.cluster.node(f"{node}").start() - - if query == "update": - with And("I check that ClickHouse has updated data as MySQL"): - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - clickhouse.query( - f"SELECT * FROM test.{table_name} FINAL where _sign !=-1 FORMAT CSV", - message='1,7,"a","b"' - ) - elif query == "delete": - with And("I check that ClickHouse table has same number of rows as MySQL table"): - mysql_rows_after_delete = mysql.query(f"select count(*) from {table_name}").output.strip()[90:] - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - clickhouse.query( - f"SELECT count(*) FROM test.{table_name} FINAL where _sign !=-1 FORMAT CSV", - message=mysql_rows_after_delete - ) - else: - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - clickhouse.query( - f"SELECT id,k,c,pad FROM test.{table_name} FINAL where _sign !=-1 FORMAT CSV", - message='1,2,"a","b"\n2,3,"a","b"' - ) - - -@TestScenario -def kafka_restart(self): - """Kafka restart""" - restart(services=["kafka"]) - - -@TestScenario -def debezium_restart(self): - """Debezium restart""" - restart(services=["debezium"], query="update") - - -@TestScenario -def clickhouse_restart(self): - """ClickHouse restart""" - restart(services=["clickhouse"]) - - -@TestScenario -def schemaregistry_restart(self): - """Schemaregistry restart""" - xfail("doesn't create table") - restart(services=["schemaregistry"]) - - -@TestScenario -def sink_restart(self): - """Sink connector restart""" - restart(services=["sink"]) - - -@TestScenario -def combinatoric_restart(self): - """Check all possibilities of unavailable services""" - xfail("some timing problems") - nodes_list = ["sink", "debezium", "schemaregistry", "kafka", "clickhouse"] - for i in range(2, 6): - pairs = list(combinations(nodes_list, i)) - for pair in pairs: - restart(services=pair) - - -@TestFeature -@Name("data consistency") -def feature(self): - """Сheck data consistency when network or service faults are introduced.""" - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - - for scenario in 
loads(current_module(), Scenario): - scenario() \ No newline at end of file diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_types_ac.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/data_types_ac.py deleted file mode 100644 index 053237cad..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_types_ac.py +++ /dev/null @@ -1,395 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from helpers.common import * - - -@TestOutline -def check_datatype_replication( - self, - mysql_type, - ch_type, - values, - ch_values, - nullable=False, - hex_type=False, - auto_create_tables=True, -): - """Check replication of a given MySQL data type.""" - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=auto_create_tables, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name})"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT," - f"MyData {mysql_type}{' NOT NULL' if not nullable else ''}," - f" PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - - if not auto_create_tables: - with And(f"I create ClickHouse replica test.{table_name}"): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32,{f'MyData Nullable({ch_type})' if nullable else f'MyData {ch_type}'}, sign " - f"Int8, ver UInt64) " - f"ENGINE = ReplacingMergeTree(ver) " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;", - ) - - with When(f"I insert data in MySql table {table_name}"): - for i, value in enumerate(values, 1): - mysql.query(f"INSERT INTO {table_name} VALUES ({i}, {value})") - with Then(f"I make check that ClickHouse table has same dataset"): - retry(clickhouse.query, timeout=50, delay=1)( - f"SELECT id,{'unhex(MyData)' if hex_type else 'MyData'} FROM test.{table_name} FINAL FORMAT CSV", - message=f"{ch_values[i - 1]}", - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("DECIMAL(2,1)", "DECIMAL(2,1)", ["2/3"], ["0.7"], False), - ("DECIMAL(30, 10)", "DECIMAL(30, 10)", ["1.232323233"], ["1.232323233"], False), - ("DECIMAL(9, 5)", "DECIMAL32(5)", ["1.23"], ["1.23"], False), - ("DECIMAL(18, 5)", "DECIMAL64(5)", ["1.23"], ["1.23"], False), - ("DECIMAL(38, 5)", "DECIMAL128(5)", ["1.23"], ["1.23"], False), - ("DECIMAL(65, 5)", "DECIMAL256(5)", ["1.23"], ["1.23"], False), - ("DECIMAL(2,1)", "DECIMAL(2,1)", ["NULL"], ["\\N"], True), - ("DECIMAL(30, 10)", "DECIMAL(30, 10)", ["NULL"], ["\\N"], True), - ("DECIMAL(9, 5)", "DECIMAL32(5)", ["NULL"], ["\\N"], True), - ("DECIMAL(18, 5)", "DECIMAL64(5)", ["NULL"], ["\\N"], True), - ("DECIMAL(38, 5)", "DECIMAL128(5)", ["NULL"], ["\\N"], True), - ("DECIMAL(65, 5)", "DECIMAL256(5)", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Decimal("1.0") -) -def decimal(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'DECIMAL' data types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - 
auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("DOUBLE", "Float64", ["999.00009"], ["999.00009"], False), - ("DOUBLE", "Float64", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Double("1.0") -) -@Requirements() -def double(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'DOUBLE' data type.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("DATE", "Date32", ["'2012-12-12'"], ['"2012-12-12"'], False), - ( - "DATETIME", - "DateTime64", - ["'2018-09-08 17:51:04'"], - ['"2018-09-08 17:51:04.000"'], - False, - ), - ("TIME", "String", ["'17:51:04.777'"], ['"17:51:05"'], False), - # ("TIME(6)", "String", ["'17:51:04.777'"], ['"17:51:04.777000"'], False), - ("DATE", "Date32", ["NULL"], ["\\N"], True), - ("DATETIME", "DateTime64", ["NULL"], ["\\N"], True), - ("TIME", "String", ["NULL"], ["\\N"], True), - # ("TIME(6)", "String", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_DateTime("1.0") -) -def date_time(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'DATE' and 'TIME' data type.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - auto_create_tables=True, - ) - - -# TODO: add xfail on bigint unsigned -@TestOutline(Scenario) -# @Repeat(3) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ( - "INT", - "Int32", - ["-2147483648", "0", "2147483647"], - ["-2147483648", "0", "2147483647"], - False, - ), - ("INT UNSIGNED", "UInt32", ["0", "4294967295"], ["0", "4294967295"], False), - ( - "BIGINT", - "Int64", - ["-9223372036854775808", "0", "9223372036854775807"], - ["-9223372036854775808", "0", "9223372036854775807"], - False, - ), - # ( - # "BIGINT UNSIGNED", - # "UInt64", - # ["0", "18446744073709551615"], - # ["0", "18446744073709551615"], - # False, - # ), - ("TINYINT", "Int8", ["-128", "127"], ["-128", "127"], False), - ("TINYINT UNSIGNED", "UInt8", ["0", "255"], ["0", "255"], False), - ( - "SMALLINT", - "Int16", - ["-32768", "0", "32767"], - ["-32768", "0", "32767"], - False, - ), - ("SMALLINT UNSIGNED", "UInt16", ["0", "65535"], ["0", "65535"], False), - ( - "MEDIUMINT", - "Int32", - ["-8388608", "0", "8388607"], - ["-8388608", "0", "8388607"], - False, - ), - ("MEDIUMINT UNSIGNED", "UInt32", ["0", "16777215"], ["0", "16777215"], False), - ("INT", "Int32", ["NULL"], ["\\N"], True), - ("INT UNSIGNED", "UInt32", ["NULL"], ["\\N"], True), - ("BIGINT", "Int64", ["NULL"], ["\\N"], True), - # ("BIGINT UNSIGNED", "UInt64", ["NULL"], ["\\N"], True), - ("TINYINT", "Int8", ["NULL"], ["\\N"], True), - ("TINYINT UNSIGNED", "UInt8", ["NULL"], ["\\N"], True), - ("SMALLINT", "Int16", ["NULL"], ["\\N"], True), - ("SMALLINT UNSIGNED", "UInt16", ["NULL"], ["\\N"], True), - ("MEDIUMINT", "Int32", ["NULL"], ["\\N"], True), - ("MEDIUMINT UNSIGNED", "UInt32", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_IntegerTypes("1.0") -) -def integer_types(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'INT' data 
types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("CHAR", "LowCardinality(String)", ["'x'"], ['"x"'], False), - ("TEXT", "String", ["'some_text'"], ['"some_text"'], False), - ("VARCHAR(4)", "String", ["'IVAN'"], ['"IVAN"'], False), - ("CHAR", "String", ["NULL"], ["\\N"], True), - ("TEXT", "String", ["NULL"], ["\\N"], True), - ("VARCHAR(4)", "String", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_String("1.0") -) -def string(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'STRING' data types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("BLOB", "String", ["'some_blob'"], ['"some_blob"'], False), - ("MEDIUMBLOB", "String", ["'x_Mediumblob'"], ['"x_Mediumblob"'], False), - ( - "LONGBLOB", - "String", - ["'some_Longblobblobblob'"], - ['"some_Longblobblobblob"'], - False, - ), - ("BLOB", "String", ["NULL"], ["\\N"], True), - ("MEDIUMBLOB", "String", ["NULL"], ["\\N"], True), - ("LONGBLOB", "String", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_BlobTypes("1.0") -) -def blob(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'BLOB' data types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - hex_type=True, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("BINARY", "String", ["'a'"], ['"a"'], False), - ("VARBINARY(4)", "String", ["'IVAN'"], ['"IVAN"'], False), - ("BINARY", "String", ["NULL"], ["\\N"], True), - ("VARBINARY(4)", "String", ["NULL"], ["\\N"], True), - ], -) -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Binary("1.0") -) -def binary(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'BINARY' data types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - hex_type=True, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("ENUM('hello','world')", "String", ["'hello'"], ['"hello"'], False), - ("ENUM('hello','world')", "String", ["NULL"], ["\\N"], True), - ], -) -@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_EnumToString("1.0")) -def enum(self, mysql_type, ch_type, values, ch_values, nullable): - """Check replication of MySQl 'ENUM' data types.""" - check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - auto_create_tables=True, - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("JSON", "String", ["'{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\"}'"] - , ['{""key1"": ""value1"", ""key2"": ""value2""}'], - False), - ("JSON", "String", ["NULL"], ["\\N"], True), - ], -) 
-@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_JSON("1.0"))
-def json(self, mysql_type, ch_type, values, ch_values, nullable):
-    """Check replication of MySQL 'JSON' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-        auto_create_tables=True
-    )
-
-
-@TestOutline(Scenario)
-@Examples(
-    "mysql_type ch_type values ch_values nullable",
-    [
-        ("Year", "Int32", ["1909"], ["1909"], False),
-        ("Year", "Int32", ["NULL"], ["\\N"], True),
-    ],
-)
-@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Year("1.0"))
-def year(self, mysql_type, ch_type, values, ch_values, nullable):
-    """Check replication of MySQL 'YEAR' data type."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-        auto_create_tables=True
-    )
-
-
-@TestFeature
-@Requirements(
-    RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Nullable("1.0")
-)
-@Name("data types ac")
-def feature(self):
-    """Verify correct replication of all supported MySQL data types with ClickHouse table auto-creation."""
-
-    with Given("I enable debezium and sink connectors after kafka starts up"):
-        init_debezium_connector()
-
-    for scenario in loads(current_module(), Scenario):
-        scenario()
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/deduplication.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/deduplication.py
deleted file mode 100644
index a2bc7837a..000000000
--- a/tests/Testflows/mysql_to_clickhouse_replication/tests/deduplication.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from testflows.core import *
-from mysql_to_clickhouse_replication.requirements import *
-from mysql_to_clickhouse_replication.tests.steps import *
-from helpers.common import *
-
-
-@TestOutline
-def deduplication(self, inserts=False, big_insert=False, insert_number=1000):
-    """Check that MySQL to ClickHouse replication does not duplicate data."""
-
-    with Given("Receive UID"):
-        uid = getuid()
-
-    with And("I create unique table name"):
-        table_name = f"test{uid}"
-
-    clickhouse = self.context.cluster.node("clickhouse")
-    mysql = self.context.cluster.node("mysql-master")
-
-    init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}")
-
-    with Given(f"I create MySQL table {table_name}"):
-        create_mysql_table(
-            name=table_name,
-            statement=f"CREATE TABLE IF NOT EXISTS {table_name} "
-            f"(id INT AUTO_INCREMENT,age INT, PRIMARY KEY (id))"
-            f" ENGINE = InnoDB;",
-        )
-
-    with And(
-        f"I create ClickHouse replica test.{table_name} for the MySQL table, auto-removed at the end of the test"
-    ):
-        create_clickhouse_table(
-            name=table_name,
-            statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} "
-            f"(id Int32, age Int32) "
-            f"ENGINE = ReplacingMergeTree "
-            f"PRIMARY KEY id ORDER BY id SETTINGS "
-            f"index_granularity = 8192;",
-        )
-
-    if inserts:
-        with When(f"I insert {insert_number} rows of data in MySQL table"):
-            for i in range(0, insert_number + 1):
-                mysql.query(f"insert into {table_name} values ({i},777)")
-            metric(name="map insert time", value=current_time(), units="sec")
-    elif big_insert:
-        with When(f"I make one insert of {insert_number} rows into the MySQL table"):
-            mysql.query(f"insert into {table_name} "
-                        f"values {','.join([f'({i},777)' for i in range(1, insert_number + 1)])}")
-
-    with Then(f"I wait for ClickHouse table values to equal the MySQL table"):
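        # ReplacingMergeTree collapses duplicate rows only when parts are
        # merged, so the check reads through FINAL to force the deduplicated
        # view. A minimal sketch of the query the select() helper from
        # steps.py issues when called with with_final=True:
        #   SELECT count() FROM test.<table_name> FINAL FORMAT CSV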
-        select(insert=insert_number, table_name=table_name, statement="count()", with_final=True, timeout=50)
-
-
-@TestScenario
-def deduplication_on_big_insert(self):
-    """Check that data is not duplicated when one big insert of 100 000 rows is made."""
-    deduplication(big_insert=True, insert_number=100000)
-
-
-@TestScenario
-def deduplication_on_many_inserts(self):
-    """Check that data is not duplicated across 10 000 individual inserts."""
-    deduplication(inserts=True, insert_number=10000)
-
-
-@TestFeature
-@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Consistency_Deduplication("1.0"))
-@Name("deduplication")
-def feature(self):
-    """MySQL to ClickHouse replication tests that check
-    data is not duplicated on large inserts."""
-
-    with Given("I enable debezium and sink connectors after kafka starts up"):
-        init_debezium_connector()
-
-    for scenario in loads(current_module(), Scenario):
-        scenario()
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/delete.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/delete.py
deleted file mode 100644
index 0e316c2a3..000000000
--- a/tests/Testflows/mysql_to_clickhouse_replication/tests/delete.py
+++ /dev/null
@@ -1,108 +0,0 @@
-from testflows.core import *
-from mysql_to_clickhouse_replication.requirements import *
-from mysql_to_clickhouse_replication.tests.steps import *
-from testflows.connect import Shell
-from helpers.common import *
-
-
-@TestOutline
-def delete(self, primary_key, engine=True):
-    """Check `DELETE` query replication from a MySQL table to ClickHouse with different primary keys.
-    """
-
-    with Given("Receive UID"):
-        uid = getuid()
-
-    with And("I create unique table name"):
-        table_name = f"test{uid}"
-
-    clickhouse = self.context.cluster.node("clickhouse")
-    mysql = self.context.cluster.node("mysql-master")
-
-    init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}")
-
-    with Given(f"I create MySQL table {table_name}"):
-        create_mysql_table(
-            name=table_name,
-            statement=f"CREATE TABLE {table_name} "
-            "(id int(11) NOT NULL,"
-            "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT '',"
-            f"pad char(60) NOT NULL DEFAULT ''{primary_key}){' ENGINE = InnoDB;' if engine else ''}"
-        )
-
-    with When(f"I insert data in MySQL table"):
-        mysql.query(
-            f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');"
-        )
-    with Then(f"I delete data in MySQL table"):
-        mysql.query(
-            f"DELETE FROM {table_name} WHERE id=1;"
-        )
-
-    with And("I check that ClickHouse table has same number of rows as MySQL table"):
-        mysql_rows_after_delete = mysql.query(f"select count(*) from {table_name}").output.strip()[90:]
-        for attempt in retries(count=10, timeout=100, delay=5):
-            with attempt:
-                clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE")
-
-                clickhouse.query(
-                    f"SELECT count(*) FROM test.{table_name} FINAL where _sign !=-1 FORMAT CSV",
-                    message=mysql_rows_after_delete
-                )
-
-
-@TestScenario
-def no_primary_key(self):
-    """Check for `DELETE` with no primary key without InnoDB engine.
-    """
-    xfail("doesn't work in row")
-    delete(primary_key="", engine=False)
-
-
-@TestScenario
-def no_primary_key_innodb(self):
-    """Check for `DELETE` with no primary key with InnoDB engine.
-    """
-    xfail("doesn't work in row")
-    delete(primary_key="", engine=True)
-
-
-@TestScenario
-def simple_primary_key(self):
-    """Check for `DELETE` with simple primary key without InnoDB engine.
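    (Here "without InnoDB engine" means the explicit ENGINE clause is omitted
    from CREATE TABLE; MySQL still defaults to InnoDB.)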
- """ - delete(primary_key=", PRIMARY KEY (id)", engine=False) - - -@TestScenario -def simple_primary_key_innodb(self): - """Check for `DELETE` with simple primary key with InnoDB engine. - """ - delete(primary_key=", PRIMARY KEY (id)", engine=True) - - -@TestScenario -def complex_primary_key(self): - """Check for `DELETE` with complex primary key without engine InnoDB. - """ - delete(primary_key=", PRIMARY KEY (id,k)", engine=False) - - -@TestScenario -def complex_primary_key_innodb(self): - """Check for `DELETE` with complex primary key with engine InnoDB. - """ - delete(primary_key=", PRIMARY KEY (id,k)", engine=True) - - -@TestFeature -@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Deletes("1.0")) -@Name("delete") -def feature(self): - """MySql to ClickHouse replication delete tests to test `DELETE` queries.""" - - with Given("I enable debezium connector after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/insert.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/insert.py deleted file mode 100644 index 3d1b296f0..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/insert.py +++ /dev/null @@ -1,84 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * - - -@TestOutline -def mysql_to_clickhouse_postgres_inserts(self, input, output): - """`INSERT` check section""" - # xfail("`SELECT ... FINAL` eats rows") - - table_name = "users" - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} (col1 int4, col2 int4 NOT NULL, col3 int4 default 777)" - f" ENGINE = InnoDB;", - ) - clickhouse.query("SYSTEM STOP MERGES") - - with When("I insert data in MySql table"): - mysql.query( - f"INSERT INTO {table_name} (col1,col2,col3) VALUES {input};" - ) - - with Then("I check data inserted correct"): - mysql_rows_after_delete = mysql.query(f"select col1,col2,col3 from {table_name}").output.strip()[90:] - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - clickhouse.query( - f"SELECT col1,col2,col3 FROM test.{table_name} FINAL FORMAT CSV", - message=output - ) - - -@TestScenario -def null_default_insert(self): - """NULL and DEFAULT `INSERT` check.""" - mysql_to_clickhouse_postgres_inserts(input="(DEFAULT,5,DEFAULT)", output="\\N,5,777") - - -@TestScenario -def null_default_insert_2(self): - """NULL and DEFAULT `INSERT` check.""" - mysql_to_clickhouse_postgres_inserts(input="(DEFAULT,5,333)", output="\\N,5,333") - - -@TestScenario -def select_insert(self, auto_create_tables=True): - """NULL and DEFAULT `INSERT` check.""" - mysql_to_clickhouse_postgres_inserts(input="((select 2),7,DEFAULT)", output="2,7,777") - - -@TestScenario -def select_insert(self, auto_create_tables=True): - """NULL and DEFAULT `INSERT` check.""" - mysql_to_clickhouse_postgres_inserts(input="((select 2),7,DEFAULT)", output="2,7,777") - - -@TestScenario -def select_insert_2(self, auto_create_tables=True): - """NULL and DEFAULT `INSERT` check.""" - 
mysql_to_clickhouse_postgres_inserts(input="((select 2),(select i from (values(3)) as foo (i)),DEFAULT)", - output="2,3,777") - - -@TestFeature -@Name("insert") -def feature(self): - """Different `INSERT` tests section.""" - - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/manual_section.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/manual_section.py deleted file mode 100644 index 4a3418607..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/manual_section.py +++ /dev/null @@ -1,121 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from testflows.connect import Shell -from helpers.common import * - - -@TestOutline -def mysql_to_clickhouse_connection(self, auto_create_tables): - """Basic check MySQL to Clickhouse connection by small and simple data insert.""" - - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} " - "(id int(11) NOT NULL AUTO_INCREMENT," - "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," - "pad char(60) NOT NULL DEFAULT '',PRIMARY KEY (id,k))" - " ENGINE= InnoDB" - " PARTITION BY RANGE (k)" - " (PARTITION p1 VALUES LESS THAN (499999),PARTITION p2 VALUES LESS THAN MAXVALUE);" - - ) - pause() - - with When(f"I insert data in MySql table"): - mysql.query( - f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');" - ) - pause() - - if auto_create_tables: - with And("I check table creation"): - retry(clickhouse.query, timeout=30, delay=3)( - "SHOW TABLES FROM test", message=f"{table_name}" - ) - pause() - - - with And(f"I check that ClickHouse table has same number of rows as MySQL table"): - pass - - -@TestOutline -def mysql_to_clickhouse_connection2(self, auto_create_tables): - """Basic check MySQL to Clickhouse connection by small and simple data insert.""" - - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} " - "(id int(11) NOT NULL AUTO_INCREMENT," - "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," - "pad char(60) NOT NULL DEFAULT '',PRIMARY KEY (id))" - " ENGINE= InnoDB;" - # " PARTITION BY RANGE (k)" - # " (PARTITION p1 VALUES LESS THAN (499999),PARTITION p2 VALUES LESS THAN MAXVALUE);" - - ) - pause() - - with When(f"I insert data in MySql table"): - mysql.query( - f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');" - ) - pause() - - if auto_create_tables: - with And("I check table creation"): - retry(clickhouse.query, timeout=30, delay=3)( - "SHOW TABLES FROM test", message=f"{table_name}" - ) - 
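            # pause() is TestFlows' interactive breakpoint: execution stops
            # here so the cluster state can be inspected by hand, which is
            # what makes this the "manual" section.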
pause() - - - with And(f"I check that ClickHouse table has same number of rows as MySQL table"): - pass - -@TestScenario -def mysql_to_clickhouse_connection_ac(self, auto_create_tables=True): - """Basic check MySQL to Clickhouse connection by small and simple data insert with auto table creation.""" - mysql_to_clickhouse_connection2(auto_create_tables=auto_create_tables) - # with Given("I collect Sink logs"): - # with Shell() as bash: - # cmd = bash("docker-compose logs sink > sink.log") - -@TestFeature -@Name("manual section") -def feature(self): - """MySql to ClickHouse replication sanity test that checks - basic replication using a simple table.""" - - with Given("I enable debezium connector after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/multiple_tables.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/multiple_tables.py deleted file mode 100644 index f9e2a281e..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/multiple_tables.py +++ /dev/null @@ -1,67 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from testflows.connect import Shell -from helpers.common import * - - -@TestOutline -def multiple_table_creation(self, number_of_tables): - """ - Multiple tables auto creation - """ - mysql = self.context.cluster.node("mysql-master") - clickhouse = self.context.cluster.node("clickhouse") - - with Given("I create unique topics"): - table_name = f"{','.join([f'SERVER5432.test.users{i}' for i in range(1, number_of_tables + 1)])}" - - init_sink_connector(auto_create_tables=True, topics=table_name) - - for i in range(number_of_tables): - table_name = f"users{i}" - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT,age INT, PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - - with When(f"I insert data in MySql table"): - mysql.query( - f"insert into {table_name} values (1,777)" - ) - - with Then("I count created tables"): - retry(clickhouse.query, timeout=50, delay=1)( - "SELECT count() FROM system.tables WHERE name ilike 'users%'", - message=f"{i+1}", - ) - - -@TestScenario -def tables_100(self): - """ - Creation of 10 tables (if --stress enabled 100 tables creation). - """ - if self.context.cluster.stress: - multiple_table_creation(number_of_tables=100) - else: - multiple_table_creation(number_of_tables=10) - - -@TestFeature -@Name("multiple tables") -def feature(self): - """ - Multiple tables creation. 
- """ - - with Given("I enable debezium connector after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() \ No newline at end of file diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/primary_keys.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/primary_keys.py deleted file mode 100644 index 9e2032d5e..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/primary_keys.py +++ /dev/null @@ -1,103 +0,0 @@ - -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from helpers.common import * - - -@TestOutline -def check_different_primary_keys(self, insert_values, output_values, mysql_primary_key, ch_primary_key, - auto_create_table=True, - timeout=70): - """Check replicating MySQl table with different primary keys.""" - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - with Given(f"I create MySQL table {table_name} with some primary key"): - init_sink_connector(auto_create_tables=auto_create_table, topics=f"SERVER5432.test.{table_name}") - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT NOT NULL,Name VARCHAR(14) NOT NULL {mysql_primary_key})" - f" ENGINE = InnoDB;", - ) - - if not auto_create_table: - with And(f"I create ClickHouse replica test.{table_name} to MySQL table"): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32, Name String) " - f"ENGINE = ReplacingMergeTree " - f"{ch_primary_key}" - f"index_granularity = 8192;", - ) - - with When(f"I insert data in MySql table {table_name}"): - mysql.query( - f"INSERT INTO {table_name} VALUES {insert_values}" - ) - - with Then(f"I check that ClickHouse table has same data as MySQL table"): - select(insert=output_values, table_name=table_name, statement="id, Name", with_final=True, timeout=50) - - -@TestScenario -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Simple("1.0") -) -def simple_primary_key(self): - """Check replicating MySQl table with simple primary key.""" - check_different_primary_keys( - insert_values="(1, 'Ivan'),(3,'Sergio'),(4,'Alex'),(2,'Alex'),(5,'Andre')", - output_values='1,"Ivan"\n2,"Alex"\n3,"Sergio"\n4,"Alex"\n5,"Andre"', - mysql_primary_key=", PRIMARY KEY (id)", - ch_primary_key="PRIMARY KEY id ORDER BY id SETTINGS " - ) - - -@TestScenario -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Composite("1.0") -) -def composite_primary_key(self): - """Check replicating MySQl table with composite key.""" - check_different_primary_keys( - insert_values="(1, 'Ivan'),(1,'Sergio'),(1,'Alex'),(2,'Alex'),(2,'Andre')", - output_values='1,"Alex"\n1,"Ivan"\n1,"Sergio"\n2,"Alex"\n2,"Andre"', - mysql_primary_key=", PRIMARY KEY (id, Name)", - ch_primary_key="PRIMARY KEY (id,Name) ORDER BY (id,Name) SETTINGS " - ) - - -@TestScenario -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_NoPrimaryKey("1.0") -) -def no_primary_key(self): - """Check replicating MySQl table without any primary key.""" - - xfail("https://github.com/Altinity/clickhouse-sink-connector/issues/39") - check_different_primary_keys( - insert_values="(1, 'Ivan'),(1,'Sergio'),(1,'Alex'),(2,'Alex'),(2,'Andre')", - 
output_values='1,"Ivan"\n1,"Sergio"\n1,"Alex"\n2,"Alex"\n2,"Andre"', - mysql_primary_key="", - ch_primary_key="PRIMARY KEY tuple() ORDER BY tuple() SETTINGS " - ) - - -@TestFeature -@Name("primary keys") -def feature(self): - """MySql to ClickHouse replication simple and composite primary keys tests.""" - - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/sanity.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/sanity.py deleted file mode 100644 index 6715cb27d..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/sanity.py +++ /dev/null @@ -1,87 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from testflows.connect import Shell -from helpers.common import * - - -@TestOutline -def mysql_to_clickhouse_connection(self, auto_create_tables): - """Basic check MySQL to Clickhouse connection by small and simple data insert.""" - - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT,age INT, PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - - if not auto_create_tables: - with And( - f"I create ClickHouse replica test.{table_name} to MySQL table with auto remove in the end of the test" - ): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32, age Int32) " - f"ENGINE = ReplacingMergeTree " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;", - ) - - with When(f"I insert data in MySql table"): - mysql.query( - f"insert into {table_name} values (1,777),(2,777),(3,777),(4,777),(5,777),(6,777),(7,777)," - f"(8,777),(9,777)" - ) - - if auto_create_tables: - with And("I check table creation"): - retry(clickhouse.query, timeout=30, delay=3)( - "SHOW TABLES FROM test", message=f"{table_name}" - ) - - with And(f"I check that ClickHouse table has same number of rows as MySQL table"): - select(insert="9", table_name=table_name, statement="count()", with_final=True, timeout=50) - - -@TestScenario -def mysql_to_clickhouse_connection_ac(self, auto_create_tables=True): - """Basic check MySQL to Clickhouse connection by small and simple data insert with auto table creation.""" - mysql_to_clickhouse_connection(auto_create_tables=auto_create_tables) - # with Given("I collect Sink logs"): - # with Shell() as bash: - # cmd = bash("docker-compose logs sink > sink.log") - - -@TestScenario -def mysql_to_clickhouse_connection_mc(self, auto_create_tables=False): - """Basic check MySQL to Clickhouse connection by small and simple data insert with manual table creation.""" - mysql_to_clickhouse_connection(auto_create_tables=auto_create_tables) - - -@TestFeature -@Name("sanity") -def feature(self): - """MySql to ClickHouse replication sanity test that checks - basic replication using a simple table.""" - - with Given("I enable debezium connector after kafka starts up"): - 
init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/steps.py deleted file mode 100644 index d2ad1f881..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps.py +++ /dev/null @@ -1,524 +0,0 @@ -import inspect -import os -import time - -from testflows.core import * - - -@TestStep(Given) -def init_debezium_connector(self, node=None): - """ - Initialize debezium connectors. - """ - if node is None: - node = self.context.cluster.node("bash-tools") - - debezium_settings_transfer_command_apicurio = """cat </dev/null | jq ." - ) - - -@TestStep(Given) -def init_sink_connector(self, node=None, auto_create_tables=False, topics="SERVER5432.sbtest.sbtest1,SERVER5432.test.users1,SERVER5432.test.users2,SERVER5432.test.users3, SERVER5432.test.users"): - """ - Initialize debezium and sink connectors. - """ - if node is None: - node = self.context.cluster.node("bash-tools") - - if auto_create_tables: - auto_create_tables_local = "true" - else: - auto_create_tables_local = "false" - - sink_settings_transfer_command_apicurio = ( - """cat </dev/null | jq ." - ) - - -@TestStep(Given) -def create_mysql_table(self, name=None, statement=None, node=None): - if node is None: - node = self.context.cluster.node("mysql-master") - if name is None: - name = "users" - if statement is None: - statement = f"CREATE TABLE IF NOT EXISTS {name} " - f"(id INT AUTO_INCREMENT,age INT, PRIMARY KEY (id))" - f" ENGINE = InnoDB;" - - try: - with Given(f"I create MySQL table {name}"): - node.query(statement) - yield - finally: - with Finally("I clean up by deleting table in MySQL"): - node.query(f"DROP TABLE IF EXISTS {name};") - self.context.cluster.node("clickhouse").query( - f"DROP TABLE IF EXISTS test.{name};" - ) - time.sleep(5) - - -@TestStep(Given) -def create_clickhouse_table(self, name=None, statement=None, node=None): - if node is None: - node = self.context.cluster.node("clickhouse") - if name is None: - name = "users" - if statement is None: - statement = f"CREATE TABLE IF NOT EXISTS test.{name} " - f"(id Int32, age Int32) " - f"ENGINE = MergeTree " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;" - - try: - with Given(f"I create ClickHouse table {name}"): - node.query(statement) - yield - finally: - with Finally("I clean up by deleting table in ClickHouse"): - node.query(f"DROP TABLE IF EXISTS test.{name};") - - -@TestStep(Given) -def create_all_data_types_table( - self, table_name=None, node=None, manual_ch_table_create=False -): - """Step to create table with all data types.""" - if node is None: - node = self.context.cluster.node("clickhouse") - if table_name is None: - table_name = "users" - - with Given(f"I create MySQL table {table_name} with all data types)"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT," - f"D4 DECIMAL(2,1) NOT NULL, D5 DECIMAL(30, 10) NOT NULL," - f" Doublex DOUBLE NOT NULL," - f" x_date DATE NOT NULL," - f"x_datetime6 DATETIME(6) NOT NULL," - f"x_time TIME NOT NULL," - f"x_time6 TIME(6) NOT NULL," - f"Intmin INT NOT NULL, Intmax INT NOT NULL," - f"UIntmin INT UNSIGNED NOT NULL, UIntmax INT UNSIGNED NOT NULL," - f"BIGIntmin BIGINT NOT NULL,BIGIntmax BIGINT NOT NULL," - f"UBIGIntmin BIGINT UNSIGNED NOT NULL,UBIGIntmax BIGINT UNSIGNED NOT NULL," - f"TIntmin TINYINT NOT NULL,TIntmax TINYINT NOT NULL," - 
f"UTIntmin TINYINT UNSIGNED NOT NULL,UTIntmax TINYINT UNSIGNED NOT NULL," - f"SIntmin SMALLINT NOT NULL,SIntmax SMALLINT NOT NULL," - f"USIntmin SMALLINT UNSIGNED NOT NULL,USIntmax SMALLINT UNSIGNED NOT NULL," - f"MIntmin MEDIUMINT NOT NULL,MIntmax MEDIUMINT NOT NULL," - f"UMIntmin MEDIUMINT UNSIGNED NOT NULL,UMIntmax MEDIUMINT UNSIGNED NOT NULL," - f" x_char CHAR NOT NULL," - f" x_text TEXT NOT NULL," - f" x_varchar VARCHAR(4) NOT NULL," - f" x_Blob BLOB NOT NULL," - f" x_Mediumblob MEDIUMBLOB NOT NULL," - f" x_Longblob LONGBLOB NOT NULL," - f" x_binary BINARY NOT NULL," - f" x_varbinary VARBINARY(4) NOT NULL," - f" PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - if manual_ch_table_create: - with And(f"I create ClickHouse replica test.{table_name}"): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32," - f" D4 DECIMAL(2,1), D5 DECIMAL(30, 10)," - f" Doublex Float64," - f" x_date Date," - f" x_datetime6 String," - f" x_time String," - f" x_time6 String," - f" Intmin Int32, Intmax Int32," - f" UIntmin UInt32, UIntmax UInt32," - f" BIGIntmin Int64, BIGIntmax Int64," - f" UBIGIntmin UInt64, UBIGIntmax UInt64," - f" TIntmin Int8, TIntmax Int8," - f" UTIntmin UInt8, UTIntmax UInt8," - f" SIntmin Int16, SIntmax Int16," - f" USIntmin UInt16, USIntmax UInt16," - f" MIntmin Int32, MIntmax Int32," - f" UMIntmin UInt32, UMIntmax UInt32," - f" x_char LowCardinality(String)," - f" x_text String," - f" x_varchar String," - f" x_Blob String," - f" x_Mediumblob String," - f" x_Longblob String," - f" x_binary String," - f" x_varbinary String)" - f"ENGINE = MergeTree " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;", - ) - - -@TestStep(Given) -def create_all_data_types_table_nullable( - self, table_name=None, node=None, manual_ch_table_create=False -): - """Step to create table with all data types.""" - if node is None: - node = self.context.cluster.node("clickhouse") - if table_name is None: - table_name = "users" - with Given(f"I create MySQL table {table_name} with all data types)"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT," - f"D4 DECIMAL(2,1), D5 DECIMAL(30, 10)," - f" Doublex DOUBLE," - f" x_date DATE," - f"x_datetime6 DATETIME(6)," - f"x_time TIME," - f"x_time6 TIME(6)," - f"Intmin INT, Intmax INT," - f"UIntmin INT UNSIGNED, UIntmax INT UNSIGNED," - f"BIGIntmin BIGINT,BIGIntmax BIGINT," - f"UBIGIntmin BIGINT UNSIGNED,UBIGIntmax BIGINT UNSIGNED," - f"TIntmin TINYINT,TIntmax TINYINT," - f"UTIntmin TINYINT UNSIGNED,UTIntmax TINYINT UNSIGNED," - f"SIntmin SMALLINT,SIntmax SMALLINT," - f"USIntmin SMALLINT UNSIGNED,USIntmax SMALLINT UNSIGNED," - f"MIntmin MEDIUMINT,MIntmax MEDIUMINT," - f"UMIntmin MEDIUMINT UNSIGNED,UMIntmax MEDIUMINT UNSIGNED," - f" x_char CHAR," - f" x_text TEXT," - f" x_varchar VARCHAR(4)," - f" x_Blob BLOB," - f" x_Mediumblob MEDIUMBLOB," - f" x_Longblob LONGBLOB," - f" x_binary BINARY," - f" x_varbinary VARBINARY(4)," - f" PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - if manual_ch_table_create: - with And(f"I create ClickHouse replica test.{table_name}"): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32," - f" D4 Nullable(DECIMAL(2,1)), D5 Nullable(DECIMAL(30, 10))," - f" Doublex Float64," - f" x_date Date," - f" x_datetime6 String," - f" x_time String," - f" x_time6 String," - f" Intmin Int32, Intmax Int32," - f" UIntmin UInt32, 
UIntmax UInt32," - f" BIGIntmin Int64, BIGIntmax Int64," - f" UBIGIntmin UInt64, UBIGIntmax UInt64," - f" TIntmin Int8, TIntmax Int8," - f" UTIntmin UInt8, UTIntmax UInt8," - f" SIntmin Int16, SIntmax Int16," - f" USIntmin UInt16, USIntmax UInt16," - f" MIntmin Int32, MIntmax Int32," - f" UMIntmin UInt32, UMIntmax UInt32," - f" x_char LowCardinality(String)," - f" x_text String," - f" x_varchar String," - f" x_Blob String," - f" x_Mediumblob String," - f" x_Longblob String," - f" x_binary String," - f" x_varbinary Nullable(String))" - f"ENGINE = MergeTree " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;", - ) - - -@TestStep(Given) -def sb_debizium_script_connector(self): - try: - time.sleep(10) - with Given( - "I start debezium connector", - description="""Sending debezium settings push command on bash_tools - and wait message that they applied correct""", - ): - retry(self.context.cluster.node("bash-tools").cmd, timeout=100, delay=3)( - f"./../manual_scripts/debezium-connector-setup-sysbench.sh", - message='{"error_code":409,"message":"Connector ' - 'test-connector already exists"}', - ) - yield - finally: - time.sleep(5) - with Finally("I delete debezium sysbench connections"): - with By("deleting debezium connector", flags=TE): - self.context.cluster.node("bash-tools").cmd( - 'curl -X DELETE -H "Accept:application/json" "http://debezium:8083/connectors/test-connector" ' - "2>/dev/null | jq ." - ) - with And("Drop CH table"): - self.context.cluster.node("clickhouse").query( - "DROP TABLE IF EXISTS test.sbtest1;" - ) - - -@TestStep(Given) -def select(self, insert, table_name=None, statement=None, node=None, with_final=False, with_optimize=False, - timeout=100): - """SELECT with an option to either with FINAL or loop SELECT + OPTIMIZE TABLE default simple 'SELECT' - :param insert: expected insert data - :param table_name: table name for select default "users" - :param statement: statement for select default "*" - :param node: node name - :param with_final: 'SELECT ... 
FINAL' - :param with_optimize: loop 'OPTIMIZE TABLE' + 'SELECT' - :param timeout: retry timeout - - """ - if node is None: - node = self.context.cluster.node("clickhouse") - if table_name is None: - table_name = "users" - if statement is None: - statement = "*" - - if with_final: - retry( - node.query, - timeout=timeout, - delay=10, - )(f"SELECT {statement} FROM test.{table_name} FINAL FORMAT CSV", message=f"{insert}", ) - elif with_optimize: - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - node.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - node.query( - f"SELECT {statement} FROM test.{table_name} FORMAT CSV", message=f"{insert}" - ) - - else: - retry( - node.query, - timeout=timeout, - delay=10, - )(f"SELECT {statement} FROM test.{table_name} FORMAT CSV", message=f"{insert}", ) - - - - - - - - diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/table_schema_changes.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/table_schema_changes.py deleted file mode 100644 index 578b3c68b..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/table_schema_changes.py +++ /dev/null @@ -1,98 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * - - -@TestOutline -def check_datatype_replication( - self, - mysql_type, - ch_type, - values, - ch_values, - nullable, - table_name, - hex_type=False, - auto_create_tables=True, -): - """Check replication of a given MySQL data type.""" - table_name = table_name - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - with Given(f"I create MySQL table {table_name})"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT," - f"MyData {mysql_type}{' NOT NULL' if not nullable else ''}," - f" PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - - if not auto_create_tables: - with And(f"I create ClickHouse replica test.{table_name}"): - create_clickhouse_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} " - f"(id Int32,{f'MyData Nullable({ch_type})' if nullable else f'MyData {ch_type}'}, sign " - f"Int8, ver UInt64) " - f"ENGINE = ReplacingMergeTree(ver) " - f"PRIMARY KEY id ORDER BY id SETTINGS " - f"index_granularity = 8192;", - ) - - with When(f"I insert data in MySql table {table_name}"): - for i, value in enumerate(values, 1): - mysql.query(f"INSERT INTO {table_name} VALUES ({i}, {value})") - with Then(f"I make check that ClickHouse table has same dataset"): - retry(clickhouse.query, timeout=50, delay=1)( - f"SELECT id,{'unhex(MyData)' if hex_type else 'MyData'} FROM test.{table_name} FORMAT CSV", - message=f"{ch_values[i - 1]}", - ) - - -@TestOutline(Scenario) -@Examples( - "mysql_type ch_type values ch_values nullable", - [ - ("TEXT", "String", ["'some_text'"], ['"some_text"'], False), - ( - "INT", - "Int32", - ["-2147483648", "0", "2147483647"], - ["-2147483648", "0", "2147483647"], - False, - ), - ("INT UNSIGNED", "UInt32", ["0", "4294967295"], ["0", "4294967295"], False), - ("DECIMAL(30, 10)", "DECIMAL(30, 10)", ["1.232323233"], ["1.232323233"], False), - ], -) -def table_recreation_with_different_datatypes( - self, mysql_type, ch_type, values, ch_values, nullable -): - """Check MySQL table recreation with the same name but different column data types.""" - xfail("debezium data conflict crash") - 
check_datatype_replication( - mysql_type=mysql_type, - ch_type=ch_type, - values=values, - ch_values=ch_values, - nullable=nullable, - table_name="users1", - ) - - -@TestFeature -@Name("table schema changes") -def feature(self): - """Test some table schema changes.""" - - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - init_sink_connector(auto_create_tables=True) - - for scenario in loads(current_module(), Scenario): - scenario() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/update.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/update.py deleted file mode 100644 index 90c97ecc4..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/update.py +++ /dev/null @@ -1,106 +0,0 @@ -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from testflows.connect import Shell -from helpers.common import * - - -@TestOutline -def update(self, primary_key, engine): - """Check `UPDATE` query replicating from MySQl table to CH with different primary keys.""" - - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name}"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE {table_name} " - "(id int(11) NOT NULL," - "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," - f"pad char(60) NOT NULL DEFAULT ''{primary_key}){' ENGINE = InnoDB;' if engine else ''}" - ) - - with When(f"I insert data in MySql table"): - mysql.query( - f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');" - ) - with Then(f"I update data in MySql table"): - mysql.query( - f"UPDATE {table_name} SET k=k+5 WHERE id=1;" - ) - - with And("I check that ClickHouse has updated data as MySQL"): - for attempt in retries(count=10, timeout=100, delay=5): - with attempt: - clickhouse.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") - - clickhouse.query( - f"SELECT * FROM test.{table_name} FINAL where _sign !=-1 FORMAT CSV", - message='1,7,"a","b"' - ) - - -@TestScenario -def no_primary_key(self): - """Check for `UPDATE` with no primary key without table engine. - """ - xfail("makes delete") - update(primary_key="", engine=False) - - -@TestScenario -def no_primary_key_innodb(self): - """Check for `UPDATE` with no primary key with table engine InnoDB. - """ - xfail("makes delete") - update(primary_key="", engine=True) - - -@TestScenario -def simple_primary_key(self): - """Check for `UPDATE` with simple primary key without table engine. - """ - update(primary_key=", PRIMARY KEY (id)", engine=False) - - -@TestScenario -def simple_primary_key_innodb(self): - """Check for `UPDATE` with simple primary key with table engine InnoDB. - """ - update(primary_key=", PRIMARY KEY (id)", engine=True) - - -@TestScenario -def complex_primary_key(self): - """Check for `UPDATE` with complex primary key without table engine. - """ - update(primary_key=", PRIMARY KEY (id,k)", engine=False) - - -@TestScenario -def complex_primary_key_innodb(self): - """Check for `UPDATE` with complex primary key with table engine InnoDB. 
- """ - update(primary_key=", PRIMARY KEY (id,k)", engine=True) - - -@TestFeature -@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Updates("1.0")) -@Name("update") -def feature(self): - """MySql to ClickHouse replication update tests to test `UPDATE` queries.""" - - with Given("I enable debezium connector after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() \ No newline at end of file diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/virtual_columns.py b/tests/Testflows/mysql_to_clickhouse_replication/tests/virtual_columns.py deleted file mode 100644 index af8259d0d..000000000 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/virtual_columns.py +++ /dev/null @@ -1,59 +0,0 @@ -import time - -from testflows.core import * -from mysql_to_clickhouse_replication.requirements import * -from mysql_to_clickhouse_replication.tests.steps import * -from helpers.common import * - - -@TestScenario -@Requirements( - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree_VirtualColumnNames("1.0") -) -def virtual_column_names( - self, - mysql_type="DATETIME", - nullable=False, - auto_create_tables=True, -): - """Check correctness of virtual column names.""" - with Given("Receive UID"): - uid = getuid() - - with And("I create unique table name"): - table_name = f"test{uid}" - - clickhouse = self.context.cluster.node("clickhouse") - mysql = self.context.cluster.node("mysql-master") - - init_sink_connector(auto_create_tables=auto_create_tables, topics=f"SERVER5432.test.{table_name}") - - with Given(f"I create MySQL table {table_name})"): - create_mysql_table( - name=table_name, - statement=f"CREATE TABLE IF NOT EXISTS {table_name} " - f"(id INT AUTO_INCREMENT," - f"MyData {mysql_type}{' NOT NULL' if not nullable else ''}," - f" PRIMARY KEY (id))" - f" ENGINE = InnoDB;", - ) - - with When(f"I insert data in MySql table {table_name}"): - mysql.query(f"INSERT INTO {table_name} VALUES (1, '2018-09-08 17:51:05.777')") - - with Then(f"I make check that ClickHouse table virtual column names are correct"): - retry(clickhouse.query, timeout=50, delay=1)( - f"SHOW CREATE TABLE test.{table_name}", message="`_sign` Int8,\\n `_version` UInt64\\n" - ) - - -@TestFeature -@Name("virtual columns") -def feature(self): - """Section to check behavior of virtual columns.""" - - with Given("I enable debezium and sink connectors after kafka starts up"): - init_debezium_connector() - - for scenario in loads(current_module(), Scenario): - scenario() \ No newline at end of file diff --git a/tests/Testflows/README.md b/tests/integration/README.md similarity index 56% rename from tests/Testflows/README.md rename to tests/integration/README.md index 2f8facb3d..b0057b106 100644 --- a/tests/Testflows/README.md +++ b/tests/integration/README.md @@ -4,7 +4,7 @@ Regression testing of MySQL to ClickHouse Replication. Tests with using [TestFlows](https://testflows.com/) environment. -Make changes of clickhouse-sink-connector version in `mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/docker-compose.yml` +Make changes of clickhouse-sink-connector version in `mysql_to_clickhouse_replication_env/docker-compose.yml` line ```commandline image: altinity/clickhouse-sink-connector: {version} @@ -12,13 +12,13 @@ image: altinity/clickhouse-sink-connector: {version} Where `version` is corresponds to clickhouse-sink-connector version you want to test. 
-Use `python3 -u mysql_to_clickhouse_replication/regression.py --only -"/mysql to clickhouse replication/{module name}/{test name}/*" --test-to-end -o classic` +Use +`python3 -u regression.py --only "/mysql to clickhouse replication/{module name}/{test name}/*" --clickhouse-binary-path=docker://clickhouse/clickhouse-server:22.8.6.71-alpine --test-to-end -o classic` to run tests locally. You can run all tests with -`python3 -u mysql_to_clickhouse_replication/regression.py --only "/mysql to clickhouse replication/*" --test-to-end -o classic` +`python3 -u regression.py --only "/mysql to clickhouse replication/*" --clickhouse-binary-path=docker://clickhouse/clickhouse-server:22.8.6.71-alpine --test-to-end -o classic` Example, @@ -30,3 +30,17 @@ python3 -u mysql_to_clickhouse_replication/regression.py --only "/mysql to click To save service logs to the mysql_to_clickhouse_replication/_instances folder, use `--collect-service-logs` To run all tests ignoring errors, use `--test-to-end` + +### Sink connector version + +For local runs: +```bash + export SINK_CONNECTOR_VERSION="2023-02-07" +``` +For CI/CD runs, change `testflow_tests.yml`: + +```yml +env: + SINK_CONNECTOR_VERSION: "2023-02-07" +``` + diff --git a/tests/Testflows/mysql_to_clickhouse_replication/__init__.py b/tests/integration/__init__.py similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/__init__.py rename to tests/integration/__init__.py diff --git a/tests/integration/configs/clickhouse/config.d/logs.xml b/tests/integration/configs/clickhouse/config.d/logs.xml new file mode 100644 index 000000000..6d73908cf --- /dev/null +++ b/tests/integration/configs/clickhouse/config.d/logs.xml @@ -0,0 +1,16 @@
+<clickhouse>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/log.log</log>
+        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
+        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
+    </logger>
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <flush_interval_milliseconds>500</flush_interval_milliseconds>
+    </part_log>
+</clickhouse>
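With `part_log` enabled and flushed every 500 ms, merge activity becomes queryable almost immediately, which helps when debugging the `OPTIMIZE TABLE ... FINAL DEDUPLICATE` retries used by the `select` step above. A minimal sketch of such a check (a hypothetical step, assuming the node helpers from `tests/integration/helpers/cluster.py`; the step name and filter are illustrative):

```python
# Hypothetical TestFlows step, not part of this change.
from testflows.core import *


@TestStep(Then)
def check_parts_were_merged(self, table_name="users"):
    """Check that system.part_log recorded a merge for the given table;
    with flush_interval_milliseconds=500 the entry appears within about a second."""
    node = self.context.cluster.node("clickhouse")
    retry(node.query, timeout=30, delay=1)(
        f"SELECT count() > 0 FROM system.part_log "
        f"WHERE table = '{table_name}' AND event_type = 'MergeParts' FORMAT CSV",
        message="1",
    )
```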
diff --git a/tests/integration/configs/clickhouse/config.d/remote.xml b/tests/integration/configs/clickhouse/config.d/remote.xml new file mode 100644 index 000000000..926aaf09f --- /dev/null +++ b/tests/integration/configs/clickhouse/config.d/remote.xml @@ -0,0 +1,73 @@ + + + + + + + clickhouse1 + 9000 + + + + + + false + + clickhouse1 + 9000 + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + + + + + clickhouse1 + 9000 + + + + + clickhouse2 + 9000 + + + + + clickhouse3 + 9000 + + + + + + false + + clickhouse + 9000 + + + clickhouse1 + 9000 + + + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + + + diff --git a/tests/integration/configs/clickhouse/config.d/zookeeper.xml b/tests/integration/configs/clickhouse/config.d/zookeeper.xml new file mode 100644 index 000000000..1d5c0b6cb --- /dev/null +++ b/tests/integration/configs/clickhouse/config.d/zookeeper.xml @@ -0,0 +1,10 @@ + + + + + zookeeper + 2181 + + 15000 + + diff --git a/tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/config.xml b/tests/integration/configs/clickhouse/config.xml similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/config.xml rename to tests/integration/configs/clickhouse/config.xml diff --git a/tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/users.xml b/tests/integration/configs/clickhouse/users.xml similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/users.xml rename to tests/integration/configs/clickhouse/users.xml diff --git a/tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/config.d/macros.xml b/tests/integration/configs/clickhouse0/config.d/macros.xml similarity index 80% rename from tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/config.d/macros.xml rename to tests/integration/configs/clickhouse0/config.d/macros.xml index 2c59855d6..a9344363c 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/configs/clickhouse/config.d/macros.xml +++ b/tests/integration/configs/clickhouse0/config.d/macros.xml @@ -1,7 +1,7 @@ - + clickhouse 01 - + diff --git a/tests/integration/configs/clickhouse1/config.d/macros.xml b/tests/integration/configs/clickhouse1/config.d/macros.xml new file mode 100644 index 000000000..46879388c --- /dev/null +++ b/tests/integration/configs/clickhouse1/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse1 + 01 + + + diff --git a/tests/integration/configs/clickhouse2/config.d/macros.xml b/tests/integration/configs/clickhouse2/config.d/macros.xml new file mode 100644 index 000000000..658914602 --- /dev/null +++ b/tests/integration/configs/clickhouse2/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse2 + 02 + + + diff --git a/tests/integration/configs/clickhouse3/config.d/macros.xml b/tests/integration/configs/clickhouse3/config.d/macros.xml new file mode 100644 index 000000000..684de2307 --- /dev/null +++ b/tests/integration/configs/clickhouse3/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse3 + 02 + + + diff --git a/tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/clickhouse-service.yml b/tests/integration/env/clickhouse-service.yml similarity index 72% rename from tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/clickhouse-service.yml rename to tests/integration/env/clickhouse-service.yml index 1284ad4a5..692eb56ff 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/clickhouse-service.yml 
+++ b/tests/integration/env/clickhouse-service.yml @@ -10,9 +10,11 @@ services: - "8123" - "2181" volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/logs.xml:/etc/clickhouse-server/config.d/logs.xml" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/remote.xml:/etc/clickhouse-server/config.d/remote.xml" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/zookeeper.xml:/etc/clickhouse-server/config.d/zookeeper.xml" - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml" - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml" - - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml" - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse" - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge" entrypoint: bash -c "tail -f /dev/null" diff --git a/tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/docker-compose.yml b/tests/integration/env/docker-compose.yml similarity index 58% rename from tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/docker-compose.yml rename to tests/integration/env/docker-compose.yml index 31c36c623..124c1c1c3 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/docker-compose.yml +++ b/tests/integration/env/docker-compose.yml @@ -43,7 +43,11 @@ services: debezium: container_name: debezium hostname: debezium - image: debezium/connect:1.9.5.Final +# image: debezium/connect:1.9.5.Final + build: + context: ../../../docker/debezium_jmx + args: + DEBEZIUM_VERSION: 2.1.0.Alpha1 restart: "no" expose: - "8083" @@ -79,7 +83,7 @@ services: sink: container_name: sink hostname: sink - image: altinity/clickhouse-sink-connector:2022-10-03 + image: "altinity/clickhouse-sink-connector:${SINK_CONNECTOR_VERSION}" restart: "no" expose: - "8083" @@ -99,14 +103,20 @@ services: depends_on: - kafka + zookeeper: + extends: + file: zookeeper-service.yml + service: zookeeper + + clickhouse: extends: file: clickhouse-service.yml service: clickhouse hostname: clickhouse # environment: -# - CLICKHOUSE_USER=root -# - CLICKHOUSE_PASSWORD=root +# - CLICKHOUSE_USER=1000 +# - CLICKHOUSE_PASSWORD=1000 # - CLICKHOUSE_DB=test ulimits: nofile: @@ -117,6 +127,10 @@ services: - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse/logs/:/var/log/clickhouse-server/" - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse/etc/:/etc/clickhouse-server/" - "${CLICKHOUSE_TESTS_DIR}/_instances/share_folder:/tmp/share_folder" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse0/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml" + depends_on: + zookeeper: + condition: service_healthy bash-tools: @@ -132,4 +146,58 @@ services: - ../tests/manual_scripts:/manual_scripts - "${CLICKHOUSE_TESTS_DIR}/_instances/share_folder:/tmp/share_folder" + clickhouse1: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse1 + volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/" + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml" + depends_on: + zookeeper: + condition: service_healthy + + clickhouse2: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse2 + 
volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/" + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml" + depends_on: + zookeeper: + condition: service_healthy + + clickhouse3: + extends: + file: clickhouse-service.yml + service: clickhouse + hostname: clickhouse3 + volumes: + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/" + - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml" + depends_on: + zookeeper: + condition: service_healthy + + # dummy service which does nothing, but allows to postpone + # 'docker-compose up -d' till all dependecies will go healthy + all_services_ready: + image: hello-world + depends_on: + clickhouse: + condition: service_healthy + clickhouse1: + condition: service_healthy + clickhouse2: + condition: service_healthy + clickhouse3: + condition: service_healthy + zookeeper: + condition: service_healthy diff --git a/tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/mysqld.cnf b/tests/integration/env/mysqld.cnf similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/mysql_to_clickhouse_replication_env/mysqld.cnf rename to tests/integration/env/mysqld.cnf diff --git a/tests/integration/env/zookeeper-service.yml b/tests/integration/env/zookeeper-service.yml new file mode 100755 index 000000000..f27405b97 --- /dev/null +++ b/tests/integration/env/zookeeper-service.yml @@ -0,0 +1,18 @@ +version: '2.3' + +services: + zookeeper: + image: zookeeper:3.6.2 + expose: + - "2181" + environment: + ZOO_TICK_TIME: 500 + ZOO_MY_ID: 1 + healthcheck: + test: echo stat | nc localhost 2181 + interval: 3s + timeout: 2s + retries: 5 + start_period: 2s + security_opt: + - label:disable diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/__init__.py b/tests/integration/helpers/__init__.py similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/tests/__init__.py rename to tests/integration/helpers/__init__.py diff --git a/tests/Testflows/helpers/argparser.py b/tests/integration/helpers/argparser.py similarity index 100% rename from tests/Testflows/helpers/argparser.py rename to tests/integration/helpers/argparser.py diff --git a/tests/Testflows/helpers/cluster.py b/tests/integration/helpers/cluster.py similarity index 85% rename from tests/Testflows/helpers/cluster.py rename to tests/integration/helpers/cluster.py index 2fac39ede..08b1a65bc 100755 --- a/tests/Testflows/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1,17 +1,18 @@ import os -import uuid import time +import uuid import inspect -import threading import tempfile +import threading import testflows.settings as settings -from testflows.core import * from testflows.asserts import error from testflows.connect import Shell as ShellBase +from testflows.core import * from testflows.uexpect import ExpectTimeoutError -from helpers.common import check_clickhouse_version, current_cpu + +from integration.helpers.common import check_clickhouse_version, current_cpu MESSAGES_TO_RETRY = [ "DB::Exception: ZooKeeper session has been expired", @@ -104,6 +105,18 @@ def start(self, timeout=300, retry_count=5): steps=False, ) + def kill(self, timeout=300, retry_count=5, 
safe=True): + """Kill node.""" + self.close_bashes() + + retry(self.cluster.command, retry_count)( + None, + f"{self.cluster.docker_compose} kill {self.name}", + timeout=timeout, + exitcode=0, + steps=False, + ) + def stop(self, timeout=300, retry_count=5, safe=True): """Stop node.""" self.close_bashes() @@ -120,17 +133,17 @@ def command(self, *args, **kwargs): return self.cluster.command(self.name, *args, **kwargs) def cmd( - self, - cmd, - message=None, - exitcode=None, - steps=True, - shell_command="bash --noediting", - no_checks=False, - raise_on_exception=False, - step=By, - *args, - **kwargs, + self, + cmd, + message=None, + exitcode=None, + steps=True, + shell_command="bash --noediting", + no_checks=False, + raise_on_exception=False, + step=By, + *args, + **kwargs, ): """Execute and check command. :param cmd: command @@ -140,7 +153,7 @@ def cmd( command = f"{cmd}" with step( - "executing command", description=command, format_description=False + "executing command", description=command, format_description=False ) if steps else NullStep(): try: r = self.cluster.bash(self.name, command=shell_command)( @@ -159,7 +172,7 @@ def cmd( if message is not None: with Then( - f"output should contain message", description=message + f"output should contain message", description=message ) if steps else NullStep(): assert message in r.output, error(r.output) @@ -170,21 +183,21 @@ class Cluster(object): """Simple object around docker-compose cluster.""" def __init__( - self, - local=False, - clickhouse_binary_path=None, - clickhouse_odbc_bridge_binary_path=None, - configs_dir=None, - nodes=None, - docker_compose="docker-compose", - docker_compose_project_dir=None, - docker_compose_file="docker-compose.yml", - environ=None, - thread_fuzzer=False, - collect_service_logs=False, - frame=None, - caller_dir=None, - stress=None + self, + local=False, + clickhouse_binary_path=None, + clickhouse_odbc_bridge_binary_path=None, + configs_dir=None, + nodes=None, + docker_compose="docker-compose", + docker_compose_project_dir=None, + docker_compose_file="docker-compose.yml", + environ=None, + thread_fuzzer=False, + collect_service_logs=False, + frame=None, + caller_dir=None, + stress=None, ): self._bash = {} @@ -249,8 +262,8 @@ def __init__( break if parsed_version: if not ( - parsed_version.startswith(".") - or parsed_version.endswith(".") + parsed_version.startswith(".") + or parsed_version.endswith(".") ): current().context.clickhouse_version = parsed_version @@ -265,12 +278,12 @@ def __init__( self.lock = threading.Lock() def get_clickhouse_binary_from_docker_container( - self, - docker_image, - container_clickhouse_binary_path="/usr/bin/clickhouse", - container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge", - host_clickhouse_binary_path=None, - host_clickhouse_odbc_bridge_binary_path=None, + self, + docker_image, + container_clickhouse_binary_path="/usr/bin/clickhouse", + container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge", + host_clickhouse_binary_path=None, + host_clickhouse_odbc_bridge_binary_path=None, ): """Get clickhouse-server and clickhouse-odbc-bridge binaries from some Docker container. 
@@ -286,12 +299,12 @@ def get_clickhouse_binary_from_docker_container( if host_clickhouse_odbc_bridge_binary_path is None: host_clickhouse_odbc_bridge_binary_path = ( - host_clickhouse_binary_path + "_odbc_bridge" + host_clickhouse_binary_path + "_odbc_bridge" ) with Given( - "I get ClickHouse server binary from docker container", - description=f"{docker_image}", + "I get ClickHouse server binary from docker container", + description=f"{docker_image}", ): with Shell() as bash: bash.timeout = 300 @@ -493,9 +506,12 @@ def __exit__(self, type, value, traceback): for service_list in self.nodes: for service_node in self.nodes[service_list]: with By(f"getting log for {service_node}"): - log_path = f"../_instances" - snode = bash(f"docker-compose logs {service_node} " - f"> {log_path}/{service_node}.log", timeout=1000) + log_path = f"../logs/" + snode = bash( + f"docker-compose logs {service_node} " + f"> {log_path}/{service_node}.log", + timeout=1000, + ) if snode.exitcode != 0: break self.down() @@ -641,16 +657,16 @@ def up(self, timeout=30 * 60): continue if ( - cmd.exitcode == 0 - and "is unhealthy" not in cmd.output - and "Exit" not in ps_cmd.output + cmd.exitcode == 0 + and "is unhealthy" not in cmd.output + and "Exit" not in ps_cmd.output ): break if ( - cmd.exitcode != 0 - or "is unhealthy" in cmd.output - or "Exit" in ps_cmd.output + cmd.exitcode != 0 + or "is unhealthy" in cmd.output + or "Exit" in ps_cmd.output ): fail("could not bring up docker-compose cluster") @@ -663,17 +679,17 @@ def up(self, timeout=30 * 60): self.running = True def command( - self, - node, - command, - message=None, - exitcode=None, - steps=True, - bash=None, - no_checks=False, - use_error=True, - *args, - **kwargs, + self, + node, + command, + message=None, + exitcode=None, + steps=True, + bash=None, + no_checks=False, + use_error=True, + *args, + **kwargs, ): """Execute and check command. :param node: name of the service @@ -683,7 +699,7 @@ def command( :param steps: don't break command into steps, default: True """ with By( - "executing command", description=command, format_description=False + "executing command", description=command, format_description=False ) if steps else NullStep(): if bash is None: bash = self.bash(node) @@ -698,15 +714,15 @@ def command( if exitcode is not None: with Then( - f"exitcode should be {exitcode}", format_name=False + f"exitcode should be {exitcode}", format_name=False ) if steps else NullStep(): assert r.exitcode == exitcode, error(r.output) if message is not None: with Then( - f"output should contain message", - description=message, - format_description=False, + f"output should contain message", + description=message, + format_description=False, ) if steps else NullStep(): assert message in r.output, error(r.output) @@ -717,23 +733,23 @@ class DatabaseNode(Node): """Common tools for Database nodes.""" def query( - self, - sql, - client_command, - message=None, - exitcode=None, - steps=True, - no_checks=False, - raise_on_exception=False, - step=By, - settings=None, - retry_count=5, - messages_to_retry=None, - retry_delay=5, - secure=False, - max_query_output_in_bytes="-0", - *args, - **kwargs, + self, + sql, + client_command, + message=None, + exitcode=None, + steps=True, + no_checks=False, + raise_on_exception=False, + step=By, + settings=None, + retry_count=5, + messages_to_retry=None, + retry_delay=5, + secure=False, + max_query_output_in_bytes="-0", + *args, + **kwargs, ): """Execute and check query. 
:param sql: sql query @@ -784,9 +800,9 @@ def query( {command} """ with step( - "executing command", - description=description, - format_description=False, + "executing command", + description=description, + format_description=False, ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) @@ -805,7 +821,7 @@ def query( command = f'echo -e "{sql}" | {client_command}{client_options} 2>&1' with step( - "executing command", description=command, format_description=False + "executing command", description=command, format_description=False ) if steps else NullStep(): try: r = self.cluster.bash(self.name)(command, *args, **kwargs) @@ -840,7 +856,7 @@ def query( if message is not None: with Then( - f"output should contain message", description=message + f"output should contain message", description=message ) if steps else NullStep(): assert message in r.output, error(r.output) @@ -858,22 +874,23 @@ class MySQLNode(DatabaseNode): """MySQL server node.""" def query( - self, - sql, - message=None, - exitcode=None, - steps=True, - no_checks=False, - raise_on_exception=False, - step=By, - settings=None, - retry_count=5, - messages_to_retry=None, - retry_delay=5, - max_query_output_in_bytes="-0", - client_command=None, - *args, - **kwargs, ): + self, + sql, + message=None, + exitcode=None, + steps=True, + no_checks=False, + raise_on_exception=False, + step=By, + settings=None, + retry_count=5, + messages_to_retry=None, + retry_delay=5, + max_query_output_in_bytes="-0", + client_command=None, + *args, + **kwargs, + ): """Execute and check query. :param sql: sql query :param message: expected message that should be in the output, default: None @@ -890,14 +907,24 @@ def query( :param client_command: database client command, default: None (use default) """ if client_command is None: - client_command = "mysql -h mysql-master -u root --password=root --database=test" + client_command = ( + "mysql -h mysql-master -u root --password=root --database=test" + ) - return super(MySQLNode, self).query(sql=sql, client_command=client_command, message=message, exitcode=exitcode, - steps=steps, - no_checks=no_checks, step=step, settings=settings, retry_count=retry_count, - messages_to_retry=messages_to_retry, - retry_delay=retry_delay, - max_query_output_in_bytes=max_query_output_in_bytes) + return super(MySQLNode, self).query( + sql=sql, + client_command=client_command, + message=message, + exitcode=exitcode, + steps=steps, + no_checks=no_checks, + step=step, + settings=settings, + retry_count=retry_count, + messages_to_retry=messages_to_retry, + retry_delay=retry_delay, + max_query_output_in_bytes=max_query_output_in_bytes, + ) class ClickHouseNode(DatabaseNode): @@ -952,10 +979,10 @@ def wait_clickhouse_healthy(self, timeout=300): for attempt in retries(timeout=timeout, delay=1): with attempt: if ( - self.query( - "SELECT version()", no_checks=1, steps=False - ).exitcode - != 0 + self.query( + "SELECT version()", no_checks=1, steps=False + ).exitcode + != 0 ): fail("ClickHouse server is not healthy") node_version = self.query( @@ -995,8 +1022,8 @@ def stop_clickhouse(self, timeout=300, safe=True): if i > 0 and i % 20 == 0: self.command(f"kill -KILL {pid}", steps=False) if ( - self.command(f"ps {pid}", steps=False, no_checks=True).exitcode - != 1 + self.command(f"ps {pid}", steps=False, no_checks=True).exitcode + != 1 ): fail("pid still alive") @@ -1004,12 +1031,12 @@ def stop_clickhouse(self, timeout=300, safe=True): self.command("rm -rf /tmp/clickhouse-server.pid", exitcode=0, 
steps=False) def start_clickhouse( - self, - timeout=300, - wait_healthy=True, - retry_count=5, - user=None, - thread_fuzzer=False, + self, + timeout=300, + wait_healthy=True, + retry_count=5, + user=None, + thread_fuzzer=False, ): """Start ClickHouse server.""" pid = self.clickhouse_pid() @@ -1045,10 +1072,10 @@ def start_clickhouse( for attempt in retries(timeout=timeout, delay=1): with attempt: if ( - self.command( - "ls /tmp/clickhouse-server.pid", steps=False, no_checks=True - ).exitcode - != 0 + self.command( + "ls /tmp/clickhouse-server.pid", steps=False, no_checks=True + ).exitcode + != 0 ): fail("no pid file yet") @@ -1056,7 +1083,7 @@ def start_clickhouse( self.wait_clickhouse_healthy(timeout=timeout) def restart_clickhouse( - self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None + self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None ): """Restart ClickHouse server.""" if self.clickhouse_pid(): @@ -1074,12 +1101,12 @@ def stop(self, timeout=300, safe=True, retry_count=5): ) def start( - self, - timeout=300, - start_clickhouse=True, - wait_healthy=True, - retry_count=5, - user=None, + self, + timeout=300, + start_clickhouse=True, + wait_healthy=True, + retry_count=5, + user=None, ): """Start node.""" super(ClickHouseNode, self).start(timeout=timeout, retry_count=retry_count) @@ -1092,13 +1119,13 @@ def start( ) def restart( - self, - timeout=300, - safe=True, - start_clickhouse=True, - wait_healthy=True, - retry_count=5, - user=None, + self, + timeout=300, + safe=True, + start_clickhouse=True, + wait_healthy=True, + retry_count=5, + user=None, ): """Restart node.""" if self.clickhouse_pid(): @@ -1110,15 +1137,15 @@ def restart( self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user) def hash_query( - self, - sql, - hash_utility="sha1sum", - steps=True, - step=By, - settings=None, - secure=False, - *args, - **kwargs, + self, + sql, + hash_utility="sha1sum", + steps=True, + step=By, + settings=None, + secure=False, + *args, + **kwargs, ): """Execute sql query inside the container and return the hash of the output. @@ -1148,9 +1175,9 @@ def hash_query( {command} """ with step( - "executing command", - description=description, - format_description=False, + "executing command", + description=description, + format_description=False, ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) @@ -1162,7 +1189,7 @@ def hash_query( name, value = setting command += f' --{name} "{value}"' with step( - "executing command", description=command, format_description=False + "executing command", description=command, format_description=False ) if steps else NullStep(): try: r = self.cluster.bash(self.name)(command, *args, **kwargs) @@ -1175,15 +1202,15 @@ def hash_query( return r.output def diff_query( - self, - sql, - expected_output, - steps=True, - step=By, - settings=None, - secure=False, - *args, - **kwargs, + self, + sql, + expected_output, + steps=True, + step=By, + settings=None, + secure=False, + *args, + **kwargs, ): """Execute inside the container but from the host and compare its output to file that is located on the host. 
@@ -1217,9 +1244,9 @@ def diff_query( {command} """ with step( - "executing command", - description=description, - format_description=False, + "executing command", + description=description, + format_description=False, ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) @@ -1231,7 +1258,7 @@ def diff_query( name, value = setting command += f' --{name} "{value}"' with step( - "executing command", description=command, format_description=False + "executing command", description=command, format_description=False ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) @@ -1242,23 +1269,24 @@ def diff_query( assert r.exitcode == 0, error(r.output) def query( - self, - sql, - message=None, - exitcode=None, - steps=True, - no_checks=False, - raise_on_exception=False, - step=By, - settings=None, - retry_count=5, - messages_to_retry=None, - retry_delay=5, - max_query_output_in_bytes="-0", - client_command=None, - secure=False, - *args, - **kwargs, ): + self, + sql, + message=None, + exitcode=None, + steps=True, + no_checks=False, + raise_on_exception=False, + step=By, + settings=None, + retry_count=5, + messages_to_retry=None, + retry_delay=5, + max_query_output_in_bytes="-0", + client_command=None, + secure=False, + *args, + **kwargs, + ): """Execute and check query. :param sql: sql query :param message: expected message that should be in the output, default: None @@ -1280,9 +1308,18 @@ def query( if secure: client_command += " -s" - return super(ClickHouseNode, self).query(sql=sql, client_command=client_command, message=message, - exitcode=exitcode, steps=steps, - no_checks=no_checks, step=step, settings=settings, - retry_count=retry_count, messages_to_retry=messages_to_retry, - retry_delay=retry_delay, secure=secure, - max_query_output_in_bytes=max_query_output_in_bytes) + return super(ClickHouseNode, self).query( + sql=sql, + client_command=client_command, + message=message, + exitcode=exitcode, + steps=steps, + no_checks=no_checks, + step=step, + settings=settings, + retry_count=retry_count, + messages_to_retry=messages_to_retry, + retry_delay=retry_delay, + secure=secure, + max_query_output_in_bytes=max_query_output_in_bytes, + ) diff --git a/tests/Testflows/helpers/common.py b/tests/integration/helpers/common.py similarity index 88% rename from tests/Testflows/helpers/common.py rename to tests/integration/helpers/common.py index 69a7833a8..dd480c55c 100644 --- a/tests/Testflows/helpers/common.py +++ b/tests/integration/helpers/common.py @@ -1,15 +1,14 @@ import os -import uuid -import time import platform +import time +import uuid import xml.etree.ElementTree as xmltree -from collections import namedtuple import testflows.settings as settings -from testflows.core import * +from testflows._core.testtype import TestSubType from testflows.asserts import error +from testflows.core import * from testflows.core.name import basename, parentname -from testflows._core.testtype import TestSubType def current_cpu(): @@ -77,11 +76,11 @@ def getuid(with_test_name=False): @TestStep(Given) def instrument_clickhouse_server_log( - self, - node=None, - test=None, - clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", - always_dump=False, + self, + node=None, + test=None, + clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", + always_dump=False, ): """Instrument clickhouse-server.log for the current test (default) by adding start and end messages that include test name to log @@ -99,8 +98,8 @@ def 
instrument_clickhouse_server_log( with By("getting current log size"): cmd = node.command(f"stat --format=%s {clickhouse_server_log}") if ( - cmd.output - == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory" + cmd.output + == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory" ): start_logsize = 0 else: @@ -118,7 +117,7 @@ def instrument_clickhouse_server_log( return with Finally( - "adding test name end message to the clickhouse-server.log", flags=TE + "adding test name end message to the clickhouse-server.log", flags=TE ): node.command( f'echo -e "\\n-- end: {test.name} --\\n" >> {clickhouse_server_log}' @@ -185,7 +184,7 @@ def __init__(self, name, attributes): def create_xml_config_content( - entries, config_file, config_d_dir="/etc/clickhouse-server/config.d" + entries, config_file, config_d_dir="/etc/clickhouse-server/config.d" ): """Create XML configuration file from a dictionary. @@ -235,7 +234,7 @@ def create_xml_tree(entries, root): def add_invalid_config( - config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None + config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None ): """Check that ClickHouse errors when trying to load invalid configuration file.""" cluster = current().context.cluster @@ -253,8 +252,8 @@ def add_invalid_config( node.command(command, steps=False, exitcode=0) with Then( - f"{config.preprocessed_name} should be updated", - description=f"timeout {timeout}", + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", ): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}" @@ -326,14 +325,14 @@ def add_invalid_config( def add_config( - config, - timeout=300, - restart=False, - modify=False, - node=None, - user=None, - wait_healthy=True, - check_preprocessed=True, + config, + timeout=300, + restart=False, + modify=False, + node=None, + user=None, + wait_healthy=True, + check_preprocessed=True, ): """Add dynamic configuration file to ClickHouse. 
@@ -441,8 +440,8 @@ def wait_for_config_to_be_loaded(user=None): if check_preprocessed: with Then( - f"{config.preprocessed_name} should be updated", - description=f"timeout {timeout}", + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", ): check_preprocessed_config_is_updated() @@ -466,8 +465,8 @@ def wait_for_config_to_be_loaded(user=None): node.command(f"rm -rf {config.path}", exitcode=0) with Then( - f"{config.preprocessed_name} should be updated", - description=f"timeout {timeout}", + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", ): check_preprocessed_config_is_updated(after_removal=True) @@ -477,14 +476,14 @@ def wait_for_config_to_be_loaded(user=None): @TestStep(Given) def copy( - self, - dest_node, - src_path, - dest_path, - bash=None, - binary=False, - eof="EOF", - src_node=None, + self, + dest_node, + src_path, + dest_path, + bash=None, + binary=False, + eof="EOF", + src_node=None, ): """Copy file from source to destination node.""" if binary: @@ -506,7 +505,7 @@ def copy( @TestStep(Given) def add_user_to_group_on_node( - self, node=None, group="clickhouse", username="clickhouse" + self, node=None, group="clickhouse", username="clickhouse" ): """Add user {username} into group {group}.""" if node is None: @@ -596,37 +595,37 @@ def set_envs_on_node(self, envs, node=None): @TestStep(Given) def create_cluster( - self, - local=False, - clickhouse_binary_path=None, - clickhouse_odbc_bridge_binary_path=None, - configs_dir=None, - nodes=None, - docker_compose="docker-compose", - docker_compose_project_dir=None, - docker_compose_file="docker-compose.yml", - environ=None, - thread_fuzzer=False, - collect_service_logs=None, - stress=None, - caller_dir=None, - frame=None, + self, + local=False, + clickhouse_binary_path=None, + clickhouse_odbc_bridge_binary_path=None, + configs_dir=None, + nodes=None, + docker_compose="docker-compose", + docker_compose_project_dir=None, + docker_compose_file="docker-compose.yml", + environ=None, + thread_fuzzer=False, + collect_service_logs=None, + stress=None, + caller_dir=None, + frame=None, ): """Create docker-compose test environment cluster.""" with Cluster( - local=local, - clickhouse_binary_path=clickhouse_binary_path, - clickhouse_odbc_bridge_binary_path=clickhouse_odbc_bridge_binary_path, - configs_dir=configs_dir, - nodes=nodes, - docker_compose=docker_compose, - docker_compose_project_dir=docker_compose_project_dir, - docker_compose_file=docker_compose_file, - environ=environ, - thread_fuzzer=thread_fuzzer, - collect_service_logs=collect_service_logs, - stress=stress, - frame=frame, - caller_dir=caller_dir, + local=local, + clickhouse_binary_path=clickhouse_binary_path, + clickhouse_odbc_bridge_binary_path=clickhouse_odbc_bridge_binary_path, + configs_dir=configs_dir, + nodes=nodes, + docker_compose=docker_compose, + docker_compose_project_dir=docker_compose_project_dir, + docker_compose_file=docker_compose_file, + environ=environ, + thread_fuzzer=thread_fuzzer, + collect_service_logs=collect_service_logs, + stress=stress, + frame=frame, + caller_dir=caller_dir, ) as cluster: yield cluster diff --git a/tests/Testflows/image/Dockerfile b/tests/integration/image/Dockerfile similarity index 100% rename from tests/Testflows/image/Dockerfile rename to tests/integration/image/Dockerfile diff --git a/tests/Testflows/image/build.sh b/tests/integration/image/build.sh similarity index 100% rename from tests/Testflows/image/build.sh rename to tests/integration/image/build.sh diff --git 
a/tests/Testflows/image/dockerd-start.sh b/tests/integration/image/dockerd-start.sh similarity index 100% rename from tests/Testflows/image/dockerd-start.sh rename to tests/integration/image/dockerd-start.sh diff --git a/tests/integration/logs/README.md b/tests/integration/logs/README.md new file mode 100644 index 000000000..089882dc1 --- /dev/null +++ b/tests/integration/logs/README.md @@ -0,0 +1 @@ +Just folder for CI/CD logs :) \ No newline at end of file diff --git a/tests/integration/regression.py b/tests/integration/regression.py new file mode 100755 index 000000000..0b72b3690 --- /dev/null +++ b/tests/integration/regression.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 + +import os +import sys + + +from testflows.core import * + + +append_path(sys.path, "..") + +from integration.helpers.argparser import argparser +from integration.helpers.common import check_clickhouse_version +from integration.helpers.common import create_cluster +from integration.requirements.requirements import * +from integration.tests.steps.steps_global import * + +xfails = { + "schema changes/table recreation with different datatypes": [ + (Fail, "debezium data conflict crash") + ], + "schema changes/consistency": [(Fail, "doesn't finished")], + "primary keys/no primary key": [ + (Fail, "https://github.com/Altinity/clickhouse-sink-connector/issues/39") + ], + "delete/no primary key innodb": [(Fail, "doesn't work in raw")], + "delete/no primary key": [(Fail, "doesn't work in raw")], + "update/no primary key innodb": [(Fail, "makes delete")], + "update/no primary key": [(Fail, "makes delete")], + "truncate/no primary key innodb": [(Fail, "doesn't work")], + "truncate/no primary key": [(Fail, "doesn't work")], + "consistency": [(Fail, "doesn't finished")], + "partition limits": [(Fail, "doesn't ready")], + "types/json": [(Fail, "doesn't work in raw")], + "types/double": [ + (Fail, "https://github.com/Altinity/clickhouse-sink-connector/issues/170") + ], + "types/bigint": [ + (Fail, "https://github.com/Altinity/clickhouse-sink-connector/issues/15") + ], +} +xflags = {} + + +@TestModule +@ArgumentParser(argparser) +@XFails(xfails) +@XFlags(xflags) +@Name("mysql to clickhouse replication") +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication("1.0"), + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Consistency_Select("1.0"), + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLVersions("1.0"), + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree( + "1.0" + ), + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree( + "1.0" + ) +) +@Specifications(SRS030_MySQL_to_ClickHouse_Replication) +def regression( + self, + local, + clickhouse_binary_path, + clickhouse_version, + stress=None, + thread_fuzzer=None, + collect_service_logs=None, +): + """ClickHouse regression for MySql to ClickHouse replication.""" + nodes = { + "debezium": ("debezium",), + "mysql-master": ("mysql-master",), + "clickhouse": ("clickhouse", "clickhouse1", "clickhouse2", "clickhouse3"), + "bash-tools": ("bash-tools",), + "schemaregistry": ("schemaregistry",), + "sink": ("sink",), + "zookeeper": ("zookeeper",), + } + + self.context.clickhouse_version = clickhouse_version + + if stress is not None: + self.context.stress = stress + + if collect_service_logs is not None: + self.context.collect_service_logs = collect_service_logs + + env = "env" + + with Given("docker-compose cluster"): + cluster = create_cluster( + local=local, + 
clickhouse_binary_path=clickhouse_binary_path, + thread_fuzzer=thread_fuzzer, + collect_service_logs=collect_service_logs, + stress=stress, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + caller_dir=os.path.join(current_dir()), + ) + + self.context.cluster = cluster + + if check_clickhouse_version("<21.4")(self): + skip(reason="only supported on ClickHouse version >= 21.4") + + self.context.node = cluster.node("clickhouse1") + + with And("I create test database in ClickHouse"): + create_database(name="test") + + modules = [ + "sanity", + "autocreate", + "insert", + "update", + "delete", + "truncate", + "deduplication", + "types", + "primary_keys", + "schema_changes", + "multiple_tables", + "virtual_columns", + "partition_limits", + "columns_inconsistency" + + ] + for module in modules: + Feature(run=load(f"tests.{module}", "module")) + + Feature(run=load("tests.consistency", "module")) + Feature(run=load("tests.sysbench", "module")) + Feature(run=load("tests.manual_section", "module")) + + +if __name__ == "__main__": + regression() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.md b/tests/integration/requirements/requirements.md similarity index 70% rename from tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.md rename to tests/integration/requirements/requirements.md index 10c723a02..5357a539a 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.md +++ b/tests/integration/requirements/requirements.md @@ -13,20 +13,23 @@ * 4.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication](#rqsrs-030clickhousemysqltoclickhousereplication) * 4.2 [Consistency](#consistency) * 4.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency](#rqsrs-030clickhousemysqltoclickhousereplicationconsistency) - * 4.2.2 [Deduplication](#deduplication) - * 4.2.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencydeduplication) - * 4.2.3 [Selects](#selects) - * 4.2.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencyselect) - * 4.2.4 [Only Once Guarantee](#only-once-guarantee) - * 4.2.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee](#rqsrs-030clickhousemysqltoclickhousereplicationonlyonceguarantee) + * 4.2.2 [Multiple MySQL Masters](#multiple-mysql-masters) + * 4.2.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.MultipleMySQLMasters](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencymultiplemysqlmasters) + * 4.2.3 [Deduplication](#deduplication) + * 4.2.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencydeduplication) + * 4.2.4 [Selects](#selects) + * 4.2.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencyselect) + * 4.2.5 [Only Once Guarantee](#only-once-guarantee) + * 4.2.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee](#rqsrs-030clickhousemysqltoclickhousereplicationonlyonceguarantee) * 4.3 [Transactions](#transactions) * 4.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Transactions](#rqsrs-030clickhousemysqltoclickhousereplicationtransactions) * 4.4 [Supported Versions](#supported-versions) * 4.4.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLVersions](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlversions) * 4.5 [Supported Storage Engines](#supported-storage-engines) - * 4.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree ](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetree-) + * 4.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetree) * 4.5.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree.VirtualColumnNames](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetreevirtualcolumnnames) - * 4.5.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree ](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginescollapsingmergetree-) + * 4.5.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplicatedreplacingmergetree) + * 4.5.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplicatedreplacingmergetreedifferentversioncolumnnames) * 4.6 [Data Types](#data-types) * 4.6.1 [Integer Types](#integer-types) * 4.6.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.IntegerTypes](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesintegertypes) @@ -51,50 +54,63 @@ * 4.6.10.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.JSON](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesjson) * 4.6.11 [Year](#year) * 4.6.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Year](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesyear) + * 4.6.12 [Bytes](#bytes) + * 4.6.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesbytes) * 4.7 [Queries](#queries) - * 4.7.1 [Inserts](#inserts) - * 4.7.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts](#rqsrs-030clickhousemysqltoclickhousereplicationinserts) - * 4.7.2 [Updates](#updates) - * 4.7.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates](#rqsrs-030clickhousemysqltoclickhousereplicationupdates) - * 4.7.3 [Deletes](#deletes) - * 4.7.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes](#rqsrs-030clickhousemysqltoclickhousereplicationdeletes) - * 4.7.4 [Auto Create Table](#auto-create-table) - * 4.7.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable](#rqsrs-030clickhousemysqltoclickhousereplicationautocreatetable) - * 4.7.5 [Auto Drop Table](#auto-drop-table) - * 4.7.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable](#rqsrs-030clickhousemysqltoclickhousereplicationautodroptable) - * 4.7.6 [Modify Column](#modify-column) - * 4.7.6.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn](#rqsrs-030clickhousemysqltoclickhousereplicationmodifycolumn) - * 4.7.7 [Add Column](#add-column) - * 4.7.7.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn](#rqsrs-030clickhousemysqltoclickhousereplicationaddcolumn) - * 4.7.8 [Remove Column](#remove-column) - * 4.7.8.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn](#rqsrs-030clickhousemysqltoclickhousereplicationremovecolumn) - * 4.8 [Primary Key](#primary-key) - * 4.8.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey](#rqsrs-030clickhousemysqltoclickhousereplicationnoprimarykey) - * 4.8.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeysimple) - * 4.8.3 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeycomposite) - * 4.9 [Multiple Upstream Servers](#multiple-upstream-servers) - * 4.9.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipleupstreamservers) - * 4.10 [Multiple Downstream Servers](#multiple-downstream-servers) - * 4.10.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipledownstreamservers) - * 4.11 [Archival Mode](#archival-mode) - * 4.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode](#rqsrs-030clickhousemysqltoclickhousereplicationarchivalmode) - * 4.12 [Bootstrapping Mode](#bootstrapping-mode) - * 4.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode](#rqsrs-030clickhousemysqltoclickhousereplicationbootstrappingmode) - * 4.13 [Binlog Position](#binlog-position) - * 4.13.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition](#rqsrs-030clickhousemysqltoclickhousereplicationbinlogposition) - * 4.14 [Column Mapping And Transformation Rules](#column-mapping-and-transformation-rules) - * 4.14.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnmappingandtransformationrules) - * 4.15 [Latency](#latency) - * 4.15.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency](#rqsrs-030clickhousemysqltoclickhousereplicationlatency) - * 4.16 [Performance ](#performance-) - * 4.16.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance](#rqsrs-030clickhousemysqltoclickhousereplicationperformance) - * 4.16.2 [Large Daily Data Volumes](#large-daily-data-volumes) - * 4.16.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes](#rqsrs-030clickhousemysqltoclickhousereplicationperformancelargedailydatavolumes) - * 4.17 [Settings](#settings) - * 4.17.1 [clickhouse.topic2table.map](#clickhousetopic2tablemap) - * 4.17.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap](#rqsrs-030clickhousemysqltoclickhousereplicationsettingstopic2tablemap) - * 4.18 [Prometheus ](#prometheus-) - * 4.18.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus](#rqsrs-030clickhousemysqltoclickhousereplicationprometheus) + * 4.7.1 [Test Feature Diagram](#test-feature-diagram) + * 4.7.2 [Inserts](#inserts) + * 4.7.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesinserts) + * 4.7.2.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesinsertspartitionlimits) + * 4.7.3 [Updates](#updates) + * 4.7.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesupdates) + * 4.7.4 [Deletes](#deletes) + * 4.7.4.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesdeletes) + * 4.8 [Table Schema Creation](#table-schema-creation) + * 4.8.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreation) + * 4.8.2 [Auto Create](#auto-create) + * 4.8.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationautocreate) + * 4.8.2.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationmultipleautocreate) + * 4.8.3 [Auto Drop](#auto-drop) + * 4.8.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationautodrop) + * 4.9 [Columns](#columns) + * 4.9.1 [Modify](#modify) + * 4.9.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsmodify) + * 4.9.2 [Add](#add) + * 4.9.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsadd) + * 4.9.3 [Remove](#remove) + * 4.9.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsremove) + * 4.10 [Primary Key](#primary-key) + * 4.10.1 [No Primary Key](#no-primary-key) + * 4.10.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeyno) + * 4.10.2 [Simple Primary Key](#simple-primary-key) + * 4.10.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeysimple) + * 4.10.3 [Composite Primary Key](#composite-primary-key) + * 4.10.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeycomposite) + * 4.11 [Multiple Upstream Servers](#multiple-upstream-servers) + * 4.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipleupstreamservers) + * 4.12 [Multiple Downstream Servers](#multiple-downstream-servers) + * 4.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipledownstreamservers) + * 4.13 [Archival Mode](#archival-mode) + * 4.13.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode](#rqsrs-030clickhousemysqltoclickhousereplicationarchivalmode) + * 4.14 [Bootstrapping Mode](#bootstrapping-mode) + * 4.14.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode](#rqsrs-030clickhousemysqltoclickhousereplicationbootstrappingmode) + * 4.15 [Binlog Position](#binlog-position) + * 4.15.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition](#rqsrs-030clickhousemysqltoclickhousereplicationbinlogposition) + * 4.16 [Column Mapping And Transformation Rules](#column-mapping-and-transformation-rules) + * 4.16.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnmappingandtransformationrules) + * 4.17 [Columns Inconsistency](#columns-inconsistency) + * 4.17.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsinconsistency) + * 4.18 [Latency](#latency) + * 4.18.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency](#rqsrs-030clickhousemysqltoclickhousereplicationlatency) + * 4.19 [Performance ](#performance-) + * 4.19.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance](#rqsrs-030clickhousemysqltoclickhousereplicationperformance) + * 4.19.2 [Large Daily Data Volumes](#large-daily-data-volumes) + * 4.19.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes](#rqsrs-030clickhousemysqltoclickhousereplicationperformancelargedailydatavolumes) + * 4.20 [Settings](#settings) + * 4.20.1 [clickhouse.topic2table.map](#clickhousetopic2tablemap) + * 4.20.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap](#rqsrs-030clickhousemysqltoclickhousereplicationsettingstopic2tablemap) + * 4.21 [Prometheus ](#prometheus-) + * 4.21.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus](#rqsrs-030clickhousemysqltoclickhousereplicationprometheus) ## Introduction @@ -209,6 +225,13 @@ version: 1.0 [Altinity Sink Connector] SHALL support consistent data replication from [MySQL] to [CLickHouse]. +#### Multiple MySQL Masters + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.MultipleMySQLMasters + +[Altinity Sink Connector] SHALL support consistent data replication from [MySQL] to [CLickHouse] when one or more MySQL +masters go down. + #### Deduplication ##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication @@ -268,7 +291,7 @@ version: 1.0 ### Supported Storage Engines -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to @@ -278,13 +301,20 @@ version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to -"ReplacingMergeTree" [ClickHouse] table engine and virtual column names should be "_version" and "_sign". +"ReplacingMergeTree" [ClickHouse] table engine, and the virtual column names should default to "_version" and "_sign". + + +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree +version: 1.0 + +[Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to +"ReplicatedReplacingMergeTree" [ClickHouse] table engine. -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to -"CollapsingMergeTree" [ClickHouse] table engine. +"ReplicatedReplacingMergeTree" [ClickHouse] table engine with non-default version column names.
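+
+For illustration, a target table satisfying this requirement could look as follows; the database,
+table, ZooKeeper path, and column names here are hypothetical and not part of the requirement:
+
+```sql
+-- Replicated target table whose version and sign columns use
+-- non-default names instead of "_version" and "_sign".
+CREATE TABLE test.orders
+(
+    `id` Int32,
+    `amount` Decimal(10, 2),
+    `row_version` UInt64,
+    `row_sign` Int8
+)
+ENGINE = ReplicatedReplacingMergeTree(
+    '/clickhouse/tables/{shard}/test/orders', '{replica}', row_version)
+ORDER BY id;
+```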
### Data Types @@ -478,80 +508,178 @@ Data types connection table: |:------|:----------:| | Year | Int32 | +#### Bytes + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes +version: 1.0 + +[Altinity Sink Connector] SHALL support data replication to [CLickHouse] of tables that contain columns with 'BIT(m)' +data types where m: 2 - 64, as supported by [MySQL]. + +Data types connection table: + +| MySQL | ClickHouse | +|:-------|:----------:| +| BIT(m) | String | + + ### Queries +#### Test Feature Diagram + +```mermaid +flowchart TB; + + classDef yellow fill:#ffff33,stroke:#333,stroke-width:4px,color:black; + classDef yellow2 fill:#ffff33,stroke:#333,stroke-width:4px,color:red; + classDef green fill:#00ff33,stroke:#333,stroke-width:4px,color:black; + classDef red fill:red,stroke:#333,stroke-width:4px,color:black; + classDef blue fill:blue,stroke:#333,stroke-width:4px,color:white; + + subgraph O["Queries Test Feature Diagram"] + A-->D-->C-->B + + 1A---2A---3A + 1D---2D + 1C---2C---3C + 1B---2B---3B---4B---5B---6B---7B + + subgraph A["User input MySQL"] + + 1A["INSERT"]:::green + 2A["DELETE"]:::green + 3A["UPDATE"]:::green + + end + + subgraph D["Engines"] + 1D["with table Engine"]:::yellow + 2D["without table Engine"]:::yellow + end + + subgraph C["Different primary keys"] + 1C["simple primary key"]:::blue + 2C["composite primary key"]:::blue + 3C["no primary key"]:::blue + end + + subgraph B["Different cases"] + 1B["one part one partition"]:::green + 2B["multiple parts one partition"]:::green + 3B["multiple partitions"]:::green + 4B["very large data set"]:::green + 5B["lots of small data sets"]:::green + 6B["table with large number of partitions"]:::green + 7B["table with large number of parts in partition"]:::green + end + + + + + end +``` + #### Inserts -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts version: 1.0 [Altinity Sink Connector] SHALL support new data inserts replication from [MySQL] to [CLickHouse]. +###### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits +version: 1.0 + +[Altinity Sink Connector] SHALL support correct data inserts replication from [MySQL] to [CLickHouse] when partition +limits are hit, or SHALL avoid such situations. + + #### Updates -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates version: 1.0 [Altinity Sink Connector] SHALL support data updates replication from [MySQL] to [CLickHouse]. #### Deletes -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes version: 1.0 [Altinity Sink Connector] SHALL support data deletes replication from [MySQL] to [CLickHouse]. -#### Auto Create Table +### Table Schema Creation + +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation +version: 1.0 + +[Altinity Sink Connector] SHALL support the following ways to replicate schema from [MySQL] to [CLickHouse]: +* auto-create option +* `clickhouse_loader` script +* `chump` utility -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable +#### Auto Create + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate version: 1.0 [Altinity Sink Connector] SHALL support auto table creation from [MySQL] to [CLickHouse].
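+
+As an illustrative sketch only (database, table, and column names are hypothetical, and the exact
+auto-created schema follows the data type mapping tables in this specification):
+
+```sql
+-- MySQL source table
+CREATE TABLE test.employees
+(
+    id   INT NOT NULL,
+    name VARCHAR(100),
+    PRIMARY KEY (id)
+);
+
+-- Shape of a table the connector could auto-create in ClickHouse,
+-- using the default virtual column names "_version" and "_sign"
+CREATE TABLE test.employees
+(
+    `id` Int32,
+    `name` Nullable(String),
+    `_version` UInt64,
+    `_sign` Int8
+)
+ENGINE = ReplacingMergeTree(_version)
+ORDER BY id;
+```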
-#### Auto Drop Table +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate +version: 1.0 + +[Altinity Sink Connector] SHALL support auto creation of multiple tables from [MySQL] to [CLickHouse]. + +#### Auto Drop -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop version: 1.0 [Altinity Sink Connector] SHALL support `DROP TABLE` query from [MySQL] to [CLickHouse]. -#### Modify Column +### Columns -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn +#### Modify + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify version: 1.0 [Altinity Sink Connector] SHALL support `MODIFY COLUMN` query from [MySQL] to [CLickHouse]. -#### Add Column +#### Add -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add version: 1.0 [Altinity Sink Connector] SHALL support `ADD COLUMN` query from [MySQL] to [CLickHouse]. -#### Remove Column +#### Remove -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove version: 1.0 [Altinity Sink Connector] SHALL support `REMOVE COLUMN` query from [MySQL] to [CLickHouse]. ### Primary Key -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey +#### No Primary Key + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries to tables with no `PRIMARY KEY`. -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple +#### Simple Primary Key + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries with the same order as simple `PRIMARY KEY` does. +#### Composite Primary Key -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries with the same order @@ -604,6 +732,14 @@ version: 1.0 [Altinity Sink Connector] SHALL support [MySQL] replication to [CLickHouse] with support for defining column mapping and transformations rules. +### Columns Inconsistency + +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency +version: 1.0 + +[Altinity Sink Connector] SHALL support [MySQL] replication to [CLickHouse] replica table when it has fewer columns. +[MySQL] replication to [CLickHouse] is not available in all other cases of columns inconsistency.
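+
+A sketch of the supported case (schema and names are hypothetical; the point is only that the
+[CLickHouse] side omits a column):
+
+```sql
+-- MySQL source table with three user columns
+CREATE TABLE test.users
+(
+    id   INT NOT NULL,
+    name VARCHAR(100),
+    note TEXT,
+    PRIMARY KEY (id)
+);
+
+-- ClickHouse replica deliberately omits `note`; replication of the
+-- remaining columns is still expected to work under this requirement
+CREATE TABLE test.users
+(
+    `id` Int32,
+    `name` Nullable(String),
+    `_version` UInt64,
+    `_sign` Int8
+)
+ENGINE = ReplacingMergeTree(_version)
+ORDER BY id;
+```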
+ ### Latency #### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency diff --git a/tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.py b/tests/integration/requirements/requirements.py similarity index 73% rename from tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.py rename to tests/integration/requirements/requirements.py index 38db4c7d5..10619a6ca 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/requirements/requirements.py +++ b/tests/integration/requirements/requirements.py @@ -1,6 +1,6 @@ # These requirements were auto generated # from software requirements specification (SRS) -# document by TestFlows v1.9.220712.1163352. +# document by TestFlows v1.9.230125.1024636. # Do not edit by hand but re-generate instead # using 'tfs requirements generate' command. from testflows.core import Specification @@ -37,7 +37,7 @@ ), link=None, level=4, - num='4.2.2.1' + num='4.2.3.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Consistency_Select = Requirement( @@ -53,7 +53,7 @@ ), link=None, level=4, - num='4.2.3.1' + num='4.2.4.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_OnlyOnceGuarantee = Requirement( @@ -88,7 +88,7 @@ ), link=None, level=4, - num='4.2.4.1' + num='4.2.5.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Transactions = Requirement( @@ -127,8 +127,8 @@ num='4.4.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree_ = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree ', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree', version='1.0', priority=None, group=None, @@ -153,7 +153,8 @@ uid=None, description=( '[Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to\n' - '"ReplacingMergeTree" [ClickHouse] table engine and virtual column names should be "_version" and "_sign".\n' + '"ReplacingMergeTree" [ClickHouse] table engine, and the virtual column names should default to "_version" and "_sign".\n' + '\n' '\n' ), link=None, @@ -161,8 +162,8 @@ num='4.5.1.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_CollapsingMergeTree_ = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree ', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree', version='1.0', priority=None, group=None, @@ -170,7 +171,7 @@ uid=None, description=( '[Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to\n' - '"CollapsingMergeTree" [ClickHouse] table engine.\n' + '"ReplicatedReplacingMergeTree" [ClickHouse] table engine.\n' '\n' ), link=None, @@ -178,6 +179,23 @@ num='4.5.2' ) +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree_DifferentVersionColumnNames = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Altinity Sink Connector] SHALL support replication of tables that use
"InnoDB" [MySQL] storage engine to\n' + '"ReplicatedReplacingMergeTree" [ClickHouse] table engine with different version column names.\n' + '\n' + ), + link=None, + level=4, + num='4.5.2.1' +) + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_IntegerTypes = Requirement( name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.IntegerTypes', version='1.0', @@ -478,8 +496,32 @@ num='4.6.11.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Inserts = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Bytes = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + "[Altinity Sink Connector] SHALL support data replication to [CLickHouse] of tables that contain columns with 'BIT(m)'\n" + 'data types where m: 2 - 64 as they supported by [MySQL].\n' + '\n' + 'Data types connection table:\n' + '\n' + '| MySQL | ClickHouse |\n' + '|:-------|:----------:|\n' + '| BIT(m) | String |\n' + '\n' + '\n' + ), + link=None, + level=4, + num='4.6.12.1' +) + +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Inserts = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts', version='1.0', priority=None, group=None, @@ -491,11 +533,29 @@ ), link=None, level=4, - num='4.7.1.1' + num='4.7.2.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Updates = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Inserts_PartitionLimits = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Altinity Sink Connector] SHALL support correct data inserts replication from [MySQL] to [CLickHouse] when partition \n' + 'limits are hitting or avoid such situations.\n' + '\n' + '\n' + ), + link=None, + level=5, + num='4.7.2.1.1' +) + +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Updates = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates', version='1.0', priority=None, group=None, @@ -507,11 +567,11 @@ ), link=None, level=4, - num='4.7.2.1' + num='4.7.3.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Deletes = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Deletes = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes', version='1.0', priority=None, group=None, @@ -523,11 +583,30 @@ ), link=None, level=4, - num='4.7.3.1' + num='4.7.4.1' +) + +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Altinity Sink Connector]SHALL support the following ways to replicate schema from [MySQL] to [CLickHouse]:\n' + '* auto-create option\n' + '* `clickhouse_loader` script\n' + '* `chump` utility\n' + '\n' + ), + link=None, + level=3, + num='4.8.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AutoCreateTable = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable', 
+RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_AutoCreate = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate', version='1.0', priority=None, group=None, @@ -539,11 +618,27 @@ ), link=None, level=4, - num='4.7.4.1' + num='4.8.2.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AutoDropTable = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_MultipleAutoCreate = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Altinity Sink Connector] SHALL support auto creation of multiple tables from [MySQL] to [CLickHouse].\n' + '\n' + ), + link=None, + level=4, + num='4.8.2.2' +) + +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_AutoDrop = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop', version='1.0', priority=None, group=None, @@ -555,11 +650,11 @@ ), link=None, level=4, - num='4.7.5.1' + num='4.8.3.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ModifyColumn = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Modify = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify', version='1.0', priority=None, group=None, @@ -571,11 +666,11 @@ ), link=None, level=4, - num='4.7.6.1' + num='4.9.1.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AddColumn = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Add = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add', version='1.0', priority=None, group=None, @@ -587,11 +682,11 @@ ), link=None, level=4, - num='4.7.7.1' + num='4.9.2.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_RemoveColumn = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Remove = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove', version='1.0', priority=None, group=None, @@ -603,11 +698,11 @@ ), link=None, level=4, - num='4.7.8.1' + num='4.9.3.1' ) -RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_NoPrimaryKey = Requirement( - name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey', +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_No = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No', version='1.0', priority=None, group=None, @@ -619,8 +714,8 @@ '\n' ), link=None, - level=3, - num='4.8.1' + level=4, + num='4.10.1.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Simple = Requirement( @@ -634,11 +729,10 @@ '[Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries with the same order\n' 'as simple `PRIMARY KEY` does.\n' '\n' - '\n' ), link=None, - level=3, - num='4.8.2' + level=4, + num='4.10.2.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Composite = Requirement( @@ -654,8 +748,8 @@ '\n' ), link=None, - level=3, - num='4.8.3' + level=4, + num='4.10.3.1' ) 
RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MultipleUpstreamServers = Requirement( @@ -671,7 +765,7 @@ ), link=None, level=3, - num='4.9.1' + num='4.11.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MultipleDownstreamServers = Requirement( @@ -687,7 +781,7 @@ ), link=None, level=3, - num='4.10.1' + num='4.12.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ArchivalMode = Requirement( @@ -704,7 +798,7 @@ ), link=None, level=3, - num='4.11.1' + num='4.13.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_BootstrappingMode = Requirement( @@ -722,7 +816,7 @@ ), link=None, level=3, - num='4.12.1' + num='4.14.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_BinlogPosition = Requirement( @@ -739,7 +833,7 @@ ), link=None, level=3, - num='4.13.1' + num='4.15.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ColumnMappingAndTransformationRules = Requirement( @@ -756,7 +850,24 @@ ), link=None, level=3, - num='4.14.1' + num='4.16.1' +) + +RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ColumnsInconsistency = Requirement( + name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Altinity Sink Connector] SHALL support [MySQL] replication to [CLickHouse] replica table when it has fewer columns.\n' + 'In other cases replication is not available.\n' + '\n' + ), + link=None, + level=3, + num='4.17.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Latency = Requirement( @@ -772,7 +883,7 @@ ), link=None, level=3, - num='4.15.1' + num='4.18.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Performance = Requirement( @@ -788,7 +899,7 @@ ), link=None, level=3, - num='4.16.1' + num='4.19.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Performance_LargeDailyDataVolumes = Requirement( @@ -804,7 +915,7 @@ ), link=None, level=4, - num='4.16.2.1' + num='4.19.2.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Settings_Topic2TableMap = Requirement( @@ -821,7 +932,7 @@ ), link=None, level=4, - num='4.17.1.1' + num='4.20.1.1' ) RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Prometheus = Requirement( @@ -844,7 +955,7 @@ ), link=None, level=3, - num='4.18.1' + num='4.21.1' ) SRS030_MySQL_to_ClickHouse_Replication = Specification( @@ -874,20 +985,23 @@ Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication', level=3, num='4.1.1'), Heading(name='Consistency', level=2, num='4.2'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency', level=3, num='4.2.1'), - Heading(name='Deduplication', level=3, num='4.2.2'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication', level=4, num='4.2.2.1'), - Heading(name='Selects', level=3, num='4.2.3'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select', level=4, num='4.2.3.1'), - Heading(name='Only Once Guarantee', level=3, num='4.2.4'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee', level=4, num='4.2.4.1'), + Heading(name='Multiple MySQL Masters', level=3, num='4.2.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.MultipleMySQLMasters', level=4, num='4.2.2.1'), + Heading(name='Deduplication', level=3, num='4.2.3'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication', level=4, num='4.2.3.1'), + Heading(name='Selects', level=3, num='4.2.4'), + 
Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select', level=4, num='4.2.4.1'), + Heading(name='Only Once Guarantee', level=3, num='4.2.5'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee', level=4, num='4.2.5.1'), Heading(name='Transactions', level=2, num='4.3'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Transactions', level=3, num='4.3.1'), Heading(name='Supported Versions', level=2, num='4.4'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLVersions', level=3, num='4.4.1'), Heading(name='Supported Storage Engines', level=2, num='4.5'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree ', level=3, num='4.5.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree', level=3, num='4.5.1'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree.VirtualColumnNames', level=4, num='4.5.1.1'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree ', level=3, num='4.5.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree', level=3, num='4.5.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames', level=4, num='4.5.2.1'), Heading(name='Data Types', level=2, num='4.6'), Heading(name='Integer Types', level=3, num='4.6.1'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.IntegerTypes', level=4, num='4.6.1.1'), @@ -912,50 +1026,63 @@ Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.JSON', level=4, num='4.6.10.1'), Heading(name='Year', level=3, num='4.6.11'), Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Year', level=4, num='4.6.11.1'), + Heading(name='Bytes', level=3, num='4.6.12'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes', level=4, num='4.6.12.1'), Heading(name='Queries', level=2, num='4.7'), - Heading(name='Inserts', level=3, num='4.7.1'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts', level=4, num='4.7.1.1'), - Heading(name='Updates', level=3, num='4.7.2'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates', level=4, num='4.7.2.1'), - Heading(name='Deletes', level=3, num='4.7.3'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes', level=4, num='4.7.3.1'), - Heading(name='Auto Create Table', level=3, num='4.7.4'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable', level=4, num='4.7.4.1'), - Heading(name='Auto Drop Table', level=3, num='4.7.5'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable', level=4, num='4.7.5.1'), - Heading(name='Modify Column', level=3, num='4.7.6'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn', level=4, num='4.7.6.1'), - Heading(name='Add Column', level=3, num='4.7.7'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn', level=4, num='4.7.7.1'), - Heading(name='Remove Column', level=3, num='4.7.8'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn', level=4, num='4.7.8.1'), - Heading(name='Primary Key', level=2, num='4.8'), - 
Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey', level=3, num='4.8.1'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple', level=3, num='4.8.2'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite', level=3, num='4.8.3'), - Heading(name='Multiple Upstream Servers', level=2, num='4.9'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers', level=3, num='4.9.1'), - Heading(name='Multiple Downstream Servers', level=2, num='4.10'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers', level=3, num='4.10.1'), - Heading(name='Archival Mode', level=2, num='4.11'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode', level=3, num='4.11.1'), - Heading(name='Bootstrapping Mode', level=2, num='4.12'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode', level=3, num='4.12.1'), - Heading(name='Binlog Position', level=2, num='4.13'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition', level=3, num='4.13.1'), - Heading(name='Column Mapping And Transformation Rules', level=2, num='4.14'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules', level=3, num='4.14.1'), - Heading(name='Latency', level=2, num='4.15'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency', level=3, num='4.15.1'), - Heading(name='Performance ', level=2, num='4.16'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance', level=3, num='4.16.1'), - Heading(name='Large Daily Data Volumes', level=3, num='4.16.2'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes', level=4, num='4.16.2.1'), - Heading(name='Settings', level=2, num='4.17'), - Heading(name='clickhouse.topic2table.map', level=3, num='4.17.1'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap', level=4, num='4.17.1.1'), - Heading(name='Prometheus ', level=2, num='4.18'), - Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus', level=3, num='4.18.1'), + Heading(name='Test Feature Diagram', level=3, num='4.7.1'), + Heading(name='Inserts', level=3, num='4.7.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts', level=4, num='4.7.2.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits', level=5, num='4.7.2.1.1'), + Heading(name='Updates', level=3, num='4.7.3'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates', level=4, num='4.7.3.1'), + Heading(name='Deletes', level=3, num='4.7.4'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes', level=4, num='4.7.4.1'), + Heading(name='Table Schema Creation', level=2, num='4.8'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation', level=3, num='4.8.1'), + Heading(name='Auto Create', level=3, num='4.8.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate', level=4, num='4.8.2.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate', level=4, num='4.8.2.2'), + Heading(name='Auto Drop', level=3, num='4.8.3'), + 
Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop', level=4, num='4.8.3.1'), + Heading(name='Columns', level=2, num='4.9'), + Heading(name='Modify', level=3, num='4.9.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify', level=4, num='4.9.1.1'), + Heading(name='Add', level=3, num='4.9.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add', level=4, num='4.9.2.1'), + Heading(name='Remove', level=3, num='4.9.3'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove', level=4, num='4.9.3.1'), + Heading(name='Primary Key', level=2, num='4.10'), + Heading(name='No Primary Key', level=3, num='4.10.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No', level=4, num='4.10.1.1'), + Heading(name='Simple Primary Key', level=3, num='4.10.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple', level=4, num='4.10.2.1'), + Heading(name='Composite Primary Key', level=3, num='4.10.3'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite', level=4, num='4.10.3.1'), + Heading(name='Multiple Upstream Servers', level=2, num='4.11'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers', level=3, num='4.11.1'), + Heading(name='Multiple Downstream Servers', level=2, num='4.12'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers', level=3, num='4.12.1'), + Heading(name='Archival Mode', level=2, num='4.13'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode', level=3, num='4.13.1'), + Heading(name='Bootstrapping Mode', level=2, num='4.14'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode', level=3, num='4.14.1'), + Heading(name='Binlog Position', level=2, num='4.15'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition', level=3, num='4.15.1'), + Heading(name='Column Mapping And Transformation Rules', level=2, num='4.16'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules', level=3, num='4.16.1'), + Heading(name='Columns Inconsistency', level=2, num='4.17'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency', level=3, num='4.17.1'), + Heading(name='Latency', level=2, num='4.18'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency', level=3, num='4.18.1'), + Heading(name='Performance ', level=2, num='4.19'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance', level=3, num='4.19.1'), + Heading(name='Large Daily Data Volumes', level=3, num='4.19.2'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes', level=4, num='4.19.2.1'), + Heading(name='Settings', level=2, num='4.20'), + Heading(name='clickhouse.topic2table.map', level=3, num='4.20.1'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap', level=4, num='4.20.1.1'), + Heading(name='Prometheus ', level=2, num='4.21'), + Heading(name='RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus', level=3, num='4.21.1'), ), requirements=( RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication, @@ -964,9 +1091,10 @@ RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_OnlyOnceGuarantee, 
RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Transactions, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLVersions, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree_, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree_VirtualColumnNames, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_CollapsingMergeTree_, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree_DifferentVersionColumnNames, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_IntegerTypes, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Decimal, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Double, @@ -979,15 +1107,19 @@ RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_EnumToString, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_JSON, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Year, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Inserts, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Updates, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Deletes, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AutoCreateTable, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AutoDropTable, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ModifyColumn, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_AddColumn, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_RemoveColumn, - RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_NoPrimaryKey, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Bytes, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Inserts, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Inserts_PartitionLimits, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Updates, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Deletes, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_AutoCreate, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_MultipleAutoCreate, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_AutoDrop, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Modify, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Add, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Columns_Remove, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_No, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Simple, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_PrimaryKey_Composite, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MultipleUpstreamServers, @@ -996,6 +1128,7 @@ RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_BootstrappingMode, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_BinlogPosition, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ColumnMappingAndTransformationRules, + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ColumnsInconsistency, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Latency, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Performance, RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Performance_LargeDailyDataVolumes, @@ -1018,20 +1151,23 @@ * 4.1.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication](#rqsrs-030clickhousemysqltoclickhousereplication) * 4.2 [Consistency](#consistency) * 4.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency](#rqsrs-030clickhousemysqltoclickhousereplicationconsistency) - * 4.2.2 [Deduplication](#deduplication) - * 4.2.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencydeduplication) - * 4.2.3 [Selects](#selects) - * 4.2.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencyselect) - * 4.2.4 [Only Once Guarantee](#only-once-guarantee) - * 4.2.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee](#rqsrs-030clickhousemysqltoclickhousereplicationonlyonceguarantee) + * 4.2.2 [Multiple MySQL Masters](#multiple-mysql-masters) + * 4.2.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.MultipleMySQLMasters](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencymultiplemysqlmasters) + * 4.2.3 [Deduplication](#deduplication) + * 4.2.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencydeduplication) + * 4.2.4 [Selects](#selects) + * 4.2.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Select](#rqsrs-030clickhousemysqltoclickhousereplicationconsistencyselect) + * 4.2.5 [Only Once Guarantee](#only-once-guarantee) + * 4.2.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.OnlyOnceGuarantee](#rqsrs-030clickhousemysqltoclickhousereplicationonlyonceguarantee) * 4.3 [Transactions](#transactions) * 4.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Transactions](#rqsrs-030clickhousemysqltoclickhousereplicationtransactions) * 4.4 [Supported Versions](#supported-versions) * 4.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLVersions](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlversions) * 4.5 [Supported Storage Engines](#supported-storage-engines) - * 4.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree ](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetree-) + * 4.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetree) * 4.5.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree.VirtualColumnNames](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplacingmergetreevirtualcolumnnames) - * 4.5.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree ](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginescollapsingmergetree-) + * 4.5.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplicatedreplacingmergetree) + * 4.5.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames](#rqsrs-030clickhousemysqltoclickhousereplicationmysqlstorageenginesreplicatedreplacingmergetreedifferentversioncolumnnames) * 4.6 [Data Types](#data-types) * 4.6.1 [Integer Types](#integer-types) * 4.6.1.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.IntegerTypes](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesintegertypes) @@ -1056,50 +1192,63 @@ * 4.6.10.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.JSON](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesjson) * 4.6.11 [Year](#year) * 4.6.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Year](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesyear) + * 4.6.12 [Bytes](#bytes) + * 4.6.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes](#rqsrs-030clickhousemysqltoclickhousereplicationdatatypesbytes) * 4.7 [Queries](#queries) - * 4.7.1 [Inserts](#inserts) - * 4.7.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts](#rqsrs-030clickhousemysqltoclickhousereplicationinserts) - * 4.7.2 [Updates](#updates) - * 4.7.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates](#rqsrs-030clickhousemysqltoclickhousereplicationupdates) - * 4.7.3 [Deletes](#deletes) - * 4.7.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes](#rqsrs-030clickhousemysqltoclickhousereplicationdeletes) - * 4.7.4 [Auto Create Table](#auto-create-table) - * 4.7.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable](#rqsrs-030clickhousemysqltoclickhousereplicationautocreatetable) - * 4.7.5 [Auto Drop Table](#auto-drop-table) - * 4.7.5.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable](#rqsrs-030clickhousemysqltoclickhousereplicationautodroptable) - * 4.7.6 [Modify Column](#modify-column) - * 4.7.6.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn](#rqsrs-030clickhousemysqltoclickhousereplicationmodifycolumn) - * 4.7.7 [Add Column](#add-column) - * 4.7.7.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn](#rqsrs-030clickhousemysqltoclickhousereplicationaddcolumn) - * 4.7.8 [Remove Column](#remove-column) - * 4.7.8.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn](#rqsrs-030clickhousemysqltoclickhousereplicationremovecolumn) - * 4.8 [Primary Key](#primary-key) - * 4.8.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey](#rqsrs-030clickhousemysqltoclickhousereplicationnoprimarykey) - * 4.8.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeysimple) - * 4.8.3 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeycomposite) - * 4.9 [Multiple Upstream Servers](#multiple-upstream-servers) - * 4.9.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipleupstreamservers) - * 4.10 [Multiple Downstream Servers](#multiple-downstream-servers) - * 4.10.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipledownstreamservers) - * 4.11 [Archival Mode](#archival-mode) - * 4.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode](#rqsrs-030clickhousemysqltoclickhousereplicationarchivalmode) - * 4.12 [Bootstrapping Mode](#bootstrapping-mode) - * 4.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode](#rqsrs-030clickhousemysqltoclickhousereplicationbootstrappingmode) - * 4.13 [Binlog Position](#binlog-position) - * 4.13.1 
[RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition](#rqsrs-030clickhousemysqltoclickhousereplicationbinlogposition) - * 4.14 [Column Mapping And Transformation Rules](#column-mapping-and-transformation-rules) - * 4.14.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnmappingandtransformationrules) - * 4.15 [Latency](#latency) - * 4.15.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency](#rqsrs-030clickhousemysqltoclickhousereplicationlatency) - * 4.16 [Performance ](#performance-) - * 4.16.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance](#rqsrs-030clickhousemysqltoclickhousereplicationperformance) - * 4.16.2 [Large Daily Data Volumes](#large-daily-data-volumes) - * 4.16.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes](#rqsrs-030clickhousemysqltoclickhousereplicationperformancelargedailydatavolumes) - * 4.17 [Settings](#settings) - * 4.17.1 [clickhouse.topic2table.map](#clickhousetopic2tablemap) - * 4.17.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap](#rqsrs-030clickhousemysqltoclickhousereplicationsettingstopic2tablemap) - * 4.18 [Prometheus ](#prometheus-) - * 4.18.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus](#rqsrs-030clickhousemysqltoclickhousereplicationprometheus) + * 4.7.1 [Test Feature Diagram](#test-feature-diagram) + * 4.7.2 [Inserts](#inserts) + * 4.7.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesinserts) + * 4.7.2.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesinsertspartitionlimits) + * 4.7.3 [Updates](#updates) + * 4.7.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesupdates) + * 4.7.4 [Deletes](#deletes) + * 4.7.4.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes](#rqsrs-030clickhousemysqltoclickhousereplicationqueriesdeletes) + * 4.8 [Table Schema Creation](#table-schema-creation) + * 4.8.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreation) + * 4.8.2 [Auto Create](#auto-create) + * 4.8.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationautocreate) + * 4.8.2.2 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationmultipleautocreate) + * 4.8.3 [Auto Drop](#auto-drop) + * 4.8.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop](#rqsrs-030clickhousemysqltoclickhousereplicationtableschemacreationautodrop) + * 4.9 [Columns](#columns) + * 4.9.1 [Modify](#modify) + * 4.9.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsmodify) + * 4.9.2 [Add](#add) + * 4.9.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsadd) + * 4.9.3 [Remove](#remove) + * 4.9.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsremove) + * 4.10 [Primary 
Key](#primary-key) + * 4.10.1 [No Primary Key](#no-primary-key) + * 4.10.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeyno) + * 4.10.2 [Simple Primary Key](#simple-primary-key) + * 4.10.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeysimple) + * 4.10.3 [Composite Primary Key](#composite-primary-key) + * 4.10.3.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite](#rqsrs-030clickhousemysqltoclickhousereplicationprimarykeycomposite) + * 4.11 [Multiple Upstream Servers](#multiple-upstream-servers) + * 4.11.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleUpstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipleupstreamservers) + * 4.12 [Multiple Downstream Servers](#multiple-downstream-servers) + * 4.12.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MultipleDownstreamServers](#rqsrs-030clickhousemysqltoclickhousereplicationmultipledownstreamservers) + * 4.13 [Archival Mode](#archival-mode) + * 4.13.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ArchivalMode](#rqsrs-030clickhousemysqltoclickhousereplicationarchivalmode) + * 4.14 [Bootstrapping Mode](#bootstrapping-mode) + * 4.14.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BootstrappingMode](#rqsrs-030clickhousemysqltoclickhousereplicationbootstrappingmode) + * 4.15 [Binlog Position](#binlog-position) + * 4.15.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.BinlogPosition](#rqsrs-030clickhousemysqltoclickhousereplicationbinlogposition) + * 4.16 [Column Mapping And Transformation Rules](#column-mapping-and-transformation-rules) + * 4.16.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnMappingAndTransformationRules](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnmappingandtransformationrules) + * 4.17 [Columns Inconsistency](#columns-inconsistency) + * 4.17.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency](#rqsrs-030clickhousemysqltoclickhousereplicationcolumnsinconsistency) + * 4.18 [Latency](#latency) + * 4.18.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency](#rqsrs-030clickhousemysqltoclickhousereplicationlatency) + * 4.19 [Performance ](#performance-) + * 4.19.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance](#rqsrs-030clickhousemysqltoclickhousereplicationperformance) + * 4.19.2 [Large Daily Data Volumes](#large-daily-data-volumes) + * 4.19.2.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Performance.LargeDailyDataVolumes](#rqsrs-030clickhousemysqltoclickhousereplicationperformancelargedailydatavolumes) + * 4.20 [Settings](#settings) + * 4.20.1 [clickhouse.topic2table.map](#clickhousetopic2tablemap) + * 4.20.1.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Settings.Topic2TableMap](#rqsrs-030clickhousemysqltoclickhousereplicationsettingstopic2tablemap) + * 4.21 [Prometheus ](#prometheus-) + * 4.21.1 [RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Prometheus](#rqsrs-030clickhousemysqltoclickhousereplicationprometheus) ## Introduction @@ -1214,6 +1363,13 @@ [Altinity Sink Connector] SHALL support consistent data replication from [MySQL] to [CLickHouse]. 
+#### Multiple MySQL Masters + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.MultipleMySQLMasters + +[Altinity Sink Connector] SHALL support consistent data replication from [MySQL] to [ClickHouse] when one or more MySQL +masters go down. + +#### Deduplication ##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Consistency.Deduplication @@ -1273,7 +1429,7 @@ ### Supported Storage Engines -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplacingMergeTree version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to @@ -1283,13 +1439,20 @@ version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to -"ReplacingMergeTree" [ClickHouse] table engine and virtual column names should be "_version" and "_sign". +"ReplacingMergeTree" [ClickHouse] table engine, and the default virtual column names SHALL be "_version" and "_sign". + -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.CollapsingMergeTree +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree version: 1.0 [Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to -"CollapsingMergeTree" [ClickHouse] table engine. +"ReplicatedReplacingMergeTree" [ClickHouse] table engine. + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.MySQLStorageEngines.ReplicatedReplacingMergeTree.DifferentVersionColumnNames +version: 1.0 + +[Altinity Sink Connector] SHALL support replication of tables that use "InnoDB" [MySQL] storage engine to +"ReplicatedReplacingMergeTree" [ClickHouse] table engine with different version column names. ### Data Types @@ -1483,80 +1646,178 @@ |:------|:----------:| | Year | Int32 | +#### Bytes + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.DataTypes.Bytes +version: 1.0 + +[Altinity Sink Connector] SHALL support data replication to [ClickHouse] of tables that contain columns with 'BIT(m)' +data types, where m ranges from 2 to 64, as supported by [MySQL].
+ +Data type mapping table: + +| MySQL | ClickHouse | +|:-------|:----------:| +| BIT(m) | String | + + ### Queries +#### Test Feature Diagram + +```mermaid flowchart TB; + + classDef yellow fill:#ffff33,stroke:#333,stroke-width:4px,color:black; + classDef yellow2 fill:#ffff33,stroke:#333,stroke-width:4px,color:red; + classDef green fill:#00ff33,stroke:#333,stroke-width:4px,color:black; + classDef red fill:red,stroke:#333,stroke-width:4px,color:black; + classDef blue fill:blue,stroke:#333,stroke-width:4px,color:white; + + subgraph O["Queries Test Feature Diagram"] + A-->D-->C-->B + + 1A---2A---3A + 1D---2D + 1C---2C---3C + 1B---2B---3B---4B---5B---6B---7B + + subgraph A["User input MySQL"] + + 1A["INSERT"]:::green + 2A["DELETE"]:::green + 3A["UPDATE"]:::green + + end + + subgraph D["Engines"] + 1D["with table Engine"]:::yellow + 2D["without table Engine"]:::yellow + end + + subgraph C["Different primary keys"] + 1C["simple primary key"]:::blue + 2C["composite primary key"]:::blue + 3C["no primary key"]:::blue + end + + subgraph B["Different cases"] + 1B["one part one partition"]:::green + 2B["multiple parts one partition"]:::green + 3B["multiple partitions"]:::green + 4B["very large data set"]:::green + 5B["lots of small data sets"]:::green + 6B["table with large number of partitions"]:::green + 7B["table with large number of parts in partition"]:::green + end + + + + + end +``` + #### Inserts -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Inserts +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts version: 1.0 [Altinity Sink Connector] SHALL support new data inserts replication from [MySQL] to [CLickHouse]. +###### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Inserts.PartitionLimits +version: 1.0 + +[Altinity Sink Connector] SHALL support correct replication of inserted data from [MySQL] to [ClickHouse] when partition +limits are hit, or SHALL avoid hitting such limits. + + #### Updates -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Updates +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Updates version: 1.0 [Altinity Sink Connector] SHALL support data updates replication from [MySQL] to [CLickHouse]. #### Deletes -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Deletes +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Queries.Deletes version: 1.0 [Altinity Sink Connector] SHALL support data deletes replication from [MySQL] to [CLickHouse]. -#### Auto Create Table +### Table Schema Creation + +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation +version: 1.0 + +[Altinity Sink Connector] SHALL support the following ways to replicate schema from [MySQL] to [ClickHouse]: +* auto-create option +* `clickhouse_loader` script +* `chump` utility + +#### Auto Create -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoCreateTable +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoCreate version: 1.0 [Altinity Sink Connector] SHALL support auto table creation from [MySQL] to [CLickHouse]. -#### Auto Drop Table +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.MultipleAutoCreate +version: 1.0 + +[Altinity Sink Connector] SHALL support auto creation of multiple tables from [MySQL] to [ClickHouse].
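To make the auto-create requirements above concrete, the sketch below pairs a simple [MySQL] table with the kind of "ReplacingMergeTree" table the connector is expected to create automatically; the table name is hypothetical and the exact DDL emitted by the connector may differ:

```sql
-- MySQL source table (illustrative)
CREATE TABLE test.users (
    id INT AUTO_INCREMENT,
    age INT,
    PRIMARY KEY (id)
) ENGINE = InnoDB;

-- ClickHouse table expected to be auto-created, carrying the default
-- "_version" and "_sign" virtual columns required above (sketch only)
CREATE TABLE IF NOT EXISTS test.users
(
    id Int32,
    age Nullable(Int32),
    _sign Int8,
    _version UInt64
)
ENGINE = ReplacingMergeTree(_version)
ORDER BY id;
```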
+ +#### Auto Drop -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AutoDropTable +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.TableSchemaCreation.AutoDrop version: 1.0 [Altinity Sink Connector] SHALL support `DROP TABLE` query from [MySQL] to [CLickHouse]. -#### Modify Column +### Columns -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ModifyColumn + +#### Modify + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Modify version: 1.0 [Altinity Sink Connector] SHALL support `MODIFY COLUMN` query from [MySQL] to [CLickHouse]. -#### Add Column +#### Add -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.AddColumn +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Add version: 1.0 [Altinity Sink Connector] SHALL support `ADD COLUMN` query from [MySQL] to [CLickHouse]. -#### Remove Column +#### Remove -##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.RemoveColumn +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Columns.Remove version: 1.0 [Altinity Sink Connector] SHALL support `REMOVE COLUMN` query from [MySQL] to [CLickHouse]. ### Primary Key -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.NoPrimaryKey +#### No Primary Key + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.No version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries to tables with no `PRIMARY KEY`. -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple +#### Simple Primary Key + +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Simple version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries with the same order as simple `PRIMARY KEY` does. +#### Composite Primary Key -#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite +##### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.PrimaryKey.Composite version: 1.0 [Altinity Sink Connector] query SHALL support [MySQL] data replication to [CLickHouse] on queries with the same order @@ -1609,6 +1870,14 @@ [Altinity Sink Connector] SHALL support [MySQL] replication to [CLickHouse] with support for defining column mapping and transformations rules. +### Columns Inconsistency + +#### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.ColumnsInconsistency +version: 1.0 + +[Altinity Sink Connector] SHALL support [MySQL] replication to a [ClickHouse] replica table when it has fewer columns than the source table. +In all other cases of column mismatch, replication is not available.
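For illustration, the sketch below shows the supported case: the manually created [ClickHouse] replica omits one of the source columns, and replication proceeds with the extra column dropped. Table and column names are hypothetical:

```sql
-- MySQL source table with three data columns (illustrative)
CREATE TABLE test.orders (
    id INT AUTO_INCREMENT,
    col1 INT,
    col2 INT,
    col3 INT,
    PRIMARY KEY (id)
) ENGINE = InnoDB;

-- Manually created ClickHouse replica with fewer columns: replication
-- is expected to work, with values of `col3` discarded; a replica with
-- extra or renamed columns would not be replicated
CREATE TABLE test.orders
(
    id Int32,
    col1 Int32,
    col2 Int32,
    _sign Int8,
    _version UInt64
)
ENGINE = ReplacingMergeTree(_version)
ORDER BY id;
```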
+ ### Latency #### RQ.SRS-030.ClickHouse.MySQLToClickHouseReplication.Latency diff --git a/tests/Testflows/mysql_to_clickhouse_replication/sql/init_mysql.sql b/tests/integration/sql/init_mysql.sql similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/sql/init_mysql.sql rename to tests/integration/sql/init_mysql.sql diff --git a/tests/Testflows/mysql_to_clickhouse_replication/_instances/logs b/tests/integration/tests/__init__.py similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/_instances/logs rename to tests/integration/tests/__init__.py diff --git a/tests/integration/tests/autocreate.py b/tests/integration/tests/autocreate.py new file mode 100644 index 000000000..1f4b83898 --- /dev/null +++ b/tests/integration/tests/autocreate.py @@ -0,0 +1,104 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.service_settings_steps import * +from integration.tests.steps.statements import * + + +@TestOutline +def create_all_data_types(self, mysql_columns, clickhouse_columns, clickhouse_table): + """Check creation of the ClickHouse replica table for a MySQL table + which contains all supported data types. + """ + table_name = f"autocreate_{getuid()}" + + mysql = self.context.cluster.node("mysql-master") + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given( + f"I create MySql to CH replicated table with all supported data types", + description=table_name, + ): + create_mysql_to_clickhouse_replicated_table( + name=table_name, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + with When(f"I check MySql table {table_name} was created"): + mysql.query(f"SHOW CREATE TABLE {table_name};", message=f"{table_name}") + + with Then("I make an insert to create the ClickHouse table"): + mysql.query( + f"INSERT INTO {table_name} VALUES (1,2/3,1.23,999.00009,'2012-12-12','2018-09-08 17:51:04.777','17:51:04.777','17:51:04.777',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,'x','some_text','IVAN','some_blob','x_Mediumblob','some_Longblobblobblob','a','IVAN')" + ) + + with Then( + f"I check that corresponding ClickHouse table was created and data was inserted" + ): + complex_check_creation_and_select( + table_name=table_name, + clickhouse_table=clickhouse_table, + statement="count(*)", + with_final=True, + ) + + +@TestFeature +def create_all_data_types_null_table( + self, + mysql_columns=all_nullable_mysql_datatypes, + clickhouse_columns=all_nullable_ch_datatypes, +): + """Check all available creation methods for a replicated MySQL to CH table that + contains all supported "NULL" data types. + """ + + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + create_all_data_types( + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def create_all_data_types_not_null_table_manual( + self, + mysql_columns=all_mysql_datatypes, + clickhouse_columns=all_ch_datatypes, +): + """Check all available creation methods for a replicated MySQL to CH table + which contains all supported "NOT NULL" data types.
+ """ + for clickhouse_table in available_clickhouse_tables: + with Example({clickhouse_table}, flags=TE): + create_all_data_types( + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestModule +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_TableSchemaCreation_AutoCreate( + "1.0" + ) +) +@Name("autocreate") +def module(self): + """Verify correct replication of all supported MySQL data types.""" + + with Given("I enable debezium and sink connectors after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/integration/tests/columns_inconsistency.py b/tests/integration/tests/columns_inconsistency.py new file mode 100644 index 000000000..0c4ec7a24 --- /dev/null +++ b/tests/integration/tests/columns_inconsistency.py @@ -0,0 +1,139 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.statements import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +def mysql_to_clickhouse_insert( + self, input, output, mysql_columns, clickhouse_table, clickhouse_columns=None +): + """Manual creation table section""" + + table_name = f"columns_inconsistency_{getuid()}" + mysql = self.context.cluster.node("mysql-master") + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySql to CH replicated table", description=table_name): + create_mysql_to_clickhouse_replicated_table( + name=table_name, + mysql_columns=mysql_columns, + clickhouse_table=clickhouse_table, + clickhouse_columns=clickhouse_columns, + ) + + with When("I insert data in MySql table"): + mysql.query(f"INSERT INTO {table_name} (col1,col2,col3) VALUES {input};") + time.sleep(20) + + with Then("I check data inserted correct"): + complex_check_creation_and_select( + table_name=table_name, + manual_output=output, + clickhouse_table=clickhouse_table, + statement="count(*)", + with_final=True, + ) + + +@TestFeature +def more_columns( + self, + input="(2,7,777)", + output="0", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Int32, col2 Int32, col3 Int32, col4 Int32", +): + """Check when manual created table has more columns than MySQL table.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[0] == "manual": + with Example({clickhouse_table}, flags=TE): + mysql_to_clickhouse_insert( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def less_columns( + self, + input="(2,7,777)", + output="1", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Int32, col2 Int32", +): + """Check when manual created table has fewer columns than MySQL table.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[0] == "manual": + with Example({clickhouse_table}, flags=TE): + mysql_to_clickhouse_insert( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def equal_columns_different_names( + self, + input="(2,7,777)", + output="0", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + 
clickhouse_columns="col11 Int32, col22 Int32, col33 Int32", +): + """Check when manual created table has different named columns than MySQL table.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[0] == "manual": + with Example({clickhouse_table}, flags=TE): + mysql_to_clickhouse_insert( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def equal_columns_some_different_names( + self, + input="(2,7,777)", + output="0", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Int32, col22 Int32, col33 Int32", +): + """Check when manual created table has some different named columns than MySQL table.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[0] == "manual": + with Example({clickhouse_table}, flags=TE): + mysql_to_clickhouse_insert( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestModule +@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_ColumnsInconsistency("1.0")) +@Name("columns inconsistency") +def module(self): + """Check for different columns inconsistency.""" + + with Given("I enable debezium and sink connectors after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/integration/tests/consistency.py b/tests/integration/tests/consistency.py new file mode 100644 index 000000000..f3644d4df --- /dev/null +++ b/tests/integration/tests/consistency.py @@ -0,0 +1,302 @@ +import time +from itertools import combinations +from testflows.connect import Shell +from integration.tests.steps.sql import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +def stop_start(self, services, loops=10): + """Check for data consistency with concurrently service is stopping and starting after 5 sec.""" + uid = getuid() + + clickhouse = self.context.cluster.node("clickhouse") + mysql = self.context.cluster.node("mysql-master") + + with Given("I create unique table name"): + table_name = f"test{uid}" + + init_sink_connector( + auto_create_tables="auto", topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySQL table {table_name}"): + create_mysql_table( + name=table_name, + statement=f"CREATE TABLE {table_name} " + "(id int(11) NOT NULL," + "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," + f"pad char(60) NOT NULL DEFAULT '', PRIMARY KEY (id)) ENGINE = InnoDB;", + ) + + with When( + "I insert, update, delete data in MySql table with concurrently unavailable service" + ): + Given( + "I insert, update, delete data in MySql table", + test=concurrent_queries, + parallel=True, + )( + table_name=table_name, + first_insert_number=1, + last_insert_number=3000, + first_insert_id=3001, + last_insert_id=6000, + first_delete_id=1, + last_delete_id=1500, + first_update_id=1501, + last_update_id=3000, + ) + + for i in range(loops): + with Step(f"LOOP STEP {i}"): + for node in services: + with Shell() as bash: + self.context.cluster.node(f"{node}").stop() + time.sleep(5) + for node in services: + with Shell() as bash: + self.context.cluster.node(f"{node}").start() + + with Then("I check that ClickHouse table has same number of rows as MySQL table"): + 
select(statement="count(*)", table_name=table_name, with_optimize=True) + + +@TestSuite +def combinatoric_stop_start(self): + """Check all possibilities of unavailable services.""" + nodes_list = ["sink", "debezium", "schemaregistry", "kafka", "clickhouse"] + for i in range(1, 6): + service_combinations = list(combinations(nodes_list, i)) + for combination in service_combinations: + Scenario(f"{combination} unavailable", test=stop_start, flags=TE)( + services=combination + ) + + +@TestOutline +def restart(self, services, loops=10): + """Check for data consistency with concurrently service restart 10 times.""" + uid = getuid() + + clickhouse = self.context.cluster.node("clickhouse") + mysql = self.context.cluster.node("mysql-master") + + with Given("I create unique table name"): + table_name = f"test{uid}" + + init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") + + with Given(f"I create MySQL table {table_name}"): + create_mysql_table( + name=table_name, + statement=f"CREATE TABLE {table_name} " + "(id int(11) NOT NULL," + "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," + f"pad char(60) NOT NULL DEFAULT '', PRIMARY KEY (id)) ENGINE = InnoDB;", + ) + + with When( + "I insert, update, delete data in MySql table concurrently with services restart" + ): + Given( + "I insert, update, delete data in MySql table", + test=concurrent_queries, + parallel=True, + )( + table_name=table_name, + first_insert_number=1, + last_insert_number=3000, + first_insert_id=3001, + last_insert_id=6000, + first_delete_id=1, + last_delete_id=1500, + first_update_id=1501, + last_update_id=3000, + ) + + for i in range(loops): + with Step(f"LOOP STEP {i}"): + for node in services: + self.context.cluster.node(f"{node}").restart() + + with Then("I check that ClickHouse table has same number of rows as MySQL table"): + select(statement="count(*)", table_name=table_name, with_optimize=True) + + +@TestSuite +def combinatoric_restart(self): + """Check all possibilities of restart services.""" + nodes_list = ["sink", "debezium", "schemaregistry", "kafka", "clickhouse"] + for i in range(1, 6): + service_combinations = list(combinations(nodes_list, i)) + for combination in service_combinations: + Scenario(f"{combination} restart", test=restart, flags=TE)( + services=combination + ) + + +@TestOutline +def unstable_network_connection(self, services, loops=10): + """Check for data consistency with unstable network connection to some services.""" + uid = getuid() + + clickhouse = self.context.cluster.node("clickhouse") + mysql = self.context.cluster.node("mysql-master") + + with Given("I create unique table name"): + table_name = f"test{uid}" + + init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") + + with Given(f"I create MySQL table {table_name}"): + create_mysql_table( + name=table_name, + statement=f"CREATE TABLE {table_name} " + "(id int(11) NOT NULL," + "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," + f"pad char(60) NOT NULL DEFAULT '', PRIMARY KEY (id)) ENGINE = InnoDB;", + ) + + with When("I add network fault"): + Given( + "I insert, update, delete data in MySql table", + test=concurrent_queries, + parallel=True, + )( + table_name=table_name, + first_insert_number=1, + last_insert_number=3000, + first_insert_id=3001, + last_insert_id=6000, + first_delete_id=1, + last_delete_id=1500, + first_update_id=1501, + last_update_id=3000, + ) + + for i in range(loops): + with Step(f"LOOP STEP {i}"): + for node in services: + with Shell() 
as bash: + bash( + f"docker network disconnect mysql_to_clickhouse_replication_env_default {node}", + timeout=100, + ) + time.sleep(5) + for node in services: + with Shell() as bash: + bash( + f"docker network connect mysql_to_clickhouse_replication_env_default {node}", + timeout=100, + ) + + with Then("I check that ClickHouse table has same number of rows as MySQL table"): + select(statement="count(*)", table_name=table_name, with_optimize=True) + + +@TestSuite +def combinatoric_unstable_network_connection(self): + """Check all combinations of services with an unstable network connection.""" + nodes_list = ["sink", "debezium", "schemaregistry", "kafka", "clickhouse"] + for i in range(1, 6): + service_combinations = list(combinations(nodes_list, i)) + for combination in service_combinations: + Scenario( + f"{combination} unstable network connection", + test=unstable_network_connection, + flags=TE, + )(services=combination) + + +@TestOutline +def kill_start(self, services, loops=2): + """Check for data consistency while services are concurrently killed ("kill -9") and started after 5 sec.""" + uid = getuid() + + clickhouse = self.context.cluster.node("clickhouse") + mysql = self.context.cluster.node("mysql-master") + + with Given("I create unique table name"): + table_name = f"test{uid}" + + init_sink_connector(auto_create_tables=True, topics=f"SERVER5432.test.{table_name}") + + with Given(f"I create MySQL table {table_name}"): + create_mysql_table( + name=table_name, + statement=f"CREATE TABLE {table_name} " + "(id int(11) NOT NULL," + "k int(11) NOT NULL DEFAULT 0,c char(120) NOT NULL DEFAULT ''," + f"pad char(60) NOT NULL DEFAULT '', PRIMARY KEY (id)) ENGINE = InnoDB;", + ) + + with When( + "I insert, update, delete data in MySql table concurrently with services SIGKILL" + ): + Given( + "I insert, update, delete data in MySql table", + test=concurrent_queries, + parallel=True, + )( + table_name=table_name, + first_insert_number=1, + last_insert_number=300, + first_insert_id=301, + last_insert_id=600, + first_delete_id=1, + last_delete_id=150, + first_update_id=151, + last_update_id=300, + ) + + for i in range(loops): + with Step(f"LOOP STEP {i}"): + for node in services: + with Shell() as bash: + self.context.cluster.node(f"{node}").kill() + time.sleep(5) + for node in services: + with Shell() as bash: + self.context.cluster.node(f"{node}").start() + + with Then("I check that ClickHouse table has same number of rows as MySQL table"): + select(statement="count(*)", table_name=table_name, with_optimize=True) + + +@TestSuite +def combinatoric_kill_start_test(self): + """Check kill and start for each of the sink and debezium services.""" + nodes_list = ["sink", "debezium"] + service_combinations = list(combinations(nodes_list, 1)) + for combination in service_combinations: + Scenario(f"{combination} kill and start", test=kill_start, flags=TE)( + services=combination + ) + + +@TestSuite +def combinatoric_kill_start(self): + """Check all combinations of killed and started services.""" + nodes_list = ["sink", "debezium", "schemaregistry", "kafka", "clickhouse"] + for i in range(1, 6): + service_combinations = list(combinations(nodes_list, i)) + for combination in service_combinations: + Scenario(f"{combination} kill and start", test=kill_start, flags=TE)( + services=combination + ) + + +@TestModule +@Name("consistency") +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_OnlyOnceGuarantee("1.0") +) +def module(self): + """Check data consistency when network or service faults are introduced.""" + xfail("") + with Given("I enable debezium and sink connectors
after kafka starts up"): + init_debezium_connector() + + for suite in loads(current_module(), Suite): + Suite(run=suite) diff --git a/tests/integration/tests/deduplication.py b/tests/integration/tests/deduplication.py new file mode 100644 index 000000000..73343dc41 --- /dev/null +++ b/tests/integration/tests/deduplication.py @@ -0,0 +1,88 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.statements import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +def deduplication( + self, clickhouse_table, inserts=False, big_insert=False, insert_number=1000 +): + """Check MySQL to Clickhouse connection for non-duplication data""" + + table_name = f"deduplication_{getuid()}" + + mysql = self.context.cluster.node("mysql-master") + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySql to CH replicated table", description=table_name): + create_mysql_to_clickhouse_replicated_table( + name=table_name, + mysql_columns="age INT", + clickhouse_columns="age Int32", + clickhouse_table=clickhouse_table, + ) + + if inserts: + with When(f"I insert {insert_number} rows of data in MySql table"): + for i in range(1, insert_number + 1): + mysql.query(f"insert into {table_name} values ({i},777)") + metric(name="map insert time", value=current_time(), units="sec") + elif big_insert: + with When(f"I make one insert on {insert_number} rows data in MySql table"): + mysql.query( + f"insert into {table_name} " + f"values {','.join([f'({i},777)' for i in range(1, insert_number + 1)])}" + ) + + with Then(f"I wait unique values from CLickHouse table equal to MySQL table"): + complex_check_creation_and_select( + manual_output=insert_number, + table_name=table_name, + statement="count(*)", + clickhouse_table=clickhouse_table, + with_final=True, + timeout=50, + ) + + +@TestFeature +def deduplication_on_big_insert(self): + """Check MySQL to Clickhouse connection for non-duplication data on 10 000 inserts.""" + for clickhouse_table in available_clickhouse_tables: + with Example({clickhouse_table}, flags=TE): + deduplication( + clickhouse_table=clickhouse_table, big_insert=True, insert_number=10000 + ) + + +@TestFeature +def deduplication_on_many_inserts(self): + """Check MySQL to Clickhouse connection for non-duplication data on big inserts.""" + for clickhouse_table in available_clickhouse_tables: + with Example({clickhouse_table}, flags=TE): + deduplication( + clickhouse_table=clickhouse_table, inserts=True, insert_number=1000 + ) + + +@TestModule +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Consistency_Deduplication("1.0") +) +@Name("deduplication") +def module(self): + """MySql to ClickHouse replication tests to check + for non-duplication data on big inserts.""" + + with Given("I enable debezium and sink connectors after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/integration/tests/delete.py b/tests/integration/tests/delete.py new file mode 100644 index 000000000..e83749f59 --- /dev/null +++ b/tests/integration/tests/delete.py @@ -0,0 +1,144 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.statements import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +def delete( + self, mysql_columns, clickhouse_columns, 
clickhouse_table, primary_key, engine +): + """Check `DELETE` query replication from MySQL table to CH with different primary keys.""" + + table_name = f"delete_{getuid()}" + + mysql = self.context.cluster.node("mysql-master") + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySql to CH replicated table", description=table_name): + create_mysql_to_clickhouse_replicated_table( + name=table_name, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + primary_key=primary_key, + engine=engine, + ) + + with When(f"I insert data in MySql table"): + mysql.query(f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');") + with Then(f"I delete data in MySql table"): + mysql.query(f"DELETE FROM {table_name} WHERE id=1;") + + with And("I check that ClickHouse table has same number of rows as MySQL table"): + complex_check_creation_and_select( + table_name=table_name, + clickhouse_table=clickhouse_table, + statement="count(*)", + with_final=True, + ) + + +@TestFeature +def no_primary_key(self): + """Check for `DELETE` with no primary key without InnoDB engine.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key=None, + engine=False, + ) + + +@TestFeature +def no_primary_key_innodb(self): + """Check for `DELETE` with no primary key with InnoDB engine.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key=None, + engine=True, + ) + + +@TestFeature +def simple_primary_key(self): + """Check for `DELETE` with simple primary key without InnoDB engine.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key="id", + engine=False, + ) + + +@TestFeature +def simple_primary_key_innodb(self): + """Check for `DELETE` with simple primary key with InnoDB engine.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key="id", + engine=True, + ) + + +@TestFeature +def complex_primary_key(self): + """Check for `DELETE` with complex primary key without engine InnoDB.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key="id,k", + engine=False, + ) + + +@TestFeature +def complex_primary_key_innodb(self): + """Check for `DELETE` with complex primary key with engine InnoDB.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + delete( + clickhouse_table=clickhouse_table, + mysql_columns=" k INT,c CHAR, pad CHAR", + clickhouse_columns=" k Int32,c String, pad String", + primary_key="id,k", + engine=True, + ) + + +@TestModule +@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Deletes("1.0")) +@Name("delete") +def module(self): + """MySQL to ClickHouse replication tests for `DELETE` queries.""" + + with Given("I enable debezium connector after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/integration/tests/insert.py b/tests/integration/tests/insert.py new file mode 100644 index 000000000..372e18757 --- /dev/null +++ b/tests/integration/tests/insert.py @@ -0,0 +1,135 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.statements import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +def mysql_to_clickhouse_inserts( + self, input, output, mysql_columns, clickhouse_table, clickhouse_columns=None +): + """Check `INSERT` query replication from MySQL to ClickHouse.""" + + table_name = f"insert_{getuid()}" + mysql = self.context.cluster.node("mysql-master") + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySql to CH replicated table", description=table_name): + create_mysql_to_clickhouse_replicated_table( + name=table_name, + mysql_columns=mysql_columns, + clickhouse_table=clickhouse_table, + clickhouse_columns=clickhouse_columns, + ) + + with When("I insert data in MySql table"): + mysql.query(f"INSERT INTO {table_name} (col1,col2,col3) VALUES {input};") + + with Then("I check the data was inserted correctly"): + complex_check_creation_and_select( + table_name=table_name, + manual_output=output, + clickhouse_table=clickhouse_table, + statement="col1,col2,col3", + with_final=True, + ) + + +@TestFeature +def null_default_insert( + self, + input="(DEFAULT,5,DEFAULT)", + output="\\N,5,777", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Nullable(Int32), col2 Int32, col3 Int32", +): + """NULL and DEFAULT `INSERT` check.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + mysql_to_clickhouse_inserts( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def null_default_insert_2( + self, + input="(DEFAULT,5,333)", + output="\\N,5,333", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Nullable(Int32), col2 Int32, col3 Int32", +): + """NULL and DEFAULT `INSERT` check.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + mysql_to_clickhouse_inserts( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def select_insert( + self, + input="((select 2),7,DEFAULT)", + output="2,7,777", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Int32, col2 Int32, col3 Int32", +): + """SELECT and DEFAULT `INSERT` check.""" + for clickhouse_table in available_clickhouse_tables: + with Example(f"{clickhouse_table}", flags=TE): + mysql_to_clickhouse_inserts( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, +
clickhouse_table=clickhouse_table, + ) + + +@TestFeature +def select_insert_2( + self, + input="((select 2),7,DEFAULT)", + output="2,7,777", + mysql_columns="col1 INT, col2 INT NOT NULL, col3 INT default 777", + clickhouse_columns="col1 Int32, col2 Int32, col3 Int32", +): + """simple `INSERT` check.""" + for clickhouse_table in available_clickhouse_tables: + with Example({clickhouse_table}, flags=TE): + mysql_to_clickhouse_inserts( + input=input, + output=output, + mysql_columns=mysql_columns, + clickhouse_columns=clickhouse_columns, + clickhouse_table=clickhouse_table, + ) + + +@TestModule +@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Inserts("1.0")) +@Name("insert") +def module(self): + """Different `INSERT` tests section.""" + # xfail("") + + with Given("I enable debezium and sink connectors after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/manual_scripts/debezium-connector-config.sh b/tests/integration/tests/manual_scripts/debezium-connector-config.sh similarity index 100% rename from tests/Testflows/mysql_to_clickhouse_replication/tests/manual_scripts/debezium-connector-config.sh rename to tests/integration/tests/manual_scripts/debezium-connector-config.sh diff --git a/tests/integration/tests/manual_scripts/debezium-connector-setup-database.sh b/tests/integration/tests/manual_scripts/debezium-connector-setup-database.sh new file mode 100755 index 000000000..e71c9a270 --- /dev/null +++ b/tests/integration/tests/manual_scripts/debezium-connector-setup-database.sh @@ -0,0 +1,179 @@ +#!/bin/bash + +# Source configuration +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +source "${CUR_DIR}/debezium-connector-config.sh" + +CONNECTOR_NAME="debezium-connector-sbtest" +CONNECTOR_CLASS="io.debezium.connector.mysql.MySqlConnector" + +echo "*********** ${CONNECTOR_NAME} **************" +# Debezium parameters. Check +# https://debezium.io/documentation/reference/stable/connectors/mysql.html#_required_debezium_mysql_connector_configuration_properties +# for the full list of available properties + +MYSQL_HOST="mysql-master" +MYSQL_PORT="3306" +MYSQL_USER="root" +MYSQL_PASSWORD="root" +# Comma-separated list of regular expressions that match the databases for which to capture changes +DATABASE=sbtest +MYSQL_DBS="${DATABASE}" +# Comma-separated list of regular expressions that match fully-qualified table identifiers of tables +MYSQL_TABLES="" +#KAFKA_BOOTSTRAP_SERVERS="one-node-cluster-0.one-node-cluster.redpanda.svc.cluster.local:9092" +KAFKA_BOOTSTRAP_SERVERS="kafka:9092" +KAFKA_TOPIC="schema-changes.${DATABASE}" + +# Connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. +# By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value. +DATABASE_SERVER_ID="5432" +# Unique across all other connectors, used as a prefix for Kafka topic names for events emitted by this connector. +# Alphanumeric characters, hyphens, dots and underscores only. 
+DATABASE_SERVER_NAME="SERVER5432-${DATABASE}" + +if [[ $3 == "postgres" ]]; then + echo "postgres database" + HOST="postgres" + PORT="5432" + USER="postgres_user" + PASSWORD="postgres" + # Comma-separated list of regular expressions that match the databases for which to capture changes + DBS="test" + # Comma-separated list of regular expressions that match fully-qualified table identifiers of tables + TABLES="Employee" + CONNECTOR_CLASS="io.debezium.connector.postgresql.PostgresConnector" + SNAPSHOT_MODE="initial_only" + + if [[ $2 == "apicurio" ]]; then + curl --request POST --url "${CONNECTORS_MANAGEMENT_URL}" --header 'Content-Type: application/json' --data @payload.json + else + cat </dev/null | jq ." + ) + + +@TestStep(Given) +def init_debezium_connector(self, node=None): + """ + Initialize debezium connectors. + """ + if node is None: + node = self.context.cluster.node("bash-tools") + + debezium_settings_transfer_command_apicurio = """cat </dev/null | jq ." + ) + + +@TestStep(Given) +def sb_debizium_script_connector(self): + """ + Sysbench debezium script start up + :param self: + :return: + """ + try: + time.sleep(10) + with Given( + "I start debezium connector", + description="""Sending debezium settings push command on bash_tools + and wait message that they applied correct""", + ): + retry(self.context.cluster.node("bash-tools").cmd, timeout=100, delay=3)( + f"./../manual_scripts/debezium-connector-setup-database.sh", + message='{"error_code":409,"message":"Connector ' + 'debezium-connector-sbtest already exists"}', + ) + yield + finally: + time.sleep(5) + with Finally("I delete debezium sysbench connections"): + with By("deleting debezium connector", flags=TE): + self.context.cluster.node("bash-tools").cmd( + 'curl -X DELETE -H "Accept:application/json" "http://debezium:8083/connectors/' + 'debezium-connector-sbtest" ' + "2>/dev/null | jq ." 
+ ) + with And("Drop CH table"): + self.context.cluster.node("clickhouse").query( + "DROP TABLE IF EXISTS test.sbtest1;" + ) diff --git a/tests/integration/tests/steps/sql.py b/tests/integration/tests/steps/sql.py new file mode 100644 index 000000000..7a180eefe --- /dev/null +++ b/tests/integration/tests/steps/sql.py @@ -0,0 +1,455 @@ +from integration.requirements.requirements import * + +from integration.helpers.common import * + + +@TestStep(Given) +def create_mysql_table(self, name=None, statement=None, node=None): + """ + Creation of default MySQL table for tests + :param self: + :param name: + :param statement: + :param node: + :return: + """ + if node is None: + node = self.context.cluster.node("mysql-master") + if name is None: + name = "users" + if statement is None: + statement = ( + f"CREATE TABLE IF NOT EXISTS {name} " + f"(id INT AUTO_INCREMENT,age INT, PRIMARY KEY (id))" + f" ENGINE = InnoDB;" + ) + + try: + with Given(f"I create MySQL table {name}"): + node.query(statement) + yield + finally: + with Finally("I clean up by deleting table in MySQL"): + node.query(f"DROP TABLE IF EXISTS {name};") + self.context.cluster.node("clickhouse").query( + f"DROP TABLE IF EXISTS test.{name} ON CLUSTER sharded_replicated_cluster;" + ) + time.sleep(5) + + +@TestStep(Given) +def create_clickhouse_table( + self, name=None, statement=None, node=None, force_select_final=False +): + """ + Creation of default ClickHouse table for tests + :param self: + :param name: + :param statement: + :param node: + :param force_select_final: add ignore_force_select_final=1 to the table settings + :return: + """ + if node is None: + node = self.context.cluster.node("clickhouse") + if name is None: + name = "users" + if statement is None: + statement = ( + f"CREATE TABLE IF NOT EXISTS test.{name} " + f"(id Int32, age Int32) " + f"ENGINE = MergeTree " + f"PRIMARY KEY id ORDER BY id SETTINGS {'ignore_force_select_final = 1, ' if force_select_final else ''}" + f"index_granularity = 8192;" + ) + + try: + with Given(f"I create ClickHouse table {name}"): + node.query(statement) + yield + finally: + with Finally("I clean up by deleting table in ClickHouse"): + node.query( + f"DROP TABLE IF EXISTS test.{name} ON CLUSTER sharded_replicated_cluster;" + ) + + +@TestStep +def create_mysql_to_clickhouse_replicated_table( + self, + name, + mysql_columns, + clickhouse_table, + clickhouse_columns=None, + mysql_node=None, + clickhouse_node=None, + version_column="_version", + sign_column="_sign", + primary_key="id", + partition_by=None, + engine=True, +): + """Create MySQL-to-ClickHouse replicated table.
+ + :param self: + :param table_name: replicated table name + :param mysql_columns: MySQL table columns + :param clickhouse_columns: coresponding ClickHouse columns + :param clickhouse_table: use 'auto' for auto create, 'ReplicatedReplacingMergeTree' or 'ReplacingMergeTree' + :param mysql_node: MySql docker compose node + :param clickhouse_node: CH docker compose node + :return: + """ + if mysql_node is None: + mysql_node = self.context.cluster.node("mysql-master") + + if clickhouse_node is None: + clickhouse_node = self.context.cluster.node("clickhouse") + + try: + with Given(f"I create MySQL table", description=name): + mysql_node.query( + f"CREATE TABLE IF NOT EXISTS {name} " + f"(id INT AUTO_INCREMENT," + f"{mysql_columns}" + f"{f', PRIMARY KEY ({primary_key})'if primary_key is not None else ''})" + f"{' ENGINE = InnoDB;' if engine else ''}", + ) + + if clickhouse_table[0] == "auto": + if clickhouse_table[1] == "ReplacingMergeTree": + pass + else: + raise NotImplementedError( + f"table '{clickhouse_table[1]}' not supported" + ) + + elif clickhouse_table[0] == "manual": + if clickhouse_table[1] == "ReplicatedReplacingMergeTree": + with And( + f"I create ReplicatedReplacingMergeTree as a replication table", + description=name, + ): + clickhouse_node.query( + f"CREATE TABLE IF NOT EXISTS test.{name} ON CLUSTER sharded_replicated_cluster" + f"(id Int32,{clickhouse_columns}, {sign_column} " + f"Int8, {version_column} UInt64) " + f"ENGINE = ReplicatedReplacingMergeTree(" + "'/clickhouse/tables/{shard}" + f"/{name}'," + " '{replica}'," + f" {version_column}) " + f"{f'PRIMARY KEY ({primary_key}) ORDER BY ({primary_key})'if primary_key is not None else ''}" + f"{f'PARTITION BY ({partition_by})' if partition_by is not None else ''}" + f" SETTINGS " + f"index_granularity = 8192;", + ) + elif clickhouse_table[1] == "ReplacingMergeTree": + with And( + f"I create ClickHouse table as replication table to MySQL test.{name}" + ): + clickhouse_node.query( + f"CREATE TABLE IF NOT EXISTS test.{name} " + f"(id Int32,{clickhouse_columns}, {sign_column} " + f"Int8, {version_column} UInt64) " + f"ENGINE = ReplacingMergeTree({version_column}) " + f"{f'PRIMARY KEY ({primary_key}) ORDER BY ({primary_key})' if primary_key is not None else ''}" + f"{f'PARTITION BY ({partition_by})' if partition_by is not None else ''}" + f" SETTINGS " + f"index_granularity = 8192;", + ) + + else: + raise NotImplementedError( + f"table '{clickhouse_table[1]}' not supported" + ) + + else: + raise NotImplementedError( + f"table creation method '{clickhouse_table[0]}' not supported" + ) + + yield + finally: + with Finally( + "I clean up by deleting MySql to CH replicated table", description={name} + ): + mysql_node.query(f"DROP TABLE IF EXISTS {name};") + clickhouse_node.query( + f"DROP TABLE IF EXISTS test.{name} ON CLUSTER sharded_replicated_cluster;;" + ) + time.sleep(5) + + +@TestStep(Given) +def insert( + self, + first_insert_id, + last_insert_id, + table_name, + insert_values="({x},2,'a','b')", + node=None, +): + """ + Insert some controlled interval of id's + :param self: + :param node: + :param first_insert_id: + :param last_insert_id: + :param table_name: + :return: + """ + if node is None: + node = self.context.cluster.node("mysql-master") + + with Given( + f"I insert {first_insert_id - last_insert_id} rows of data in MySql table" + ): + for i in range(first_insert_id, last_insert_id + 1): + node.query(f"INSERT INTO {table_name} VALUES {insert_values}".format(x=i)) + + +@TestStep(When) +def complex_insert( + self, + 
table_name, + values, + node=None, + partitions=101, + parts_per_partition=1, + block_size=1, +): + """Insert data having specified number of partitions and parts.""" + if node is None: + node = self.context.cluster.node("mysql-master") + + insert_values_1 = ",".join( + f"{values[0]}".format(x=x, y=y) + for x in range(partitions) + for y in range(block_size * parts_per_partition) + ) + insert_values_2 = ",".join( + f"{values[1]}".format(x=x, y=y) + for x in range(partitions) + for y in range(block_size * parts_per_partition) + ) + node.query("system stop merges") + node.query(f"INSERT INTO {table_name} VALUES {insert_values_1}") + node.query(f"INSERT INTO {table_name} VALUES {insert_values_2}") + + +@TestStep(Then) +def select( + self, + manual_output=None, + table_name=None, + statement=None, + node=None, + with_final=False, + with_optimize=False, + sign_column="_sign", + timeout=100, +): + """SELECT with an option to either with FINAL or loop SELECT + OPTIMIZE TABLE default simple 'SELECT' + :param insert: expected insert data if None compare with MySQL table + :param table_name: table name for select default "users" + :param statement: statement for select default "*" + :param node: node name + :param with_final: 'SELECT ... FINAL' + :param with_optimize: loop 'OPTIMIZE TABLE' + 'SELECT' + :param timeout: retry timeout + """ + if node is None: + node = self.context.cluster.node("clickhouse") + if table_name is None: + table_name = "users" + if statement is None: + statement = "*" + + mysql = self.context.cluster.node("mysql-master") + mysql_output = mysql.query(f"select {statement} from {table_name}").output.strip()[ + 90: + ] + + if manual_output is None: + manual_output = mysql_output + + if with_final: + retry(node.query, timeout=timeout, delay=10,)( + f"SELECT {statement} FROM test.{table_name} FINAL where {sign_column} !=-1 FORMAT CSV", + message=f"{manual_output}", + ) + elif with_optimize: + for attempt in retries(count=10, timeout=100, delay=5): + with attempt: + node.query(f"OPTIMIZE TABLE test.{table_name} FINAL DEDUPLICATE") + + node.query( + f"SELECT {statement} FROM test.{table_name} where {sign_column} !=-1 FORMAT CSV", + message=f"{manual_output}", + ) + + else: + retry(node.query, timeout=timeout, delay=10,)( + f"SELECT {statement} FROM test.{table_name} where {sign_column} !=-1 FORMAT CSV", + message=f"{manual_output}", + ) + + +@TestStep(Then) +def complex_check_creation_and_select( + self, + table_name, + clickhouse_table, + statement, + timeout=50, + manual_output=None, + with_final=False, + with_optimize=False, +): + """ + Check for table creation on all clickhouse nodes where it is expected and select data consistency with MySql + :param self: + :param table_name: + :param auto_create_tables: + :param replicated: + :param statement: + :param with_final: + :param with_optimize: + :return: + """ + clickhouse = self.context.cluster.node("clickhouse") + clickhouse1 = self.context.cluster.node("clickhouse1") + clickhouse2 = self.context.cluster.node("clickhouse2") + clickhouse3 = self.context.cluster.node("clickhouse3") + mysql = self.context.cluster.node("mysql-master") + + if clickhouse_table[1].startswith("Replicated"): + with Then("I check table creation on few nodes"): + retry(clickhouse.query, timeout=30, delay=3)( + "SHOW TABLES FROM test", message=f"{table_name}" + ) + retry(clickhouse1.query, timeout=30, delay=3)( + "SHOW TABLES FROM test", message=f"{table_name}" + ) + retry(clickhouse2.query, timeout=30, delay=3)( + "SHOW TABLES FROM test", 
message=f"{table_name}" + ) + retry(clickhouse3.query, timeout=30, delay=3)( + "SHOW TABLES FROM test", message=f"{table_name}" + ) + else: + with Then("I check table creation"): + retry(clickhouse.query, timeout=30, delay=3)( + "SHOW TABLES FROM test", message=f"{table_name}" + ) + + with Then("I check that ClickHouse table has same number of rows as MySQL table"): + select( + table_name=table_name, + manual_output=manual_output, + statement=statement, + with_final=with_final, + with_optimize=with_optimize, + timeout=timeout, + ) + if clickhouse_table[1].startswith("Replicated"): + with Then( + "I check that ClickHouse table has same number of rows as MySQL table on the replica node if it is " + "replicted table" + ): + select( + table_name=table_name, + manual_output=manual_output, + statement=statement, + node=self.context.cluster.node("clickhouse1"), + with_final=with_final, + with_optimize=with_optimize, + timeout=timeout, + ) + + +@TestStep(When) +def delete(self, first_delete_id, last_delete_id, table_name): + """ + Delete query step + :param self: + :param first_delete_id: + :param last_delete_id: + :param table_name: + :return: + """ + mysql = self.context.cluster.node("mysql-master") + + with Given( + f"I delete {last_delete_id - first_delete_id} rows of data in MySql table" + ): + for i in range(first_delete_id, last_delete_id): + mysql.query(f"DELETE FROM {table_name} WHERE id={i}") + + +@TestStep(When) +def update(self, first_update_id, last_update_id, table_name): + """ + Update query step + :param self: + :param first_update_id: + :param last_update_id: + :param table_name: + :return: + """ + mysql = self.context.cluster.node("mysql-master") + + with Given( + f"I update {last_update_id - first_update_id} rows of data in MySql table" + ): + for i in range(first_update_id, last_update_id): + mysql.query(f"UPDATE {table_name} SET k=k+5 WHERE id={i};") + + +@TestStep(When) +def concurrent_queries( + self, + table_name, + first_insert_number, + last_insert_number, + first_insert_id, + last_insert_id, + first_delete_id, + last_delete_id, + first_update_id, + last_update_id, +): + """ + Insert, update, delete for concurrent queries. 
+ :param self: + :param table_name: table name + :param first_insert_number: first id of precondition insert + :param last_insert_number: last id of precondition insert + :param first_insert_id: first id of concurrent insert + :param last_insert_id: last id of concurrent insert + :param first_delete_id: first id of concurrent delete + :param last_delete_id: last id of concurrent delete + :param first_update_id: first id of concurrent update + :param last_update_id: last id of concurrent update + :return: + """ + + with Given("I insert block of precondition rows"): + insert( + table_name=table_name, + first_insert_id=first_insert_number, + last_insert_id=last_insert_number, + ) + + with When("I start concurrently insert, update and delete queries in MySql table"): + By("inserting data in MySql table", test=insert, parallel=True,)( + first_insert_id=first_insert_id, + last_insert_id=last_insert_id, + table_name=table_name, + ) + By("deleting data in MySql table", test=delete, parallel=True,)( + first_delete_id=first_delete_id, + last_delete_id=last_delete_id, + table_name=table_name, + ) + By("updating data in MySql table", test=update, parallel=True,)( + first_update_id=first_update_id, + last_update_id=last_update_id, + table_name=table_name, + ) diff --git a/tests/integration/tests/steps/statements.py b/tests/integration/tests/steps/statements.py new file mode 100644 index 000000000..a15f9f23f --- /dev/null +++ b/tests/integration/tests/steps/statements.py @@ -0,0 +1,113 @@ +available_clickhouse_tables = [ + ("auto", "ReplacingMergeTree"), + ("manual", "ReplacingMergeTree"), + ("manual", "ReplicatedReplacingMergeTree"), +] + +all_nullable_mysql_datatypes = ( + f"D4 DECIMAL(2,1), D5 DECIMAL(30, 10)," + f" Doublex DOUBLE," + f" x_date DATE," + f"x_datetime6 DATETIME(6)," + f"x_time TIME," + f"x_time6 TIME(6)," + f"Intmin INT, Intmax INT," + f"UIntmin INT UNSIGNED, UIntmax INT UNSIGNED," + f"BIGIntmin BIGINT,BIGIntmax BIGINT," + f"UBIGIntmin BIGINT UNSIGNED,UBIGIntmax BIGINT UNSIGNED," + f"TIntmin TINYINT,TIntmax TINYINT," + f"UTIntmin TINYINT UNSIGNED,UTIntmax TINYINT UNSIGNED," + f"SIntmin SMALLINT,SIntmax SMALLINT," + f"USIntmin SMALLINT UNSIGNED,USIntmax SMALLINT UNSIGNED," + f"MIntmin MEDIUMINT,MIntmax MEDIUMINT," + f"UMIntmin MEDIUMINT UNSIGNED,UMIntmax MEDIUMINT UNSIGNED," + f" x_char CHAR," + f" x_text TEXT," + f" x_varchar VARCHAR(4)," + f" x_Blob BLOB," + f" x_Mediumblob MEDIUMBLOB," + f" x_Longblob LONGBLOB," + f" x_binary BINARY," + f" x_varbinary VARBINARY(4)" +) + +all_nullable_ch_datatypes = ( + f" D4 Nullable(DECIMAL(2,1)), D5 Nullable(DECIMAL(30, 10))," + f" Doublex Nullable(Float64)," + f" x_date Nullable(Date)," + f" x_datetime6 Nullable(String)," + f" x_time Nullable(String)," + f" x_time6 Nullable(String)," + f" Intmin Nullable(Int32), Intmax Nullable(Int32)," + f" UIntmin Nullable(UInt32), UIntmax Nullable(UInt32)," + f" BIGIntmin Nullable(UInt64), BIGIntmax Nullable(UInt64)," + f" UBIGIntmin Nullable(UInt64), UBIGIntmax Nullable(UInt64)," + f" TIntmin Nullable(Int8), TIntmax Nullable(Int8)," + f" UTIntmin Nullable(UInt8), UTIntmax Nullable(UInt8)," + f" SIntmin Nullable(Int16), SIntmax Nullable(Int16)," + f" USIntmin Nullable(UInt16), USIntmax Nullable(UInt16)," + f" MIntmin Nullable(Int32), MIntmax Nullable(Int32)," + f" UMIntmin Nullable(UInt32), UMIntmax Nullable(UInt32)," + f" x_char LowCardinality(Nullable(String))," + f" x_text Nullable(String)," + f" x_varchar Nullable(String)," + f" x_Blob Nullable(String)," + f" x_Mediumblob Nullable(String)," + f" x_Longblob 
Nullable(String)," + f" x_binary Nullable(String)," + f" x_varbinary Nullable(String)" +) + +all_mysql_datatypes = ( + f"D4 DECIMAL(2,1) NOT NULL, D5 DECIMAL(30, 10) NOT NULL," + f" Doublex DOUBLE NOT NULL," + f" x_date DATE NOT NULL," + f"x_datetime6 DATETIME(6) NOT NULL," + f"x_time TIME NOT NULL," + f"x_time6 TIME(6) NOT NULL," + f"Intmin INT NOT NULL, Intmax INT NOT NULL," + f"UIntmin INT UNSIGNED NOT NULL, UIntmax INT UNSIGNED NOT NULL," + f"BIGIntmin BIGINT NOT NULL,BIGIntmax BIGINT NOT NULL," + f"UBIGIntmin BIGINT UNSIGNED NOT NULL,UBIGIntmax BIGINT UNSIGNED NOT NULL," + f"TIntmin TINYINT NOT NULL,TIntmax TINYINT NOT NULL," + f"UTIntmin TINYINT UNSIGNED NOT NULL,UTIntmax TINYINT UNSIGNED NOT NULL," + f"SIntmin SMALLINT NOT NULL,SIntmax SMALLINT NOT NULL," + f"USIntmin SMALLINT UNSIGNED NOT NULL,USIntmax SMALLINT UNSIGNED NOT NULL," + f"MIntmin MEDIUMINT NOT NULL,MIntmax MEDIUMINT NOT NULL," + f"UMIntmin MEDIUMINT UNSIGNED NOT NULL,UMIntmax MEDIUMINT UNSIGNED NOT NULL," + f" x_char CHAR NOT NULL," + f" x_text TEXT NOT NULL," + f" x_varchar VARCHAR(4) NOT NULL," + f" x_Blob BLOB NOT NULL," + f" x_Mediumblob MEDIUMBLOB NOT NULL," + f" x_Longblob LONGBLOB NOT NULL," + f" x_binary BINARY NOT NULL," + f" x_varbinary VARBINARY(4) NOT NULL" +) + +all_ch_datatypes = ( + f" D4 DECIMAL(2,1), D5 DECIMAL(30, 10)," + f" Doublex Float64," + f" x_date Date," + f" x_datetime6 String," + f" x_time String," + f" x_time6 String," + f" Intmin Int32, Intmax Int32," + f" UIntmin UInt32, UIntmax UInt32," + f" BIGIntmin UInt64, BIGIntmax UInt64," + f" UBIGIntmin UInt64, UBIGIntmax UInt64," + f" TIntmin Int8, TIntmax Int8," + f" UTIntmin UInt8, UTIntmax UInt8," + f" SIntmin Int16, SIntmax Int16," + f" USIntmin UInt16, USIntmax UInt16," + f" MIntmin Int32, MIntmax Int32," + f" UMIntmin UInt32, UMIntmax UInt32," + f" x_char LowCardinality(String)," + f" x_text String," + f" x_varchar String," + f" x_Blob String," + f" x_Mediumblob String," + f" x_Longblob String," + f" x_binary String," + f" x_varbinary String" +) diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py b/tests/integration/tests/steps/steps_global.py similarity index 53% rename from tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py rename to tests/integration/tests/steps/steps_global.py index 939fb680f..7e882fed2 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py +++ b/tests/integration/tests/steps/steps_global.py @@ -1,4 +1,4 @@ -from testflows.core import * +from integration.helpers.common import * @TestStep(Given) @@ -9,8 +9,12 @@ def create_database(self, name="test", node=None): try: with By(f"adding {name} database if not exists"): - node.query(f"CREATE DATABASE IF NOT EXISTS {name}") + node.query( + f"CREATE DATABASE IF NOT EXISTS {name} ON CLUSTER sharded_replicated_cluster" + ) yield finally: with Finally(f"I delete {name} database if exists"): - node.query(f"DROP DATABASE IF EXISTS {name};") + node.query( + f"DROP DATABASE IF EXISTS {name} ON CLUSTER sharded_replicated_cluster;" + ) diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py b/tests/integration/tests/sysbench.py similarity index 88% rename from tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py rename to tests/integration/tests/sysbench.py index cd2bd60d9..2c1c1758f 100644 --- a/tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py +++ b/tests/integration/tests/sysbench.py @@ -1,9 +1,6 @@ -import time - -from testflows.core import * from 
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py b/tests/integration/tests/steps/steps_global.py
similarity index 53%
rename from tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py
rename to tests/integration/tests/steps/steps_global.py
index 939fb680f..7e882fed2 100644
--- a/tests/Testflows/mysql_to_clickhouse_replication/tests/steps_global.py
+++ b/tests/integration/tests/steps/steps_global.py
@@ -1,4 +1,4 @@
-from testflows.core import *
+from integration.helpers.common import *


 @TestStep(Given)
@@ -9,8 +9,12 @@ def create_database(self, name="test", node=None):

     try:
         with By(f"adding {name} database if not exists"):
-            node.query(f"CREATE DATABASE IF NOT EXISTS {name}")
+            node.query(
+                f"CREATE DATABASE IF NOT EXISTS {name} ON CLUSTER sharded_replicated_cluster"
+            )
         yield
     finally:
         with Finally(f"I delete {name} database if exists"):
-            node.query(f"DROP DATABASE IF EXISTS {name};")
+            node.query(
+                f"DROP DATABASE IF EXISTS {name} ON CLUSTER sharded_replicated_cluster;"
+            )
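Editorial note: the change above switches the database DDL to ON CLUSTER, which executes the statement on every node of the named cluster rather than only the contacted one. A minimal usage sketch, assuming the sharded_replicated_cluster definition exists in the ClickHouse server configuration, as the step now requires.

# Editorial sketch: using the step above inside a test. Because the DDL now
# carries ON CLUSTER, a single call creates (and later drops) the database
# on every node of sharded_replicated_cluster.
with Given("test database exists on all cluster nodes"):
    create_database(name="test", node=self.context.cluster.node("clickhouse"))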
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py b/tests/integration/tests/sysbench.py
similarity index 88%
rename from tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py
rename to tests/integration/tests/sysbench.py
index cd2bd60d9..2c1c1758f 100644
--- a/tests/Testflows/mysql_to_clickhouse_replication/tests/sysbench.py
+++ b/tests/integration/tests/sysbench.py
@@ -1,9 +1,6 @@
-import time
-
-from testflows.core import *
 from datetime import datetime
-from mysql_to_clickhouse_replication.requirements import *
-from mysql_to_clickhouse_replication.tests.steps import *
+from integration.tests.steps.sql import *
+from integration.tests.steps.service_settings_steps import *


 @TestScenario
@@ -20,7 +17,9 @@ def sysbench_sanity(self):


 @TestOutline
-def sysbench_tests(self, script, test_name=None, distinct_values_timeout=70, distinct_values_delay=10):
+def sysbench_tests(
+    self, script, test_name=None, distinct_values_timeout=70, distinct_values_delay=10
+):
     """Run specified sysbench tests."""

     table_name = "sbtest1"
@@ -35,7 +34,8 @@ def sysbench_tests(self, script, test_name=None, distinct_values_timeout=70, dis
     with And(f"I start sysbench test script"):
         if script == "run_sysbench_tests.sh":
             self.context.cluster.node("bash-tools").cmd(
-                f"/manual_scripts/sysbench/{script} -t " f"{test_name}", message="Threads started!"
+                f"/manual_scripts/sysbench/{script} -t {test_name}",
+                message="Threads started!",
             )
             # , test_name should be one of the following
             # bulk_insert, oltp_insert, oltp_delete, oltp_update_index, oltp_update_non_index"
@@ -50,7 +50,7 @@ def sysbench_tests(self, script, test_name=None, distinct_values_timeout=70, dis
             f'"Using a password on the command line interface" > /tmp/MySQL.tsv'
         )

-        with Then(f"I wait unique values from CLickHouse table equal to MySQL table"):
+        with Then("I wait until unique values in the ClickHouse table equal the MySQL table"):
             mysql_count = mysql.query(
                 f"SELECT count(*) FROM sbtest.{table_name}"
             ).output.strip()[90:]
@@ -65,17 +65,18 @@ def sysbench_tests(self, script, test_name=None, distinct_values_timeout=70, dis
     if script == "run_sysbench_bulk_insert.sh":
         with Then(f"I write data from ClickHouse table to file"):
-            pause()
             clickhouse.cmd(
                 'clickhouse client -uroot --password root --query "select id ,k from test.sbtest1 FINAL where _sign !=-1 '
                 'order by id format TSV" | grep -v "" > /tmp/share_folder/CH.tsv'
             )
     else:
         with Then(f"I write data from ClickHouse table to file"):
             clickhouse.cmd(
                 'clickhouse client -uroot --password root --query "select id ,k ,c ,pad from test.sbtest1 FINAL where _sign !=-1 '
                 'order by id format TSV" | grep -v "" > /tmp/share_folder/CH.tsv'
             )

     with Then(
         "I check MySQL data has equal to CH data hash and if it is not write difference "
@@ -87,12 +88,12 @@ def sysbench_tests(self, script, test_name=None, distinct_values_timeout=70, dis
         assert (
             mysql_hash.output.strip().split()[0] == ch_hash.output.strip().split()[0]
         ), mysql.cmd(
             f"diff --strip-trailing-cr /tmp/MySQL.tsv /tmp/share_folder/CH.tsv > /tmp/diff{now}.err.diff"
         )

     with And(f"I drop tables"):
-        pass
-        # clickhouse.query(f"DROP TABLE IF EXISTS test.{table_name}")
+        clickhouse.query(f"DROP TABLE IF EXISTS test.{table_name}")


 @TestScenario
@@ -174,13 +175,13 @@ def oltp_update_index(self):
     sysbench_tests(script="run_sysbench_tests.sh", test_name="oltp_update_index")


-@TestFeature
+@TestModule
 @Name("sysbench")
-def feature(self):
+def module(self):
     """MySQL to ClickHouse sysbench tests."""

     with Given("I send rpk command on kafka"):
-        self.context.cluster.node("kafka").cmd(
+        retry(self.context.cluster.node("kafka").cmd, timeout=100, delay=2)(
             "rpk topic create SERVER5432.sbtest.sbtest1 -p 6 rpk",
             message="SERVER5432.sbtest.sbtest1 OK",
             exitcode=0,
@@ -188,7 +189,7 @@ def feature(self):

     with And("I enable debezium connector"):
         sb_debizium_script_connector()

-    init_sink_connector(auto_create_tables=True)
+    init_sink_connector(auto_create_tables="auto")

     for scenario in loads(current_module(), Scenario):
         scenario()
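Editorial note on the module above: retry(...) wraps the node's cmd callable and re-invokes it every `delay` seconds until it succeeds or `timeout` seconds elapse, which absorbs the window where Kafka is up but topic creation is not yet accepted. A sketch of the same pattern with an idempotent read-only command; node handle and command are illustrative.

# Editorial sketch: the retry() pattern used in module() above.
# The wrapped call is repeated every 2 seconds for up to 100 seconds.
kafka = self.context.cluster.node("kafka")
retry(kafka.cmd, timeout=100, delay=2)(
    "rpk cluster info",
    exitcode=0,
)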
diff --git a/tests/integration/tests/truncate.py b/tests/integration/tests/truncate.py
new file mode 100644
index 000000000..d0720964a
--- /dev/null
+++ b/tests/integration/tests/truncate.py
@@ -0,0 +1,149 @@
+from integration.tests.steps.sql import *
+from integration.tests.steps.statements import *
+from integration.tests.steps.service_settings_steps import *
+
+
+@TestOutline
+def truncate(
+    self, mysql_columns, clickhouse_columns, clickhouse_table, primary_key, engine
+):
+    """Simple 'TRUNCATE' query check."""
+    table_name = f"truncate_{getuid()}"
+    mysql = self.context.cluster.node("mysql-master")
+
+    init_sink_connector(
+        auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}"
+    )
+
+    with Given("I create a MySQL to ClickHouse replicated table", description=table_name):
+        create_mysql_to_clickhouse_replicated_table(
+            name=table_name,
+            mysql_columns=mysql_columns,
+            clickhouse_columns=clickhouse_columns,
+            clickhouse_table=clickhouse_table,
+            primary_key=primary_key,
+            engine=engine,
+        )
+
+    with When("I insert data in the MySQL table"):
+        mysql.query(f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');")
+
+    with Then("I check that the ClickHouse table received the data"):
+        complex_check_creation_and_select(
+            table_name=table_name,
+            clickhouse_table=clickhouse_table,
+            statement="count(*)",
+            with_final=True,
+        )
+
+    with And("I truncate the MySQL table"):
+        mysql.query(f"TRUNCATE TABLE {table_name}")
+
+    with And("I check that the ClickHouse table is empty"):
+        complex_check_creation_and_select(
+            table_name=table_name,
+            clickhouse_table=clickhouse_table,
+            statement="count(*)",
+            with_final=True,
+        )
+
+
+@TestFeature
+def no_primary_key(self):
+    """Check `TRUNCATE` with no primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key=None,
+                engine=False,
+            )
+
+
+@TestFeature
+def no_primary_key_innodb(self):
+    """Check `TRUNCATE` with no primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key=None,
+                engine=True,
+            )
+
+
+@TestFeature
+def simple_primary_key(self):
+    """Check `TRUNCATE` with a simple primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id",
+                engine=False,
+            )
+
+
+@TestFeature
+def simple_primary_key_innodb(self):
+    """Check `TRUNCATE` with a simple primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id",
+                engine=True,
+            )
+
+
+@TestFeature
+def complex_primary_key(self):
+    """Check `TRUNCATE` with a complex primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id,k",
+                engine=False,
+            )
+
+
+@TestFeature
+def complex_primary_key_innodb(self):
+    """Check `TRUNCATE` with a complex primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            truncate(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id,k",
+                engine=True,
+            )
+
+
+@TestModule
+@Name("truncate")
+def module(self):
+    """'TRUNCATE' query tests."""
+    with Given("I enable debezium connector after kafka starts up"):
+        init_debezium_connector()
+
+    with Pool(1) as executor:
+        try:
+            for feature in loads(current_module(), Feature):
+                Feature(test=feature, parallel=True, executor=executor)()
+        finally:
+            join()
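Editorial note: replication of the TRUNCATE above is asynchronous, so the emptiness check inside complex_check_creation_and_select necessarily polls rather than reading once. Written by hand, the equivalent check looks roughly like this sketch, which reuses the retry pattern this suite already uses elsewhere; the node handle is assumed.

# Editorial sketch: poll until the replicated table reports zero rows.
clickhouse = self.context.cluster.node("clickhouse")
retry(clickhouse.query, timeout=50, delay=1)(
    f"SELECT count(*) FROM test.{table_name} FINAL",
    message="0",
)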
diff --git a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_types.py b/tests/integration/tests/types.py
similarity index 54%
rename from tests/Testflows/mysql_to_clickhouse_replication/tests/data_types.py
rename to tests/integration/tests/types.py
index 3d3c80621..eb26440c0 100644
--- a/tests/Testflows/mysql_to_clickhouse_replication/tests/data_types.py
+++ b/tests/integration/tests/types.py
@@ -1,9 +1,6 @@
-import time
-
-from testflows.core import *
-from mysql_to_clickhouse_replication.requirements import *
-from mysql_to_clickhouse_replication.tests.steps import *
-from helpers.common import *
+from integration.tests.steps.sql import *
+from integration.tests.steps.statements import *
+from integration.tests.steps.service_settings_steps import *


 @TestOutline
@@ -13,44 +10,33 @@ def check_datatype_replication(
     ch_type,
     values,
     ch_values,
+    clickhouse_table,
     nullable=False,
     hex_type=False,
-    auto_create_tables=False,
 ):
     """Check replication of a given MySQL data type."""
-    with Given("Receive UID"):
-        uid = getuid()
-
-    with And("I create unique table name"):
-        table_name = f"test{uid}"
+    table_name = f"types_{getuid()}"

     clickhouse = self.context.cluster.node("clickhouse")
     mysql = self.context.cluster.node("mysql-master")

-    init_sink_connector(auto_create_tables=auto_create_tables, topics=f"SERVER5432.test.{table_name}")
+    mysql_columns = f"MyData {mysql_type}{' NOT NULL' if not nullable else ''}"
+    clickhouse_columns = f"MyData Nullable({ch_type})" if nullable else f"MyData {ch_type}"
+
+    init_sink_connector(
+        auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}"
+    )

-    with Given(f"I create MySQL table {table_name})"):
-        create_mysql_table(
+    with Given("I create a MySQL to ClickHouse replicated table", description=table_name):
+        create_mysql_to_clickhouse_replicated_table(
             name=table_name,
-            statement=f"CREATE TABLE IF NOT EXISTS {table_name} "
-            f"(id INT AUTO_INCREMENT,"
-            f"MyData {mysql_type}{' NOT NULL' if not nullable else ''},"
-            f" PRIMARY KEY (id))"
-            f" ENGINE = InnoDB;",
+            mysql_columns=mysql_columns,
+            clickhouse_columns=clickhouse_columns,
+            clickhouse_table=clickhouse_table,
         )

-    if not auto_create_tables:
-        with And(f"I create ClickHouse replica test.{table_name}"):
-            create_clickhouse_table(
-                name=table_name,
-                statement=f"CREATE TABLE IF NOT EXISTS test.{table_name} "
-                f"(id Int32,{f'MyData Nullable({ch_type})' if nullable else f'MyData {ch_type}'}, sign "
-                f"Int8, ver UInt64) "
-                f"ENGINE = ReplacingMergeTree(ver) "
-                f"PRIMARY KEY id ORDER BY id SETTINGS "
-                f"index_granularity = 8192;",
-            )
-
     with When(f"I insert data in MySQL table {table_name}"):
         for i, value in enumerate(values, 1):
             mysql.query(f"INSERT INTO {table_name} VALUES ({i}, {value})")
@@ -61,7 +47,7 @@
     )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -84,21 +70,26 @@
 )
 def decimal(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'DECIMAL' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
-        ("DOUBLE", "Float64", ["999.00009"], ["999.00009"], False),
-        ("DOUBLE", "Float64", ["NULL"], ["\\N"], True),
+        # ("DOUBLE", "Float64", ["999.00009"], ["999.00009"], False),
+        # ("DOUBLE", "Float64", ["NULL"], ["\\N"], True),
+        ("DOUBLE", "Decimal128(20)", ["999.00009"], ["999.00009"], False),
+        ("DOUBLE", "Decimal128(20)", ["1.7091"], ["1.7091"], False),
     ],
 )
 @Requirements(
@@ -107,31 +98,34 @@ def decimal(self, mysql_type, ch_type, values, ch_values, nullable):
 )
 @Requirements()
 def double(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'DOUBLE' data type."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
-        ("DATE", "Date", ["'2012-12-12'"], ['"2012-12-12"'], False),
+        ("DATE", "Date32", ["'2012-12-12'"], ['"2012-12-12"'], False),
         (
-            "DATETIME(6)",
-            "String",
-            ["'2018-09-08 17:51:04.777'"],
-            ['"2018-09-08 17:51:04.777000"'],
+            "DATETIME",
+            "DateTime64",
+            ["'2018-09-08 17:51:04'"],
+            ['"2018-09-08 17:51:04.000"'],
             False,
         ),
-        ("TIME", "String", ["'17:51:04.777'"], ['"17:51:05"'], False),
+        ("TIME", "String", ["'17:51:04.777'"], ['"17:51:05.000000"'], False),
         ("TIME(6)", "String", ["'17:51:04.777'"], ['"17:51:04.777000"'], False),
-        ("DATE", "Date", ["NULL"], ["\\N"], True),
-        ("DATETIME(6)", "String", ["NULL"], ["\\N"], True),
+        ("DATE", "Date32", ["NULL"], ["\\N"], True),
+        ("DATETIME", "DateTime64", ["NULL"], ["\\N"], True),
         ("TIME", "String", ["NULL"], ["\\N"], True),
         ("TIME(6)", "String", ["NULL"], ["\\N"], True),
     ],
@@ -141,16 +135,19 @@
 )
 def date_time(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'DATE' and 'TIME' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )
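Editorial note to make the outline-plus-examples mechanics above concrete: when date_time runs, each @Examples row becomes one call into check_datatype_replication per entry of available_clickhouse_tables. The first DATE row, expanded by hand (illustrative only, not part of the diff):

# Editorial sketch: the first DATE example row, expanded for the "auto"
# ReplacingMergeTree table flavor.
check_datatype_replication(
    mysql_type="DATE",
    ch_type="Date32",
    values=["'2012-12-12'"],
    ch_values=['"2012-12-12"'],
    nullable=False,
    clickhouse_table=("auto", "ReplacingMergeTree"),
)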
-@TestOutline(Scenario)
+@TestOutline(Feature)
 # @Repeat(3)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -170,13 +167,6 @@ def date_time(self, mysql_type, ch_type, values, ch_values, nullable):
             ["-9223372036854775808", "0", "9223372036854775807"],
             False,
         ),
-        (
-            "BIGINT UNSIGNED",
-            "UInt64",
-            ["0", "18446744073709551615"],
-            ["0", "18446744073709551615"],
-            False,
-        ),
         ("TINYINT", "Int8", ["-128", "127"], ["-128", "127"], False),
         ("TINYINT UNSIGNED", "UInt8", ["0", "255"], ["0", "255"], False),
         (
@@ -212,16 +202,46 @@
 )
 def integer_types(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'INT' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )
+
+
+@TestOutline(Feature)
+@Examples(
+    "mysql_type ch_type values ch_values nullable",
+    [
+        (
+            "BIGINT UNSIGNED",
+            "UInt64",
+            ["0", "18446744073709551615"],
+            ["0", "18446744073709551615"],
+            False,
+        ),
+    ],
+)
+def bigint(self, mysql_type, ch_type, values, ch_values, nullable):
+    """Check replication of the MySQL 'BIGINT UNSIGNED' data type."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -238,16 +258,19 @@
 )
 def string(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'STRING' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )
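Editorial note: splitting BIGINT UNSIGNED out of integer_types isolates the one row that exercises the full unsigned 64-bit range. The boundary values in the example rows above are exactly the Int64/UInt64 limits, as a quick plain-Python check confirms:

# Editorial sketch: the numeric boundaries behind the example rows above.
assert 2**64 - 1 == 18446744073709551615  # BIGINT UNSIGNED / UInt64 max
assert -(2**63) == -9223372036854775808   # BIGINT / Int64 min
assert 2**63 - 1 == 9223372036854775807   # BIGINT / Int64 max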
-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -270,17 +293,20 @@
 )
 def blob(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'BLOB' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-        hex_type=True,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+                hex_type=True,
+            )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -295,17 +321,20 @@
 )
 def binary(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'BINARY' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-        hex_type=True,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+                hex_type=True,
+            )


-@TestOutline(Scenario)
+@TestOutline(Feature)
 @Examples(
     "mysql_type ch_type values ch_values nullable",
     [
@@ -313,28 +342,66 @@
         ("ENUM('hello','world')", "String", ["NULL"], ["\\N"], True),
     ],
 )
-@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_EnumToString("1.0"))
+@Requirements(
+    RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_EnumToString("1.0")
+)
 def enum(self, mysql_type, ch_type, values, ch_values, nullable):
     """Check replication of MySQL 'ENUM' data types."""
-    check_datatype_replication(
-        mysql_type=mysql_type,
-        ch_type=ch_type,
-        values=values,
-        ch_values=ch_values,
-        nullable=nullable,
-    )
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )
+
+
+@TestOutline(Feature)
+@Examples(
+    "mysql_type ch_type values ch_values nullable",
+    [
+        (
+            "JSON",
+            "String",
+            ['\'{\\"key1\\": \\"value1\\", \\"key2\\": \\"value2\\"}\''],
+            ['{""key1"": ""value1"", ""key2"": ""value2""}'],
+            False,
+        ),
+        ("JSON", "String", ["NULL"], ["\\N"], True),
+    ],
+)
+@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_JSON("1.0"))
+def json(self, mysql_type, ch_type, values, ch_values, nullable):
+    """Check replication of MySQL 'JSON' data types."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            check_datatype_replication(
+                mysql_type=mysql_type,
+                ch_type=ch_type,
+                values=values,
+                ch_values=ch_values,
+                nullable=nullable,
+                clickhouse_table=clickhouse_table,
+            )


-@TestFeature
+@TestModule
 @Requirements(
     RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_DataTypes_Nullable("1.0")
 )
-@Name("data types")
-def feature(self):
+@Name("types")
+def module(self):
     """Verify correct replication of all supported MySQL data types."""
     with Given("I enable debezium and sink connectors after kafka starts up"):
         init_debezium_connector()

-    for scenario in loads(current_module(), Scenario):
-        scenario()
+    with Pool(1) as executor:
+        try:
+            for feature in loads(current_module(), Feature):
+                Feature(test=feature, parallel=True, executor=executor)()
+        finally:
+            join()
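Editorial note on the runner pattern that closes types.py (and recurs in the modules below): with Pool(1), the features are still executed one at a time even though they are scheduled with parallel=True; the pool size is the only concurrency knob, and the try/finally join() guarantees cleanup. A sketch of the same loop with real concurrency, assuming the features are independent:

# Editorial sketch: widen the pool to run features concurrently.
with Pool(4) as executor:
    try:
        for feature in loads(current_module(), Feature):
            Feature(test=feature, parallel=True, executor=executor)()
    finally:
        join()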
diff --git a/tests/integration/tests/update.py b/tests/integration/tests/update.py
new file mode 100644
index 000000000..5f6aa822d
--- /dev/null
+++ b/tests/integration/tests/update.py
@@ -0,0 +1,143 @@
+from integration.tests.steps.sql import *
+from integration.tests.steps.statements import *
+from integration.tests.steps.service_settings_steps import *
+
+
+@TestOutline
+def update(
+    self, mysql_columns, clickhouse_columns, clickhouse_table, primary_key, engine
+):
+    """Check that `UPDATE` queries replicate from a MySQL table to ClickHouse with different primary keys."""
+    table_name = f"update_{getuid()}"
+
+    mysql = self.context.cluster.node("mysql-master")
+
+    init_sink_connector(
+        auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}"
+    )
+
+    with Given("I create a MySQL to ClickHouse replicated table", description=table_name):
+        create_mysql_to_clickhouse_replicated_table(
+            name=table_name,
+            mysql_columns=mysql_columns,
+            clickhouse_columns=clickhouse_columns,
+            clickhouse_table=clickhouse_table,
+            primary_key=primary_key,
+            engine=engine,
+        )
+
+    with When("I insert data in the MySQL table"):
+        mysql.query(f"INSERT INTO {table_name} values (1,2,'a','b'), (2,3,'a','b');")
+
+    with Then("I update data in the MySQL table"):
+        mysql.query(f"UPDATE {table_name} SET k=k+5 WHERE id=1;")
+
+    with And("I check that ClickHouse has the same updated data as MySQL"):
+        complex_check_creation_and_select(
+            table_name=table_name,
+            manual_output='1,7,"a","b"',
+            clickhouse_table=clickhouse_table,
+            statement="id,k,c,pad",
+            with_final=True,
+        )
+
+
+@TestFeature
+def no_primary_key(self):
+    """Check `UPDATE` with no primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key=None,
+                engine=False,
+            )
+
+
+@TestFeature
+def no_primary_key_innodb(self):
+    """Check `UPDATE` with no primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key=None,
+                engine=True,
+            )
+
+
+@TestFeature
+def simple_primary_key(self):
+    """Check `UPDATE` with a simple primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id",
+                engine=False,
+            )
+
+
+@TestFeature
+def simple_primary_key_innodb(self):
+    """Check `UPDATE` with a simple primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id",
+                engine=True,
+            )
+
+
+@TestFeature
+def complex_primary_key(self):
+    """Check `UPDATE` with a complex primary key and no explicit table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id,k",
+                engine=False,
+            )
+
+
+@TestFeature
+def complex_primary_key_innodb(self):
+    """Check `UPDATE` with a complex primary key and the InnoDB table engine."""
+    for clickhouse_table in available_clickhouse_tables:
+        with Example(f"{clickhouse_table}", flags=TE):
+            update(
+                clickhouse_table=clickhouse_table,
+                mysql_columns="k INT, c CHAR, pad CHAR",
+                clickhouse_columns="k Int32, c String, pad String",
+                primary_key="id,k",
+                engine=True,
+            )
+
+
+@TestModule
+@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Updates("1.0"))
+@Name("update")
+def module(self):
+    """MySQL to ClickHouse replication tests for `UPDATE` queries."""
+
+    with Given("I enable debezium connector after kafka starts up"):
+        init_debezium_connector()
+
+    with Pool(1) as executor:
+        try:
+            for feature in loads(current_module(), Feature):
+                Feature(test=feature, parallel=True, executor=executor)()
+        finally:
+            join()
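Editorial note for reviewers unfamiliar with the sink's update path: the UPDATE in the outline above lands in ClickHouse as an additional row version in ReplacingMergeTree, which is why complex_check_creation_and_select is called with with_final=True. A hand-written equivalent of that read (sketch only; node handle assumed):

# Editorial sketch: FINAL collapses ReplacingMergeTree versions, so the read
# returns the updated row 1,7,"a","b" instead of both versions of id=1.
clickhouse = self.context.cluster.node("clickhouse")
output = clickhouse.query(
    f"SELECT id,k,c,pad FROM test.{table_name} FINAL FORMAT CSV"
).output.strip()
assert '1,7,"a","b"' in output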
pad String", + primary_key="id,k", + engine=True, + ) + + +@TestModule +@Requirements(RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_Queries_Updates("1.0")) +@Name("update") +def module(self): + """MySql to ClickHouse replication update tests to test `UPDATE` queries.""" + + with Given("I enable debezium connector after kafka starts up"): + init_debezium_connector() + + with Pool(1) as executor: + try: + for feature in loads(current_module(), Feature): + Feature(test=feature, parallel=True, executor=executor)() + finally: + join() diff --git a/tests/integration/tests/virtual_columns.py b/tests/integration/tests/virtual_columns.py new file mode 100644 index 000000000..ece24f2be --- /dev/null +++ b/tests/integration/tests/virtual_columns.py @@ -0,0 +1,97 @@ +from integration.tests.steps.sql import * +from integration.tests.steps.statements import * +from integration.tests.steps.service_settings_steps import * + + +@TestOutline +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplacingMergeTree_VirtualColumnNames( + "1.0" + ) +) +def virtual_column_names( + self, + clickhouse_table, + version_column="_version", + clickhouse_columns=None, + mysql_columns=" MyData DATETIME", +): + """Check correctness of virtual column names.""" + + clickhouse = self.context.cluster.node("clickhouse") + mysql = self.context.cluster.node("mysql-master") + + table_name = f"virtual_columns_{getuid()}" + + init_sink_connector( + auto_create_tables=clickhouse_table[0], topics=f"SERVER5432.test.{table_name}" + ) + + with Given(f"I create MySQL table {table_name})"): + create_mysql_to_clickhouse_replicated_table( + version_column=version_column, + name=table_name, + clickhouse_columns=clickhouse_columns, + mysql_columns=mysql_columns, + clickhouse_table=clickhouse_table, + ) + + with When(f"I insert data in MySql table {table_name}"): + mysql.query(f"INSERT INTO {table_name} VALUES (1, '2018-09-08 17:51:05.777')") + + with Then(f"I make check that ClickHouse table virtual column names are correct"): + retry(clickhouse.query, timeout=50, delay=1)( + f"SHOW CREATE TABLE test.{table_name}", + message=f"`_sign` Int8,\\n `{version_column}` UInt64\\n", + ) + + with And(f"I check that data is replicated"): + complex_check_creation_and_select( + table_name=table_name, + clickhouse_table=clickhouse_table, + statement="count(*)", + with_final=True, + ) + + +@TestFeature +def virtual_column_names_default(self): + """Check correctness of default virtual column names.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[0] == "auto": + with Example({clickhouse_table}, flags=TE): + virtual_column_names(clickhouse_table=clickhouse_table) + + +@TestFeature +@Requirements( + RQ_SRS_030_ClickHouse_MySQLToClickHouseReplication_MySQLStorageEngines_ReplicatedReplacingMergeTree_DifferentVersionColumnNames( + "1.0" + ) +) +def virtual_column_names_replicated_random(self): + """Check replication with some random version column name.""" + for clickhouse_table in available_clickhouse_tables: + if clickhouse_table[1].startswith("Replicated"): + with Example({clickhouse_table}, flags=TE): + virtual_column_names( + clickhouse_table=clickhouse_table, + clickhouse_columns=" MyData String", + version_column="some_version_column", + ) + + +@TestModule +@Name("virtual columns") +def module(self): + """Section to check behavior of virtual columns.""" + + with Given("I enable debezium and sink connectors after kafka starts up"): + init_debezium_connector() + + with Pool(1) as 
+
+
+@TestModule
+@Name("virtual columns")
+def module(self):
+    """Check the behavior of virtual columns."""
+
+    with Given("I enable debezium and sink connectors after kafka starts up"):
+        init_debezium_connector()
+
+    with Pool(1) as executor:
+        try:
+            for feature in loads(current_module(), Feature):
+                Feature(test=feature, parallel=True, executor=executor)()
+        finally:
+            join()
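Closing editorial note on the virtual-columns module: the SHOW CREATE TABLE retry above asserts on formatted DDL text, which is sensitive to ClickHouse's exact output layout. An alternative that avoids string formatting entirely is to read system.columns; this is a sketch, not part of the diff.

# Editorial sketch: assert the virtual column names via system.columns.
clickhouse = self.context.cluster.node("clickhouse")
names = clickhouse.query(
    f"SELECT name FROM system.columns "
    f"WHERE database = 'test' AND table = '{table_name}'"
).output
assert "_sign" in names and version_column in names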