DBZ-1084 Removing SQL Server Connector as it's in the main repo now

Gunnar Morling 2019-01-23 17:54:28 +01:00 committed by Jiri Pechanec
parent 09e9dcad37
commit 7f77f6bdf0
34 changed files with 0 additions and 5266 deletions

View File

@@ -1,260 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-incubator-parent</artifactId>
<version>0.9.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-connector-sqlserver</artifactId>
<name>Debezium Connector for MS SQL Server</name>
<packaging>jar</packaging>
<properties>
<!--
Specify the properties that will be used for setting up the integration tests' Docker container.
Note that the `dockerhost.ip` property is computed from the IP address of DOCKER_HOST, which will
work on all platforms. We'll set some of these as system properties during integration testing.
-->
<sqlserver.port>1433</sqlserver.port>
<sqlserver.user>sa</sqlserver.user>
<sqlserver.password>Password!</sqlserver.password>
<sqlserver.dbname>testDB</sqlserver.dbname>
<docker.filter>microsoft/mssql-server-linux:2017-CU9-GDR2</docker.filter>
<docker.skip>false</docker.skip>
<docker.showLogs>true</docker.showLogs>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>connect-api</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-embedded</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-embedded</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
<dependency>
<groupId>org.easytesting</groupId>
<artifactId>fest-assert</artifactId>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.fabric8</groupId>
<artifactId>docker-maven-plugin</artifactId>
<configuration>
<watchInterval>500</watchInterval>
<logDate>default</logDate>
<verbose>true</verbose>
<images>
<image>
<!-- A Docker image running SQL Server -->
<name>${docker.filter}</name>
<run>
<namingStrategy>none</namingStrategy>
<env>
<ACCEPT_EULA>Y</ACCEPT_EULA>
<SA_PASSWORD>${sqlserver.password}</SA_PASSWORD>
<MSSQL_PID>Standard</MSSQL_PID>
<MSSQL_AGENT_ENABLED>true</MSSQL_AGENT_ENABLED>
</env>
<ports>
<port>${sqlserver.port}:1433</port>
</ports>
<log>
<prefix>sqlserver</prefix>
<enabled>true</enabled>
<color>yellow</color>
</log>
<wait>
<time>30000</time> <!-- 30 seconds max -->
<log>SQL Server is now ready for client connections</log>
</wait>
</run>
</image>
</images>
</configuration>
<!--
Connect this plugin to the maven lifecycle around the integration-test phase:
start the container in pre-integration-test and stop it in post-integration-test.
-->
<executions>
<execution>
<id>start</id>
<phase>pre-integration-test</phase>
<goals>
<goal>start</goal>
</goals>
</execution>
<execution>
<id>stop</id>
<phase>post-integration-test</phase>
<goals>
<goal>stop</goal>
</goals>
</execution>
</executions>
</plugin>
<!--
Unlike surefire, the failsafe plugin ensures that the 'post-integration-test' phase always runs, even
when there are failed integration tests. We rely upon this to always shut down the Docker container
after the integration tests (defined as '*IT.java') are run.
-->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemPropertyVariables>
<!-- Make these available to the tests via system properties -->
<database.hostname>${docker.host.address}</database.hostname>
<database.port>${sqlserver.port}</database.port>
<database.user>${sqlserver.user}</database.user>
<database.password>${sqlserver.password}</database.password>
<skipLongRunningTests>${skipLongRunningTests}</skipLongRunningTests>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>*</include>
<include>**/*</include>
</includes>
</resource>
</resources>
<testResources>
<testResource>
<directory>src/test/resources</directory>
<filtering>true</filtering>
<includes>
<include>*</include>
<include>**/*</include>
</includes>
</testResource>
</testResources>
</build>
<!--
Define several useful profiles
-->
<profiles>
<profile>
<id>assembly</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-assembly-descriptors</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>default</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<finalName>${project.artifactId}-${project.version}</finalName>
<attach>true</attach> <!-- we want to attach & deploy these to Maven -->
<descriptorRefs>
<descriptorRef>connector-distribution</descriptorRef>
</descriptorRefs>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>
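For orientation, a minimal sketch of an integration test that consumes the system properties wired through by the failsafe configuration above (database.hostname, database.port, database.user, database.password). The class name and assertion are hypothetical; any class matching *IT.java would be picked up by the failsafe plugin, and the mssql-jdbc and junit dependencies declared above are assumed to be on the test classpath.

import static org.junit.Assert.assertNotNull;

import java.sql.Connection;
import java.sql.DriverManager;

import org.junit.Test;

public class ContainerConnectionIT {

    @Test
    public void shouldConnectToSqlServerContainer() throws Exception {
        // Injected by maven-failsafe-plugin from the <systemPropertyVariables> above
        final String hostname = System.getProperty("database.hostname", "localhost");
        final String port = System.getProperty("database.port", "1433");
        final String user = System.getProperty("database.user", "sa");
        final String password = System.getProperty("database.password", "Password!");

        final String url = "jdbc:sqlserver://" + hostname + ":" + port;
        try (Connection connection = DriverManager.getConnection(url, user, password)) {
            assertNotNull(connection);
        }
    }
}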

View File

@@ -1,97 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.relational.TableId;
/**
* A logical representation of a change table containing the changes for a given source table.
* There is usually one change table for each source table. When the schema of the source table
* is changed, two change tables may be present.
*
* @author Jiri Pechanec
*
*/
public class ChangeTable {
private static final String CDC_SCHEMA = "cdc";
/**
* The logical name of the change capture process
*/
private final String captureInstance;
/**
* The table from which the changes are captured
*/
private final TableId sourceTableId;
/**
* The table that contains the changes for the source table
*/
private final TableId changeTableId;
/**
* The LSN from which the data in the change table are relevant
*/
private final Lsn startLsn;
/**
* The LSN to which the data in the change table are relevant
*/
private Lsn stopLsn;
/**
* Numeric identifier of change table in SQL Server schema
*/
private final int changeTableObjectId;
public ChangeTable(TableId sourceTableId, String captureInstance, int changeTableObjectId, Lsn startLsn, Lsn stopLsn) {
super();
this.sourceTableId = sourceTableId;
this.captureInstance = captureInstance;
this.changeTableObjectId = changeTableObjectId;
this.startLsn = startLsn;
this.stopLsn = stopLsn;
this.changeTableId = sourceTableId != null ? new TableId(sourceTableId.catalog(), CDC_SCHEMA, captureInstance + "_CT") : null;
}
public ChangeTable(String captureInstance, int changeTableObjectId, Lsn startLsn, Lsn stopLsn) {
this(null, captureInstance, changeTableObjectId, startLsn, stopLsn);
}
public String getCaptureInstance() {
return captureInstance;
}
public Lsn getStartLsn() {
return startLsn;
}
public Lsn getStopLsn() {
return stopLsn;
}
public void setStopLsn(Lsn stopLsn) {
this.stopLsn = stopLsn;
}
public TableId getSourceTableId() {
return sourceTableId;
}
public TableId getChangeTableId() {
return changeTableId;
}
public int getChangeTableObjectId() {
return changeTableObjectId;
}
@Override
public String toString() {
return "ChangeTable [captureInstance=" + captureInstance + ", sourceTableId=" + sourceTableId
+ ", changeTableId=" + changeTableId + ", startLsn=" + startLsn + ", changeTableObjectId="
+ changeTableObjectId + ", stopLsn=" + stopLsn + "]";
}
}
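To illustrate how the change table identifier is derived from the source table and the capture instance, a short hypothetical usage (the table and capture instance names are made up; assumes an import of io.debezium.relational.TableId):

TableId sourceTableId = new TableId("testDB", "dbo", "customers");
ChangeTable changeTable = new ChangeTable(sourceTableId, "dbo_customers", 1, Lsn.NULL, Lsn.NULL);

// The change table always lives in the "cdc" schema and is named <captureInstance>_CT
changeTable.getChangeTableId();   // testDB.cdc.dbo_customers_CT
changeTable.getSourceTableId();   // testDB.dbo.customers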

View File

@@ -1,162 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.util.Arrays;
import io.debezium.util.Strings;
/**
* A logical representation of a SQL Server LSN (log sequence number) position. When an LSN is not available,
* it is replaced with the {@link Lsn#NULL} constant.
*
* @author Jiri Pechanec
*
*/
public class Lsn implements Comparable<Lsn> {
private static final String NULL_STRING = "NULL";
public static final Lsn NULL = new Lsn(null);
private final byte[] binary;
private int[] unsignedBinary;
private String string;
private Lsn(byte[] binary) {
this.binary = binary;
}
/**
* @return binary representation of the stored LSN
*/
public byte[] getBinary() {
return binary;
}
/**
* @return true if this is a real LSN or false if it is {@code NULL}
*/
public boolean isAvailable() {
return binary != null;
}
private int[] getUnsignedBinary() {
if (unsignedBinary != null || binary == null) {
return unsignedBinary;
}
unsignedBinary = new int[binary.length];
for (int i = 0; i < binary.length; i++) {
unsignedBinary[i] = Byte.toUnsignedInt(binary[i]);
}
return unsignedBinary;
}
/**
* @return textual representation of the stored LSN
*/
public String toString() {
if (string != null) {
return string;
}
final StringBuilder sb = new StringBuilder();
if (binary == null) {
return NULL_STRING;
}
final int[] unsigned = getUnsignedBinary();
for (int i = 0; i < unsigned.length; i++) {
final String byteStr = Integer.toHexString(unsigned[i]);
if (byteStr.length() == 1) {
sb.append('0');
}
sb.append(byteStr);
if (i == 3 || i == 7) {
sb.append(':');
}
}
string = sb.toString();
return string;
}
/**
* @param lsnString - textual representation of Lsn
* @return LSN converted from its textual representation
*/
public static Lsn valueOf(String lsnString) {
return (lsnString == null || NULL_STRING.equals(lsnString)) ? NULL : new Lsn(Strings.hexStringToByteArray(lsnString.replace(":", "")));
}
/**
* @param lsnBinary - binary representation of Lsn
* @return LSN converted from its binary representation
*/
public static Lsn valueOf(byte[] lsnBinary) {
return (lsnBinary == null ) ? NULL : new Lsn(lsnBinary);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + Arrays.hashCode(binary);
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Lsn other = (Lsn) obj;
if (!Arrays.equals(binary, other.binary))
return false;
return true;
}
/**
* Enables ordering of LSNs. The {@code NULL} LSN is always the smallest one.
*/
@Override
public int compareTo(Lsn o) {
if (this == o) {
return 0;
}
if (!this.isAvailable()) {
if (!o.isAvailable()) {
return 0;
}
return -1;
}
if (!o.isAvailable()) {
return 1;
}
final int[] thisU = getUnsignedBinary();
final int[] thatU = o.getUnsignedBinary();
for (int i = 0; i < thisU.length; i++) {
final int diff = thisU[i] - thatU[i];
if (diff != 0) {
return diff;
}
}
return 0;
}
/**
* Verifies whether the LSN falls into an LSN interval
*
* @param from start of the interval (included)
* @param to end of the interval (excluded)
*
* @return true if the LSN falls into the interval
*/
public boolean isBetween(Lsn from, Lsn to) {
return this.compareTo(from) >= 0 && this.compareTo(to) < 0;
}
}
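A short sketch of the textual round trip and the ordering semantics; the LSN values are hypothetical:

Lsn lsn = Lsn.valueOf("00000027:00000758:0003");

lsn.toString();              // "00000027:00000758:0003" (separators after the 4th and 8th byte)
lsn.isAvailable();           // true
lsn.compareTo(Lsn.NULL);     // > 0, the NULL LSN sorts before any real LSN
Lsn.NULL.toString();         // "NULL"

Lsn from = Lsn.valueOf("00000027:00000758:0001");
Lsn to = Lsn.valueOf("00000027:00000758:0005");
lsn.isBetween(from, to);     // true: the lower bound is included, the upper bound is excluded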

View File

@@ -1,28 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.util.Properties;
import io.debezium.util.IoUtil;
/**
* Information about this module.
*
* @author Jiri Pechanec
*/
public final class Module {
private static final Properties INFO = IoUtil.loadProperties(Module.class, "io/debezium/connector/sqlserver/build.version");
public static String version() {
return INFO.getProperty("version");
}
public static String name() {
return "sqlserver";
}
}

View File

@@ -1,42 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.data.Envelope.Operation;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.RelationalChangeRecordEmitter;
import io.debezium.util.Clock;
/**
* Emits change data based on a single row read via JDBC.
*
* @author Jiri Pechanec
*/
public class SnapshotChangeRecordEmitter extends RelationalChangeRecordEmitter {
private final Object[] row;
public SnapshotChangeRecordEmitter(OffsetContext offset, Object[] row, Clock clock) {
super(offset, clock);
this.row = row;
}
@Override
protected Operation getOperation() {
return Operation.READ;
}
@Override
protected Object[] getOldColumnValues() {
throw new UnsupportedOperationException("Can't get old row values for READ record");
}
@Override
protected Object[] getNewColumnValues() {
return row;
}
}

View File

@@ -1,132 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.time.Instant;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.connector.AbstractSourceInfo;
/**
* Coordinates from the database log that establish the relation between the streamed change and the source log position.
* Maps to {@code source} field in {@code Envelope}.
*
* @author Jiri Pechanec
*
*/
@NotThreadSafe
public class SourceInfo extends AbstractSourceInfo {
public static final String SERVER_NAME_KEY = "name";
public static final String LOG_TIMESTAMP_KEY = "ts_ms";
public static final String CHANGE_LSN_KEY = "change_lsn";
public static final String COMMIT_LSN_KEY = "commit_lsn";
public static final String SNAPSHOT_KEY = "snapshot";
public static final Schema SCHEMA = schemaBuilder()
.name("io.debezium.connector.sqlserver.Source")
.field(SERVER_NAME_KEY, Schema.STRING_SCHEMA)
.field(LOG_TIMESTAMP_KEY, Schema.OPTIONAL_INT64_SCHEMA)
.field(CHANGE_LSN_KEY, Schema.OPTIONAL_STRING_SCHEMA)
.field(COMMIT_LSN_KEY, Schema.OPTIONAL_STRING_SCHEMA)
.field(SNAPSHOT_KEY, Schema.OPTIONAL_BOOLEAN_SCHEMA)
.build();
private final String serverName;
private Lsn changeLsn;
private Lsn commitLsn;
private boolean snapshot;
private Instant sourceTime;
protected SourceInfo(String serverName) {
super(Module.version());
this.serverName = serverName;
}
/**
* @param lsn - LSN of the change in the database log
*/
public void setChangeLsn(Lsn lsn) {
changeLsn = lsn;
}
public Lsn getChangeLsn() {
return changeLsn;
}
public Lsn getCommitLsn() {
return commitLsn;
}
/**
* @param commitLsn - LSN of the {@code COMMIT} of the transaction that the change is part of
*/
public void setCommitLsn(Lsn commitLsn) {
this.commitLsn = commitLsn;
}
/**
* @param instant a time at which the transaction commit was executed
*/
public void setSourceTime(Instant instant) {
sourceTime = instant;
}
public boolean isSnapshot() {
return snapshot;
}
/**
* @param snapshot - true if the source of the event is the snapshot phase, not the database log
*/
public void setSnapshot(boolean snapshot) {
this.snapshot = snapshot;
}
@Override
protected Schema schema() {
return SCHEMA;
}
@Override
protected String connector() {
return Module.name();
}
/**
* @return the coordinates encoded as a {@code Struct}
*/
@Override
public Struct struct() {
final Struct ret = super.struct()
.put(SERVER_NAME_KEY, serverName)
.put(LOG_TIMESTAMP_KEY, sourceTime == null ? null : sourceTime.toEpochMilli())
.put(SNAPSHOT_KEY, snapshot);
if (changeLsn.isAvailable()) {
ret.put(CHANGE_LSN_KEY, changeLsn.toString());
}
if (commitLsn != null) {
ret.put(COMMIT_LSN_KEY, commitLsn.toString());
}
return ret;
}
@Override
public String toString() {
return "SourceInfo [" +
"serverName=" + serverName +
", changeLsn=" + changeLsn +
", commitLsn=" + commitLsn +
", snapshot=" + snapshot +
", sourceTime=" + sourceTime +
"]";
}
}
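A minimal sketch of how the source struct is populated; the constructor is package-visible, so this assumes code in the same package, and the values are hypothetical:

SourceInfo source = new SourceInfo("server1");
source.setChangeLsn(Lsn.valueOf("00000027:00000758:0003"));
source.setCommitLsn(Lsn.valueOf("00000027:00000758:0005"));
source.setSourceTime(java.time.Instant.now());
source.setSnapshot(false);

// Struct whose fields include name, ts_ms, snapshot, change_lsn and commit_lsn
org.apache.kafka.connect.data.Struct struct = source.struct();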

View File

@@ -1,54 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.source.spi.ChangeEventSourceFactory;
import io.debezium.pipeline.source.spi.SnapshotChangeEventSource;
import io.debezium.pipeline.source.spi.SnapshotProgressListener;
import io.debezium.pipeline.source.spi.StreamingChangeEventSource;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.TableId;
import io.debezium.util.Clock;
public class SqlServerChangeEventSourceFactory implements ChangeEventSourceFactory {
private final SqlServerConnectorConfig configuration;
private final SqlServerConnection jdbcConnection;
private final ErrorHandler errorHandler;
private final EventDispatcher<TableId> dispatcher;
private final Clock clock;
private final SqlServerDatabaseSchema schema;
public SqlServerChangeEventSourceFactory(SqlServerConnectorConfig configuration, SqlServerConnection jdbcConnection,
ErrorHandler errorHandler, EventDispatcher<TableId> dispatcher, Clock clock, SqlServerDatabaseSchema schema) {
this.configuration = configuration;
this.jdbcConnection = jdbcConnection;
this.errorHandler = errorHandler;
this.dispatcher = dispatcher;
this.clock = clock;
this.schema = schema;
}
@Override
public SnapshotChangeEventSource getSnapshotChangeEventSource(OffsetContext offsetContext, SnapshotProgressListener snapshotProgressListener) {
return new SqlServerSnapshotChangeEventSource(configuration, (SqlServerOffsetContext) offsetContext, jdbcConnection, schema, dispatcher, clock, snapshotProgressListener);
}
@Override
public StreamingChangeEventSource getStreamingChangeEventSource(OffsetContext offsetContext) {
return new SqlServerStreamingChangeEventSource(
configuration,
(SqlServerOffsetContext) offsetContext,
jdbcConnection,
dispatcher,
errorHandler,
clock,
schema
);
}
}

View File

@@ -1,75 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.data.Envelope.Operation;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.RelationalChangeRecordEmitter;
import io.debezium.relational.Table;
import io.debezium.util.Clock;
/**
* Emits change data based on a single CDC data row (or two rows in the case of updates).
*
* @author Jiri Pechanec
*/
public class SqlServerChangeRecordEmitter extends RelationalChangeRecordEmitter {
public static final int OP_DELETE = 1;
public static final int OP_INSERT = 2;
public static final int OP_UPDATE_BEFORE = 3;
public static final int OP_UPDATE_AFTER = 4;
private final int operation;
private final Object[] data;
private final Object[] dataNext;
public SqlServerChangeRecordEmitter(OffsetContext offset, int operation, Object[] data, Object[] dataNext, Table table, Clock clock) {
super(offset, clock);
this.operation = operation;
this.data = data;
this.dataNext = dataNext;
}
@Override
protected Operation getOperation() {
if (operation == OP_DELETE) {
return Operation.DELETE;
}
else if (operation == OP_INSERT) {
return Operation.CREATE;
}
else if (operation == OP_UPDATE_BEFORE) {
return Operation.UPDATE;
}
throw new IllegalArgumentException("Received event of unexpected command type: " + operation);
}
@Override
protected Object[] getOldColumnValues() {
switch (getOperation()) {
case CREATE:
case READ:
return null;
default:
return data;
}
}
@Override
protected Object[] getNewColumnValues() {
switch (getOperation()) {
case CREATE:
case READ:
return data;
case UPDATE:
return dataNext;
default:
return null;
}
}
}
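For clarity on the operation codes, a hypothetical helper (not part of the connector) that builds an emitter for a CDC update, which the capture process delivers as two rows: the before image (operation 3) and the after image (operation 4). Imports of io.debezium.pipeline.spi.OffsetContext, io.debezium.relational.Table and io.debezium.util.Clock are assumed:

static SqlServerChangeRecordEmitter updateEmitter(OffsetContext offset, Table table, Object[] beforeRow, Object[] afterRow) {
    SqlServerChangeRecordEmitter emitter = new SqlServerChangeRecordEmitter(
            offset, SqlServerChangeRecordEmitter.OP_UPDATE_BEFORE, beforeRow, afterRow, table, Clock.system());
    // getOperation()       -> Operation.UPDATE
    // getOldColumnValues() -> beforeRow
    // getNewColumnValues() -> afterRow
    return emitter;
}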

View File

@@ -1,367 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.microsoft.sqlserver.jdbc.SQLServerDriver;
import io.debezium.config.Configuration;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.util.BoundedConcurrentHashMap;
/**
* {@link JdbcConnection} extension to be used with Microsoft SQL Server
*
* @author Horia Chiorean (hchiorea@redhat.com), Jiri Pechanec
*
*/
public class SqlServerConnection extends JdbcConnection {
private static final String GET_DATABASE_NAME = "SELECT db_name()";
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerConnection.class);
private static final String STATEMENTS_PLACEHOLDER = "#";
private static final String GET_MAX_LSN = "SELECT sys.fn_cdc_get_max_lsn()";
private static final String LOCK_TABLE = "SELECT * FROM # WITH (TABLOCKX)";
private static final String LSN_TO_TIMESTAMP = "SELECT sys.fn_cdc_map_lsn_to_time(?)";
private static final String INCREMENT_LSN = "SELECT sys.fn_cdc_increment_lsn(?)";
private static final String GET_ALL_CHANGES_FOR_TABLE = "SELECT * FROM cdc.fn_cdc_get_all_changes_#(ISNULL(?,sys.fn_cdc_get_min_lsn('#')), ?, N'all update old')";
private static final String GET_LIST_OF_CDC_ENABLED_TABLES = "EXEC sys.sp_cdc_help_change_data_capture";
private static final String GET_LIST_OF_NEW_CDC_ENABLED_TABLES = "SELECT * FROM cdc.change_tables WHERE start_lsn BETWEEN ? AND ?";
private static final String GET_LIST_OF_KEY_COLUMNS = "SELECT * FROM cdc.index_columns WHERE object_id=?";
private static final int CHANGE_TABLE_DATA_COLUMN_OFFSET = 5;
private static final String URL_PATTERN = "jdbc:sqlserver://${" + JdbcConfiguration.HOSTNAME + "}:${" + JdbcConfiguration.PORT + "};databaseName=${" + JdbcConfiguration.DATABASE + "}";
private static final ConnectionFactory FACTORY = JdbcConnection.patternBasedFactory(URL_PATTERN,
SQLServerDriver.class.getName(),
SqlServerConnection.class.getClassLoader());
/**
* actual name of the database, which could differ in casing from the database name given in the connector config.
*/
private final String realDatabaseName;
private static interface ResultSetExtractor<T> {
T apply(ResultSet rs) throws SQLException;
}
private final BoundedConcurrentHashMap<Lsn, Instant> lsnToInstantCache;
/**
* Creates a new connection using the supplied configuration.
*
* @param config
* {@link Configuration} instance, may not be null.
*/
public SqlServerConnection(Configuration config) {
super(config, FACTORY);
lsnToInstantCache = new BoundedConcurrentHashMap<>(100);
realDatabaseName = retrieveRealDatabaseName();
}
/**
* @return the current largest log sequence number
*/
public Lsn getMaxLsn() throws SQLException {
return queryAndMap(GET_MAX_LSN, singleResultMapper(rs -> {
final Lsn ret = Lsn.valueOf(rs.getBytes(1));
LOGGER.trace("Current maximum lsn is {}", ret);
return ret;
}, "Maximum LSN query must return exactly one value"));
}
/**
* Provides all changes recorded by the SQL Server CDC capture process for a given table.
*
* @param tableId - the table to obtain changes for
* @param fromLsn - closed lower bound of interval of changes to be provided
* @param toLsn - closed upper bound of interval of changes to be provided
* @param consumer - the change processor
* @throws SQLException
*/
public void getChangesForTable(TableId tableId, Lsn fromLsn, Lsn toLsn, ResultSetConsumer consumer) throws SQLException {
final String query = GET_ALL_CHANGES_FOR_TABLE.replace(STATEMENTS_PLACEHOLDER, cdcNameForTable(tableId));
prepareQuery(query, statement -> {
statement.setBytes(1, fromLsn.getBinary());
statement.setBytes(2, toLsn.getBinary());
}, consumer);
}
/**
* Provides all changes recorded by the SQL Server CDC capture process for a set of tables.
*
* @param changeTables - the requested tables to obtain changes for
* @param intervalFromLsn - closed lower bound of interval of changes to be provided
* @param intervalToLsn - closed upper bound of interval of changes to be provided
* @param consumer - the change processor
* @throws SQLException
*/
public void getChangesForTables(ChangeTable[] changeTables, Lsn intervalFromLsn, Lsn intervalToLsn, BlockingMultiResultSetConsumer consumer) throws SQLException, InterruptedException {
final String[] queries = new String[changeTables.length];
final StatementPreparer[] preparers = new StatementPreparer[changeTables.length];
int idx = 0;
for (ChangeTable changeTable: changeTables) {
final String query = GET_ALL_CHANGES_FOR_TABLE.replace(STATEMENTS_PLACEHOLDER, changeTable.getCaptureInstance());
queries[idx] = query;
// If the table was added in the middle of the queried buffer, we need
// to adjust the lower bound to the first LSN available for that table
final Lsn fromLsn = changeTable.getStartLsn().compareTo(intervalFromLsn) > 0 ? changeTable.getStartLsn() : intervalFromLsn;
LOGGER.trace("Getting changes for table {} in range[{}, {}]", changeTable, fromLsn, intervalToLsn);
preparers[idx] = statement -> {
statement.setBytes(1, fromLsn.getBinary());
statement.setBytes(2, intervalToLsn.getBinary());
};
idx++;
}
prepareQuery(queries, preparers, consumer);
}
/**
* Obtain the next available position in the database log.
*
* @param lsn - LSN of the current position
* @return LSN of the next position in the database
* @throws SQLException
*/
public Lsn incrementLsn(Lsn lsn) throws SQLException {
final String query = INCREMENT_LSN;
return prepareQueryAndMap(query, statement -> {
statement.setBytes(1, lsn.getBinary());
}, singleResultMapper(rs -> {
final Lsn ret = Lsn.valueOf(rs.getBytes(1));
LOGGER.trace("Increasing lsn from {} to {}", lsn, ret);
return ret;
}, "Increment LSN query must return exactly one value"));
}
/**
* Map a commit LSN to a point in time when the commit happened.
*
* @param lsn - LSN of the commit
* @return time when the commit was recorded into the database log
* @throws SQLException
*/
public Instant timestampOfLsn(Lsn lsn) throws SQLException {
final String query = LSN_TO_TIMESTAMP;
if (lsn.getBinary() == null) {
return null;
}
Instant cachedInstant = lsnToInstantCache.get(lsn);
if (cachedInstant != null) {
return cachedInstant;
}
return prepareQueryAndMap(query, statement -> {
statement.setBytes(1, lsn.getBinary());
}, singleResultMapper(rs -> {
final Timestamp ts = rs.getTimestamp(1);
final Instant ret = (ts == null) ? null : ts.toInstant();
LOGGER.trace("Timestamp of lsn {} is {}", lsn, ret);
if (ret != null) {
lsnToInstantCache.put(lsn, ret);
}
return ret;
}, "LSN to timestamp query must return exactly one value"));
}
/**
* Creates an exclusive lock for a given table.
*
* @param tableId to be locked
* @throws SQLException
*/
public void lockTable(TableId tableId) throws SQLException {
final String lockTableStmt = LOCK_TABLE.replace(STATEMENTS_PLACEHOLDER, tableId.table());
execute(lockTableStmt);
}
private String cdcNameForTable(TableId tableId) {
return tableId.schema() + '_' + tableId.table();
}
private <T> ResultSetMapper<T> singleResultMapper(ResultSetExtractor<T> extractor, String error) throws SQLException {
return (rs) -> {
if (rs.next()) {
final T ret = extractor.apply(rs);
if (!rs.next()) {
return ret;
}
}
throw new IllegalStateException(error);
};
}
public static class CdcEnabledTable {
private final String tableId;
private final String captureName;
private final Lsn fromLsn;
private CdcEnabledTable(String tableId, String captureName, Lsn fromLsn) {
this.tableId = tableId;
this.captureName = captureName;
this.fromLsn = fromLsn;
}
public String getTableId() {
return tableId;
}
public String getCaptureName() {
return captureName;
}
public Lsn getFromLsn() {
return fromLsn;
}
}
public Set<ChangeTable> listOfChangeTables() throws SQLException {
final String query = GET_LIST_OF_CDC_ENABLED_TABLES;
return queryAndMap(query, rs -> {
final Set<ChangeTable> changeTables = new HashSet<>();
while (rs.next()) {
changeTables.add(
new ChangeTable(
new TableId(realDatabaseName, rs.getString(1), rs.getString(2)),
rs.getString(3),
rs.getInt(4),
Lsn.valueOf(rs.getBytes(6)),
Lsn.valueOf(rs.getBytes(7))
)
);
}
return changeTables;
});
}
public Set<ChangeTable> listOfNewChangeTables(Lsn fromLsn, Lsn toLsn) throws SQLException {
final String query = GET_LIST_OF_NEW_CDC_ENABLED_TABLES;
return prepareQueryAndMap(query,
ps -> {
ps.setBytes(1, fromLsn.getBinary());
ps.setBytes(2, toLsn.getBinary());
},
rs -> {
final Set<ChangeTable> changeTables = new HashSet<>();
while (rs.next()) {
changeTables.add(new ChangeTable(
rs.getString(4),
rs.getInt(1),
Lsn.valueOf(rs.getBytes(5)),
Lsn.valueOf(rs.getBytes(6))
));
}
return changeTables;
}
);
}
public Table getTableSchemaFromTable(ChangeTable changeTable) throws SQLException {
final DatabaseMetaData metadata = connection().getMetaData();
List<Column> columns = new ArrayList<>();
try (ResultSet rs = metadata.getColumns(
realDatabaseName,
changeTable.getSourceTableId().schema(),
changeTable.getSourceTableId().table(),
null)
) {
while (rs.next()) {
readTableColumn(rs, changeTable.getSourceTableId(), null).ifPresent(ce -> columns.add(ce.create()));
}
}
final List<String> pkColumnNames = readPrimaryKeyNames(metadata, changeTable.getSourceTableId());
Collections.sort(columns);
return Table.editor()
.tableId(changeTable.getSourceTableId())
.addColumns(columns)
.setPrimaryKeyNames(pkColumnNames)
.create();
}
public Table getTableSchemaFromChangeTable(ChangeTable changeTable) throws SQLException {
final DatabaseMetaData metadata = connection().getMetaData();
final TableId changeTableId = changeTable.getChangeTableId();
List<ColumnEditor> columnEditors = new ArrayList<>();
try (ResultSet rs = metadata.getColumns(realDatabaseName, changeTableId.schema(), changeTableId.table(), null)) {
while (rs.next()) {
readTableColumn(rs, changeTableId, null).ifPresent(columnEditors::add);
}
}
// The first 5 columns and the last column of the change table are CDC metadata
final List<Column> columns = columnEditors.subList(CHANGE_TABLE_DATA_COLUMN_OFFSET, columnEditors.size() - 1).stream()
.map(c -> c.position(c.position() - CHANGE_TABLE_DATA_COLUMN_OFFSET).create())
.collect(Collectors.toList());
final List<String> pkColumnNames = new ArrayList<>();
prepareQuery(GET_LIST_OF_KEY_COLUMNS, ps -> ps.setInt(1, changeTable.getChangeTableObjectId()), rs -> {
while (rs.next()) {
pkColumnNames.add(rs.getString(2));
}
});
Collections.sort(columns);
return Table.editor()
.tableId(changeTable.getSourceTableId())
.addColumns(columns)
.setPrimaryKeyNames(pkColumnNames)
.create();
}
public synchronized void rollback() throws SQLException {
if (isConnected()) {
connection().rollback();
}
}
public String getNameOfChangeTable(String captureName) {
return captureName + "_CT";
}
public String getRealDatabaseName() {
return realDatabaseName;
}
private String retrieveRealDatabaseName() {
try {
return queryAndMap(
GET_DATABASE_NAME,
singleResultMapper(rs -> rs.getString(1), "Could not retrieve database name")
);
}
catch (SQLException e) {
throw new RuntimeException("Couldn't obtain database name", e);
}
}
}
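A minimal sketch of how the CDC helpers above compose into one polling step; the connection values and table name are hypothetical and error handling is omitted (assumes imports of io.debezium.config.Configuration and io.debezium.relational.TableId):

Configuration jdbcConfig = Configuration.create()
        .with("hostname", "localhost")
        .with("port", 1433)
        .with("user", "sa")
        .with("password", "Password!")
        .with("dbname", "testDB")
        .build();

try (SqlServerConnection connection = new SqlServerConnection(jdbcConfig)) {
    Lsn maxLsn = connection.getMaxLsn();

    // Read all changes captured for dbo.customers up to the current maximum LSN;
    // Lsn.NULL as the lower bound falls back to the capture instance's minimum LSN
    connection.getChangesForTable(new TableId("testDB", "dbo", "customers"), Lsn.NULL, maxLsn, rs -> {
        while (rs.next()) {
            // the first four columns are CDC metadata; column 3 is the __$operation code
            System.out.println("operation=" + rs.getInt(3));
        }
    });

    // Start the next polling window just after the position processed above
    Lsn nextFromLsn = connection.incrementLsn(maxLsn);
}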

View File

@@ -1,59 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.source.SourceConnector;
/**
* The main connector class used to instantiate configuration and execution classes
*
* @author Jiri Pechanec
*
*/
public class SqlServerConnector extends SourceConnector {
private Map<String, String> properties;
@Override
public String version() {
return Module.version();
}
@Override
public void start(Map<String, String> props) {
this.properties = Collections.unmodifiableMap(new HashMap<>(props));
}
@Override
public Class<? extends Task> taskClass() {
return SqlServerConnectorTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
if (maxTasks > 1) {
throw new IllegalArgumentException("Only a single connector task may be started");
}
return Collections.singletonList(properties);
}
@Override
public void stop() {
}
@Override
public ConfigDef config() {
return SqlServerConnectorConfig.configDef();
}
}

View File

@@ -1,304 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.util.function.Predicate;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.config.Configuration;
import io.debezium.config.EnumeratedValue;
import io.debezium.config.Field;
import io.debezium.document.Document;
import io.debezium.function.Predicates;
import io.debezium.heartbeat.Heartbeat;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.relational.ColumnId;
import io.debezium.relational.HistorizedRelationalDatabaseConnectorConfig;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables.TableFilter;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.KafkaDatabaseHistory;
/**
* The list of configuration options for SQL Server connector
*
* @author Jiri Pechanec
*/
public class SqlServerConnectorConfig extends HistorizedRelationalDatabaseConnectorConfig {
/**
* The set of predefined SnapshotMode options or aliases.
*/
public static enum SnapshotMode implements EnumeratedValue {
/**
* Perform a snapshot of data and schema upon initial startup of a connector.
*/
INITIAL("initial", true),
/**
* Perform a snapshot of the schema but no data upon initial startup of a connector.
*/
INITIAL_SCHEMA_ONLY("initial_schema_only", false);
private final String value;
private final boolean includeData;
private SnapshotMode(String value, boolean includeData) {
this.value = value;
this.includeData = includeData;
}
@Override
public String getValue() {
return value;
}
/**
* Whether this snapshotting mode should include the actual data or just the
* schema of captured tables.
*/
public boolean includeData() {
return includeData;
}
/**
* Determine if the supplied value is one of the predefined options.
*
* @param value the configuration property value; may not be null
* @return the matching option, or null if no match is found
*/
public static SnapshotMode parse(String value) {
if (value == null) {
return null;
}
value = value.trim();
for (SnapshotMode option : SnapshotMode.values()) {
if (option.getValue().equalsIgnoreCase(value)) return option;
}
return null;
}
/**
* Determine if the supplied value is one of the predefined options.
*
* @param value the configuration property value; may not be null
* @param defaultValue the default value; may be null
* @return the matching option, or null if no match is found and the non-null default is invalid
*/
public static SnapshotMode parse(String value, String defaultValue) {
SnapshotMode mode = parse(value);
if (mode == null && defaultValue != null) {
mode = parse(defaultValue);
}
return mode;
}
}
/**
* The set of predefined Snapshot Locking Mode options.
*/
public static enum SnapshotLockingMode implements EnumeratedValue {
/**
* This mode will block all reads and writes for the entire duration of the snapshot.
*
* The connector will execute {@code SELECT * FROM .. WITH (TABLOCKX)}
*/
EXCLUSIVE("exclusive"),
/**
* This mode uses SNAPSHOT isolation level. This way reads and writes are not blocked for the entire duration
* of the snapshot. Snapshot consistency is guaranteed as long as DDL statements are not executed at the time.
*/
SNAPSHOT("snapshot"),
/**
* This mode will avoid using ANY table locks during the snapshot process. This mode can only be used with a
* SnapshotMode that does not snapshot data, i.e. initial_schema_only.
*/
NONE("none");
private final String value;
private SnapshotLockingMode(String value) {
this.value = value;
}
@Override
public String getValue() {
return value;
}
/**
* Determine if the supplied value is one of the predefined options.
*
* @param value the configuration property value; may not be null
* @return the matching option, or null if no match is found
*/
public static SnapshotLockingMode parse(String value) {
if (value == null) return null;
value = value.trim();
for (SnapshotLockingMode option : SnapshotLockingMode.values()) {
if (option.getValue().equalsIgnoreCase(value)) return option;
}
return null;
}
/**
* Determine if the supplied value is one of the predefined options.
*
* @param value the configuration property value; may not be null
* @param defaultValue the default value; may be null
* @return the matching option, or null if no match is found and the non-null default is invalid
*/
public static SnapshotLockingMode parse(String value, String defaultValue) {
SnapshotLockingMode mode = parse(value);
if (mode == null && defaultValue != null) mode = parse(defaultValue);
return mode;
}
}
public static final Field LOGICAL_NAME = Field.create("database.server.name")
.withDisplayName("Namespace")
.withType(Type.STRING)
.withWidth(Width.MEDIUM)
.withImportance(Importance.HIGH)
.withValidation(Field::isRequired, CommonConnectorConfig::validateServerNameIsDifferentFromHistoryTopicName)
.withDescription("Unique name that identifies the database server and all recorded offsets, and"
+ "that is used as a prefix for all schemas and topics. "
+ "Each distinct SQL Server installation should have a separate namespace and monitored by "
+ "at most one Debezium connector.");
public static final Field DATABASE_NAME = Field.create(DATABASE_CONFIG_PREFIX + JdbcConfiguration.DATABASE)
.withDisplayName("Database name")
.withType(Type.STRING)
.withWidth(Width.MEDIUM)
.withImportance(Importance.HIGH)
.withValidation(Field::isRequired)
.withDescription("The name of the database the connector should be monitoring. When working with a "
+ "multi-tenant set-up, must be set to the CDB name.");
public static final Field SNAPSHOT_MODE = Field.create("snapshot.mode")
.withDisplayName("Snapshot mode")
.withEnum(SnapshotMode.class, SnapshotMode.INITIAL)
.withWidth(Width.SHORT)
.withImportance(Importance.LOW)
.withDescription("The criteria for running a snapshot upon startup of the connector. "
+ "Options include: "
+ "'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; "
+ "'initial_schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name. ");
public static final Field SNAPSHOT_LOCKING_MODE = Field.create("snapshot.locking.mode")
.withDisplayName("Snapshot locking mode")
.withEnum(SnapshotLockingMode.class, SnapshotLockingMode.NONE)
.withWidth(Width.SHORT)
.withImportance(Importance.LOW)
.withDescription("Controls how long the connector locks the montiored tables for snapshot execution. The default is '" + SnapshotLockingMode.NONE.getValue() + "', "
+ "which means that the connector does not hold any locks for all monitored tables."
+ "Using a value of '" + SnapshotLockingMode.EXCLUSIVE.getValue() + "' ensures that the connector holds the exlusive lock (and thus prevents any reads and updates) for all monitored tables.");
/**
* The set of {@link Field}s defined as part of this configuration.
*/
public static Field.Set ALL_FIELDS = Field.setOf(
LOGICAL_NAME,
DATABASE_NAME,
SNAPSHOT_MODE,
HistorizedRelationalDatabaseConnectorConfig.DATABASE_HISTORY,
RelationalDatabaseConnectorConfig.TABLE_WHITELIST,
RelationalDatabaseConnectorConfig.TABLE_BLACKLIST,
RelationalDatabaseConnectorConfig.TABLE_IGNORE_BUILTIN,
RelationalDatabaseConnectorConfig.COLUMN_BLACKLIST,
CommonConnectorConfig.POLL_INTERVAL_MS,
CommonConnectorConfig.MAX_BATCH_SIZE,
CommonConnectorConfig.MAX_QUEUE_SIZE,
CommonConnectorConfig.SNAPSHOT_DELAY_MS,
Heartbeat.HEARTBEAT_INTERVAL, Heartbeat.HEARTBEAT_TOPICS_PREFIX
);
public static ConfigDef configDef() {
ConfigDef config = new ConfigDef();
Field.group(config, "SQL Server", LOGICAL_NAME, DATABASE_NAME, SNAPSHOT_MODE);
Field.group(config, "History Storage", KafkaDatabaseHistory.BOOTSTRAP_SERVERS,
KafkaDatabaseHistory.TOPIC, KafkaDatabaseHistory.RECOVERY_POLL_ATTEMPTS,
KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS, HistorizedRelationalDatabaseConnectorConfig.DATABASE_HISTORY);
Field.group(config, "Events", RelationalDatabaseConnectorConfig.TABLE_WHITELIST,
RelationalDatabaseConnectorConfig.TABLE_BLACKLIST,
RelationalDatabaseConnectorConfig.COLUMN_BLACKLIST,
RelationalDatabaseConnectorConfig.TABLE_IGNORE_BUILTIN,
Heartbeat.HEARTBEAT_INTERVAL, Heartbeat.HEARTBEAT_TOPICS_PREFIX
);
Field.group(config, "Connector", CommonConnectorConfig.POLL_INTERVAL_MS, CommonConnectorConfig.MAX_BATCH_SIZE,
CommonConnectorConfig.MAX_QUEUE_SIZE, CommonConnectorConfig.SNAPSHOT_DELAY_MS);
return config;
}
private final String databaseName;
private final SnapshotMode snapshotMode;
private final SnapshotLockingMode snapshotLockingMode;
private final Predicate<ColumnId> columnFilter;
public SqlServerConnectorConfig(Configuration config) {
super(config, config.getString(LOGICAL_NAME), new SystemTablesPredicate(), x -> x.schema() + "." + x.table());
this.databaseName = config.getString(DATABASE_NAME);
this.snapshotMode = SnapshotMode.parse(config.getString(SNAPSHOT_MODE), SNAPSHOT_MODE.defaultValueAsString());
this.snapshotLockingMode = SnapshotLockingMode.parse(config.getString(SNAPSHOT_LOCKING_MODE), SNAPSHOT_LOCKING_MODE.defaultValueAsString());
this.columnFilter = Predicates.excludes(config.getString(RelationalDatabaseConnectorConfig.COLUMN_BLACKLIST),
columnId -> String.format("%s.%s.%s", columnId.schema(), columnId.table(), columnId.columnName()));
}
public String getDatabaseName() {
return databaseName;
}
public SnapshotLockingMode getSnapshotLockingMode() {
return this.snapshotLockingMode;
}
public SnapshotMode getSnapshotMode() {
return snapshotMode;
}
public Predicate<ColumnId> getColumnFilter() {
return columnFilter;
}
private static class SystemTablesPredicate implements TableFilter {
@Override
public boolean isIncluded(TableId t) {
return !(t.schema().toLowerCase().equals("cdc") ||
t.schema().toLowerCase().equals("sys") ||
t.table().toLowerCase().equals("systranschemas"));
}
}
@Override
protected HistoryRecordComparator getHistoryRecordComparator() {
return new HistoryRecordComparator() {
@Override
protected boolean isPositionAtOrBefore(Document recorded, Document desired) {
return Lsn.valueOf(recorded.getString(SourceInfo.CHANGE_LSN_KEY))
.compareTo(Lsn.valueOf(desired.getString(SourceInfo.CHANGE_LSN_KEY))) < 1;
}
};
}
}
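As an illustration, a hypothetical configuration exercising the fields defined above; all connection and history values are made up (assumes an import of io.debezium.config.Configuration):

Configuration config = Configuration.create()
        .with("database.hostname", "localhost")
        .with("database.port", 1433)
        .with("database.user", "sa")
        .with("database.password", "Password!")
        .with("database.dbname", "testDB")
        .with("database.server.name", "server1")
        .with("table.whitelist", "dbo.customers")
        .with("snapshot.mode", "initial")
        .with("snapshot.locking.mode", "snapshot")
        .with("database.history.kafka.bootstrap.servers", "localhost:9092")
        .with("database.history.kafka.topic", "schema-changes.testDB")
        .build();

SqlServerConnectorConfig connectorConfig = new SqlServerConnectorConfig(config);
connectorConfig.getSnapshotMode();          // SnapshotMode.INITIAL
connectorConfig.getSnapshotLockingMode();   // SnapshotLockingMode.SNAPSHOT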

View File

@@ -1,222 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.config.Configuration;
import io.debezium.config.Field;
import io.debezium.connector.base.ChangeEventQueue;
import io.debezium.connector.common.BaseSourceTask;
import io.debezium.pipeline.ChangeEventSourceCoordinator;
import io.debezium.pipeline.DataChangeEvent;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.HistorizedRelationalDatabaseConnectorConfig;
import io.debezium.relational.TableId;
import io.debezium.relational.history.DatabaseHistory;
import io.debezium.schema.TopicSelector;
import io.debezium.util.Clock;
import io.debezium.util.SchemaNameAdjuster;
/**
* The main task executing streaming from SQL Server.
* Responsible for the lifecycle management of the streaming code.
*
* @author Jiri Pechanec
*
*/
public class SqlServerConnectorTask extends BaseSourceTask {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerConnectorTask.class);
private static final String CONTEXT_NAME = "sql-server-connector-task";
private static enum State {
RUNNING, STOPPED;
}
private final AtomicReference<State> state = new AtomicReference<State>(State.STOPPED);
private volatile SqlServerTaskContext taskContext;
private volatile ChangeEventQueue<DataChangeEvent> queue;
private volatile SqlServerConnection jdbcConnection;
private volatile ChangeEventSourceCoordinator coordinator;
private volatile ErrorHandler errorHandler;
private volatile SqlServerDatabaseSchema schema;
private volatile Map<String, ?> lastOffset;
@Override
public String version() {
return Module.version();
}
@Override
public void start(Configuration config) {
if (!state.compareAndSet(State.STOPPED, State.RUNNING)) {
LOGGER.info("Connector has already been started");
return;
}
final SqlServerConnectorConfig connectorConfig = new SqlServerConnectorConfig(config);
final TopicSelector<TableId> topicSelector = SqlServerTopicSelector.defaultSelector(connectorConfig);
final SchemaNameAdjuster schemaNameAdjuster = SchemaNameAdjuster.create(LOGGER);
final Configuration jdbcConfig = config.filter(x -> !(x.startsWith(DatabaseHistory.CONFIGURATION_FIELD_PREFIX_STRING) || x.equals(HistorizedRelationalDatabaseConnectorConfig.DATABASE_HISTORY.name())))
.subset("database.", true);
jdbcConnection = new SqlServerConnection(jdbcConfig);
try {
jdbcConnection.setAutoCommit(false);
}
catch (SQLException e) {
throw new ConnectException(e);
}
this.schema = new SqlServerDatabaseSchema(connectorConfig, schemaNameAdjuster, topicSelector, jdbcConnection);
this.schema.initializeStorage();
final OffsetContext previousOffset = getPreviousOffset(new SqlServerOffsetContext.Loader(connectorConfig.getLogicalName()));
if (previousOffset != null) {
schema.recover(previousOffset);
}
taskContext = new SqlServerTaskContext(connectorConfig, schema);
final Clock clock = Clock.system();
// Set up the task record queue ...
this.queue = new ChangeEventQueue.Builder<DataChangeEvent>()
.pollInterval(connectorConfig.getPollInterval())
.maxBatchSize(connectorConfig.getMaxBatchSize())
.maxQueueSize(connectorConfig.getMaxQueueSize())
.loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
.build();
errorHandler = new ErrorHandler(SqlServerConnector.class, connectorConfig.getLogicalName(), queue, this::cleanupResources);
final EventDispatcher<TableId> dispatcher = new EventDispatcher<>(
connectorConfig,
topicSelector,
schema,
queue,
connectorConfig.getTableFilters().dataCollectionFilter(),
DataChangeEvent::new);
coordinator = new ChangeEventSourceCoordinator(
previousOffset,
errorHandler,
SqlServerConnector.class,
connectorConfig.getLogicalName(),
new SqlServerChangeEventSourceFactory(connectorConfig, jdbcConnection, errorHandler, dispatcher, clock, schema),
dispatcher
);
coordinator.start(taskContext);
}
/**
* Loads the connector's persistent offset (if present) via the given loader.
*/
@Override
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
Map<String, ?> partition = loader.getPartition();
Map<String, Object> previousOffset = context.offsetStorageReader()
.offsets(Collections.singleton(partition))
.get(partition);
if (previousOffset != null) {
OffsetContext offsetContext = loader.load(previousOffset);
LOGGER.info("Found previous offset {}", offsetContext);
return offsetContext;
}
else {
return null;
}
}
@Override
public List<SourceRecord> poll() throws InterruptedException {
final List<DataChangeEvent> records = queue.poll();
final List<SourceRecord> sourceRecords = records.stream()
.map(DataChangeEvent::getRecord)
.collect(Collectors.toList());
if (!sourceRecords.isEmpty()) {
this.lastOffset = sourceRecords.get(sourceRecords.size() - 1).sourceOffset();
}
return sourceRecords;
}
@Override
public void commit() throws InterruptedException {
if (coordinator != null) {
coordinator.commitOffset(lastOffset);
}
}
@Override
public void stop() {
cleanupResources();
}
private void cleanupResources() {
if (!state.compareAndSet(State.RUNNING, State.STOPPED)) {
LOGGER.info("Connector has already been stopped");
return;
}
try {
if (coordinator != null) {
coordinator.stop();
}
}
catch (InterruptedException e) {
Thread.interrupted();
LOGGER.error("Interrupted while stopping coordinator", e);
throw new ConnectException("Interrupted while stopping coordinator, failing the task");
}
try {
if (errorHandler != null) {
errorHandler.stop();
}
}
catch (InterruptedException e) {
Thread.interrupted();
LOGGER.error("Interrupted while stopping", e);
}
try {
if (jdbcConnection != null) {
jdbcConnection.close();
}
}
catch (SQLException e) {
LOGGER.error("Exception while closing JDBC connection", e);
}
if (schema != null) {
schema.close();
}
}
@Override
protected Iterable<Field> getAllConfigurationFields() {
return SqlServerConnectorConfig.ALL_FIELDS;
}
}
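To make the config filtering in start() concrete, a small sketch of how the JDBC subset is derived; the keys shown are hypothetical (assumes an import of io.debezium.config.Configuration):

Configuration config = Configuration.create()
        .with("database.hostname", "localhost")
        .with("database.dbname", "testDB")
        .with("database.history.kafka.topic", "schema-changes.testDB")   // history keys are filtered out
        .build();

Configuration jdbcConfig = config
        .filter(key -> !key.startsWith("database.history"))
        .subset("database.", true);
// jdbcConfig now contains: hostname=localhost, dbname=testDB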

View File

@@ -1,63 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.relational.HistorizedRelationalDatabaseSchema;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.TableSchemaBuilder;
import io.debezium.relational.ddl.DdlParser;
import io.debezium.relational.history.TableChanges;
import io.debezium.schema.SchemaChangeEvent;
import io.debezium.schema.SchemaChangeEvent.SchemaChangeEventType;
import io.debezium.schema.TopicSelector;
import io.debezium.util.SchemaNameAdjuster;
/**
* Logical representation of SQL Server schema.
*
* @author Jiri Pechanec
*/
public class SqlServerDatabaseSchema extends HistorizedRelationalDatabaseSchema {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerDatabaseSchema.class);
public SqlServerDatabaseSchema(SqlServerConnectorConfig connectorConfig, SchemaNameAdjuster schemaNameAdjuster, TopicSelector<TableId> topicSelector, SqlServerConnection connection) {
super(connectorConfig, topicSelector, connectorConfig.getTableFilters().dataCollectionFilter(), connectorConfig.getColumnFilter(),
new TableSchemaBuilder(
new SqlServerValueConverters(connectorConfig.getDecimalMode()),
schemaNameAdjuster,
SourceInfo.SCHEMA
),
false);
}
@Override
public void applySchemaChange(SchemaChangeEvent schemaChange) {
LOGGER.debug("Applying schema change event {}", schemaChange);
// just a single table per DDL event for SQL Server
Table table = schemaChange.getTables().iterator().next();
buildAndRegisterSchema(table);
tables().overwriteTable(table);
TableChanges tableChanges = null;
if (schemaChange.getType() == SchemaChangeEventType.CREATE) {
tableChanges = new TableChanges();
tableChanges.create(table);
}
record(schemaChange, tableChanges);
}
@Override
protected DdlParser getDdlParser() {
return null;
}
}

View File

@@ -1,142 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.time.Instant;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.util.Collect;
public class SqlServerOffsetContext implements OffsetContext {
private static final String SERVER_PARTITION_KEY = "server";
private static final String SNAPSHOT_COMPLETED_KEY = "snapshot_completed";
private final Schema sourceInfoSchema;
private final SourceInfo sourceInfo;
private final Map<String, String> partition;
private boolean snapshotCompleted;
public SqlServerOffsetContext(String serverName, Lsn lsn, boolean snapshot, boolean snapshotCompleted) {
partition = Collections.singletonMap(SERVER_PARTITION_KEY, serverName);
sourceInfo = new SourceInfo(serverName);
sourceInfo.setChangeLsn(lsn);
sourceInfoSchema = sourceInfo.schema();
this.snapshotCompleted = snapshotCompleted;
if (this.snapshotCompleted) {
postSnapshotCompletion();
}
else {
sourceInfo.setSnapshot(snapshot);
}
}
@Override
public Map<String, ?> getPartition() {
return partition;
}
@Override
public Map<String, ?> getOffset() {
if (sourceInfo.isSnapshot()) {
return Collect.hashMapOf(
SourceInfo.SNAPSHOT_KEY, true,
SNAPSHOT_COMPLETED_KEY, snapshotCompleted,
SourceInfo.CHANGE_LSN_KEY, sourceInfo.getChangeLsn().toString()
);
}
else {
return Collections.singletonMap(SourceInfo.CHANGE_LSN_KEY, sourceInfo.getChangeLsn().toString());
}
}
@Override
public Schema getSourceInfoSchema() {
return sourceInfoSchema;
}
@Override
public Struct getSourceInfo() {
return sourceInfo.struct();
}
public void setChangeLsn(Lsn lsn) {
sourceInfo.setChangeLsn(lsn);
}
public Lsn getChangeLsn() {
return sourceInfo.getChangeLsn() == null ? Lsn.NULL : sourceInfo.getChangeLsn();
}
public void setCommitLsn(Lsn lsn) {
sourceInfo.setCommitLsn(lsn);
}
public void setSourceTime(Instant instant) {
sourceInfo.setSourceTime(instant);
}
@Override
public boolean isSnapshotRunning() {
return sourceInfo.isSnapshot() && !snapshotCompleted;
}
@Override
public void preSnapshotStart() {
sourceInfo.setSnapshot(true);
snapshotCompleted = false;
}
@Override
public void preSnapshotCompletion() {
snapshotCompleted = true;
}
@Override
public void postSnapshotCompletion() {
sourceInfo.setSnapshot(false);
}
public static class Loader implements OffsetContext.Loader {
private final String logicalName;
public Loader(String logicalName) {
this.logicalName = logicalName;
}
@Override
public Map<String, ?> getPartition() {
return Collections.singletonMap(SERVER_PARTITION_KEY, logicalName);
}
@Override
public OffsetContext load(Map<String, ?> offset) {
final Lsn lsn = Lsn.valueOf((String)offset.get(SourceInfo.CHANGE_LSN_KEY));
boolean snapshot = Boolean.TRUE.equals(offset.get(SourceInfo.SNAPSHOT_KEY));
boolean snapshotCompleted = Boolean.TRUE.equals(offset.get(SNAPSHOT_COMPLETED_KEY));
return new SqlServerOffsetContext(logicalName, lsn, snapshot, snapshotCompleted);
}
}
@Override
public String toString() {
return "SqlServerOffsetContext [" +
"sourceInfoSchema=" + sourceInfoSchema +
", sourceInfo=" + sourceInfo +
", partition=" + partition +
", snapshotCompleted=" + snapshotCompleted +
"]";
}
}
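
Editorial note, not part of the removed sources: the offset map returned by getOffset() is what Kafka Connect persists and later hands back to Loader.load(). A minimal standalone sketch (plain java.util maps instead of the Debezium types, with made-up LSN values) of how the two shapes of the map round-trip:

import java.util.HashMap;
import java.util.Map;

public class OffsetRoundTripSketch {
    public static void main(String[] args) {
        // Shape of the offset while a snapshot is running: snapshot flag, completion flag, change LSN
        Map<String, Object> snapshotOffset = new HashMap<>();
        snapshotOffset.put("snapshot", true);
        snapshotOffset.put("snapshot_completed", false);
        snapshotOffset.put("change_lsn", "00000025:00000448:0003"); // example value only

        // Shape of the offset once streaming has taken over: only the change LSN is tracked
        Map<String, Object> streamingOffset = new HashMap<>();
        streamingOffset.put("change_lsn", "00000025:00000450:0001");

        // A loader rebuilds the context from whichever map Kafka Connect hands back
        boolean snapshot = Boolean.TRUE.equals(streamingOffset.get("snapshot"));
        boolean snapshotCompleted = Boolean.TRUE.equals(streamingOffset.get("snapshot_completed"));
        String lsn = (String) streamingOffset.get("change_lsn");
        System.out.printf("snapshot=%s, completed=%s, lsn=%s%n", snapshot, snapshotCompleted, lsn);
    }
}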

View File

@ -1,50 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.pipeline.spi.SchemaChangeEventEmitter;
import io.debezium.relational.Table;
import io.debezium.schema.SchemaChangeEvent;
import io.debezium.schema.SchemaChangeEvent.SchemaChangeEventType;
/**
* {@link SchemaChangeEventEmitter} implementation based on SQL Server.
*
* @author Jiri Pechanec
*/
public class SqlServerSchemaChangeEventEmitter implements SchemaChangeEventEmitter {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerSchemaChangeEventEmitter.class);
private final SqlServerOffsetContext offsetContext;
private final ChangeTable changeTable;
private final Table tableSchema;
public SqlServerSchemaChangeEventEmitter(SqlServerOffsetContext offsetContext, ChangeTable changeTable, Table tableSchema) {
this.offsetContext = offsetContext;
this.changeTable = changeTable;
this.tableSchema = tableSchema;
}
@Override
public void emitSchemaChangeEvent(Receiver receiver) throws InterruptedException {
final SchemaChangeEvent event = new SchemaChangeEvent(
offsetContext.getPartition(),
offsetContext.getOffset(),
changeTable.getSourceTableId().catalog(),
changeTable.getSourceTableId().schema(),
"N/A",
tableSchema,
SchemaChangeEventType.CREATE,
false
);
receiver.schemaChangeEvent(event);
}
}

View File

@ -1,199 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.sql.Statement;
import java.time.Instant;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotLockingMode;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.source.spi.SnapshotProgressListener;
import io.debezium.pipeline.spi.ChangeRecordEmitter;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.HistorizedRelationalSnapshotChangeEventSource;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.schema.SchemaChangeEvent;
import io.debezium.schema.SchemaChangeEvent.SchemaChangeEventType;
import io.debezium.util.Clock;
public class SqlServerSnapshotChangeEventSource extends HistorizedRelationalSnapshotChangeEventSource {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerSnapshotChangeEventSource.class);
/**
* Code 4096 corresponds to the SNAPSHOT isolation level, which is not part of the SQL standard but is SQL Server specific.
*/
private static final int TRANSACTION_SNAPSHOT = 4096;
private final SqlServerConnectorConfig connectorConfig;
private final SqlServerConnection jdbcConnection;
public SqlServerSnapshotChangeEventSource(SqlServerConnectorConfig connectorConfig, SqlServerOffsetContext previousOffset, SqlServerConnection jdbcConnection, SqlServerDatabaseSchema schema, EventDispatcher<TableId> dispatcher, Clock clock, SnapshotProgressListener snapshotProgressListener) {
super(connectorConfig, previousOffset, jdbcConnection, schema, dispatcher, clock, snapshotProgressListener);
this.connectorConfig = connectorConfig;
this.jdbcConnection = jdbcConnection;
}
@Override
protected SnapshottingTask getSnapshottingTask(OffsetContext previousOffset) {
boolean snapshotSchema = true;
boolean snapshotData = true;
// found a previous offset and the earlier snapshot has completed
if (previousOffset != null && !previousOffset.isSnapshotRunning()) {
LOGGER.info("A previous offset indicating a completed snapshot has been found. Neither schema nor data will be snapshotted.");
snapshotSchema = false;
snapshotData = false;
}
else {
LOGGER.info("No previous offset has been found");
if (connectorConfig.getSnapshotMode().includeData()) {
LOGGER.info("According to the connector configuration both schema and data will be snapshotted");
}
else {
LOGGER.info("According to the connector configuration only schema will be snapshotted");
}
snapshotData = connectorConfig.getSnapshotMode().includeData();
}
return new SnapshottingTask(snapshotSchema, snapshotData);
}
@Override
protected SnapshotContext prepare(ChangeEventSourceContext context) throws Exception {
return new SqlServerSnapshotContext(jdbcConnection.getRealDatabaseName());
}
@Override
protected void connectionCreated(SnapshotContext snapshotContext) throws Exception {
if (connectorConfig.getSnapshotLockingMode() == SnapshotLockingMode.SNAPSHOT) {
// Terminate any transaction in progress so we can change the isolation level
jdbcConnection.connection().rollback();
// With one exception, you can switch from one isolation level to another at any time during a transaction.
// The exception occurs when changing from any isolation level to SNAPSHOT isolation.
// That is why SNAPSHOT isolation level has to be set at the very beginning of the transaction.
jdbcConnection.connection().setTransactionIsolation(TRANSACTION_SNAPSHOT);
}
}
@Override
protected Set<TableId> getAllTableIds(SnapshotContext ctx) throws Exception {
return jdbcConnection.readTableNames(ctx.catalogName, null, null, new String[] {"TABLE"});
}
@Override
protected void lockTablesForSchemaSnapshot(ChangeEventSourceContext sourceContext, SnapshotContext snapshotContext) throws SQLException, InterruptedException {
if (connectorConfig.getSnapshotLockingMode() == SnapshotLockingMode.NONE) {
jdbcConnection.connection().setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
((SqlServerSnapshotContext)snapshotContext).preSchemaSnapshotSavepoint = jdbcConnection.connection().setSavepoint("dbz_schema_snapshot");
LOGGER.info("Schema locking was disabled in connector configuration");
}
else if (connectorConfig.getSnapshotLockingMode() == SnapshotLockingMode.EXCLUSIVE) {
LOGGER.info("Executing schema locking");
((SqlServerSnapshotContext)snapshotContext).preSchemaSnapshotSavepoint = jdbcConnection.connection().setSavepoint("dbz_schema_snapshot");
try (Statement statement = jdbcConnection.connection().createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
for (TableId tableId : snapshotContext.capturedTables) {
if (!sourceContext.isRunning()) {
throw new InterruptedException("Interrupted while locking table " + tableId);
}
LOGGER.info("Locking table {}", tableId);
String query = String.format("SELECT * FROM [%s] WITH (TABLOCKX)", tableId.table());
statement.executeQuery(query).close();
}
}
}
else if (connectorConfig.getSnapshotLockingMode() == SnapshotLockingMode.SNAPSHOT) {
((SqlServerSnapshotContext)snapshotContext).preSchemaSnapshotSavepoint = jdbcConnection.connection().setSavepoint("dbz_schema_snapshot");
}
else {
throw new IllegalStateException("Unknown locking mode specified.");
}
}
@Override
protected void releaseSchemaSnapshotLocks(SnapshotContext snapshotContext) throws SQLException {
jdbcConnection.connection().rollback(((SqlServerSnapshotContext)snapshotContext).preSchemaSnapshotSavepoint);
}
@Override
protected void determineSnapshotOffset(SnapshotContext ctx) throws Exception {
ctx.offset = new SqlServerOffsetContext(connectorConfig.getLogicalName(), jdbcConnection.getMaxLsn(), false, false);
}
@Override
protected void readTableStructure(ChangeEventSourceContext sourceContext, SnapshotContext snapshotContext) throws SQLException, InterruptedException {
Set<String> schemas = snapshotContext.capturedTables.stream()
.map(TableId::schema)
.collect(Collectors.toSet());
// reading info only for the schemas we're interested in as per the set of captured tables;
// while the passed table name filter alone would skip all non-included tables, reading the schema
// would take much longer that way
for (String schema : schemas) {
if (!sourceContext.isRunning()) {
throw new InterruptedException("Interrupted while reading structure of schema " + schema);
}
LOGGER.info("Reading structure of schema '{}'", snapshotContext.catalogName);
jdbcConnection.readSchema(
snapshotContext.tables,
snapshotContext.catalogName,
schema,
connectorConfig.getTableFilters().dataCollectionFilter(),
null,
false
);
}
}
@Override
protected SchemaChangeEvent getCreateTableEvent(SnapshotContext snapshotContext, Table table) throws SQLException {
return new SchemaChangeEvent(snapshotContext.offset.getPartition(), snapshotContext.offset.getOffset(), snapshotContext.catalogName,
table.id().schema(), null, table, SchemaChangeEventType.CREATE, true);
}
@Override
protected void complete() {
}
@Override
protected String getSnapshotSelect(SnapshotContext snapshotContext, TableId tableId) {
return String.format("SELECT * FROM [%s].[%s]", tableId.schema(), tableId.table());
}
@Override
protected ChangeRecordEmitter getChangeRecordEmitter(SnapshotContext snapshotContext, Object[] row) {
((SqlServerOffsetContext) snapshotContext.offset).setSourceTime(Instant.ofEpochMilli(getClock().currentTimeInMillis()));
return new SnapshotChangeRecordEmitter(snapshotContext.offset, row, getClock());
}
/**
* Mutable context which is populated in the course of snapshotting.
*/
private static class SqlServerSnapshotContext extends SnapshotContext {
private Savepoint preSchemaSnapshotSavepoint;
public SqlServerSnapshotContext(String catalogName) throws SQLException {
super(catalogName);
}
}
}
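
Editorial note, not part of the removed sources: the locking modes above come down to ordinary JDBC calls — rolling back any open transaction, switching the isolation level (4096 is SQL Server's SNAPSHOT level, for which java.sql.Connection has no constant), and guarding the schema read with a savepoint so the locks can be released without ending the transaction. A minimal standalone sketch with plain JDBC; the URL, credentials and table name are placeholders, the mssql-jdbc driver is assumed to be on the classpath, and SNAPSHOT isolation additionally requires ALLOW_SNAPSHOT_ISOLATION on the database:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Savepoint;
import java.sql.Statement;

public class SnapshotLockingSketch {
    // SQL Server specific SNAPSHOT isolation level; java.sql.Connection has no constant for it
    private static final int TRANSACTION_SNAPSHOT = 4096;

    public static void main(String[] args) throws Exception {
        boolean exclusive = true; // pick one of the two locking strategies for the sketch
        try (Connection conn = DriverManager.getConnection(
                "jdbc:sqlserver://localhost:1433;databaseName=testDB", "sa", "Password!")) {
            conn.setAutoCommit(false);

            if (!exclusive) {
                // SNAPSHOT mode: end any transaction in progress, then switch the isolation level;
                // the switch must happen before the first statement of the transaction
                conn.rollback();
                conn.setTransactionIsolation(TRANSACTION_SNAPSHOT);
            }

            // The schema read is guarded by a savepoint so the locks can be dropped afterwards
            Savepoint schemaSnapshot = conn.setSavepoint("schema_snapshot");
            if (exclusive) {
                try (Statement stmt = conn.createStatement()) {
                    // EXCLUSIVE mode: take an exclusive table lock on every captured table
                    stmt.executeQuery("SELECT * FROM [dbo].[table1] WITH (TABLOCKX)").close();
                }
            }
            // ... read the table structures here ...
            conn.rollback(schemaSnapshot); // release the schema locks, keep the surrounding transaction
            conn.rollback();
        }
    }
}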

View File

@ -1,357 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.source.spi.StreamingChangeEventSource;
import io.debezium.relational.TableId;
import io.debezium.util.Clock;
import io.debezium.util.Metronome;
/**
* A {@link StreamingChangeEventSource} based on SQL Server change data capture functionality.
* A main loop polls the database DDL change and change data tables and turns them into change events.
*
* @author Jiri Pechanec
*/
public class SqlServerStreamingChangeEventSource implements StreamingChangeEventSource {
private static final int COL_COMMIT_LSN = 1;
private static final int COL_ROW_LSN = 2;
private static final int COL_OPERATION = 3;
private static final int COL_DATA = 5;
private static final Pattern MISSING_CDC_FUNCTION_CHANGES_ERROR = Pattern.compile("Invalid object name 'cdc.fn_cdc_get_all_changes_(.*)'\\.");
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerStreamingChangeEventSource.class);
private final SqlServerConnection connection;
private final EventDispatcher<TableId> dispatcher;
private final ErrorHandler errorHandler;
private final Clock clock;
private final SqlServerDatabaseSchema schema;
private final SqlServerOffsetContext offsetContext;
private final Duration pollInterval;
private final SqlServerConnectorConfig connectorConfig;
public SqlServerStreamingChangeEventSource(SqlServerConnectorConfig connectorConfig, SqlServerOffsetContext offsetContext, SqlServerConnection connection, EventDispatcher<TableId> dispatcher, ErrorHandler errorHandler, Clock clock, SqlServerDatabaseSchema schema) {
this.connectorConfig = connectorConfig;
this.connection = connection;
this.dispatcher = dispatcher;
this.errorHandler = errorHandler;
this.clock = clock;
this.schema = schema;
this.offsetContext = offsetContext;
this.pollInterval = connectorConfig.getPollInterval();
}
@Override
public void execute(ChangeEventSourceContext context) throws InterruptedException {
final Metronome metronome = Metronome.sleeper(pollInterval, clock);
final Queue<ChangeTable> schemaChangeCheckpoints = new PriorityQueue<>((x, y) -> x.getStopLsn().compareTo(y.getStopLsn()));
try {
final AtomicReference<ChangeTable[]> tablesSlot = new AtomicReference<ChangeTable[]>(getCdcTablesToQuery());
final Lsn lastProcessedLsnOnStart = offsetContext.getChangeLsn();
LOGGER.info("Last LSN recorded in offsets is {}", lastProcessedLsnOnStart);
Lsn lastProcessedLsn = offsetContext.getChangeLsn();
while (context.isRunning()) {
final Lsn currentMaxLsn = connection.getMaxLsn();
// Probably cannot happen but it is better to guard against such
// situation
if (!currentMaxLsn.isAvailable()) {
LOGGER.debug("No maximum LSN recorded in the database");
metronome.pause();
continue;
}
// There is no change in the database
if (currentMaxLsn.equals(lastProcessedLsn)) {
LOGGER.debug("No change in the database");
metronome.pause();
continue;
}
// Reading interval is inclusive so we need to move LSN forward
final Lsn fromLsn = lastProcessedLsn.isAvailable() ? connection.incrementLsn(lastProcessedLsn)
: lastProcessedLsn;
while (!schemaChangeCheckpoints.isEmpty()) {
migrateTable(schemaChangeCheckpoints);
}
if (!connection.listOfNewChangeTables(fromLsn, currentMaxLsn).isEmpty()) {
final ChangeTable[] tables = getCdcTablesToQuery();
tablesSlot.set(tables);
for (ChangeTable table: tables) {
if (table.getStartLsn().isBetween(fromLsn, currentMaxLsn)) {
LOGGER.info("Schema will be changed for {}", table);
schemaChangeCheckpoints.add(table);
}
}
}
try {
connection.getChangesForTables(tablesSlot.get(), fromLsn, currentMaxLsn, resultSets -> {
final int tableCount = resultSets.length;
final ChangeTablePointer[] changeTables = new ChangeTablePointer[tableCount];
final ChangeTable[] tables = tablesSlot.get();
for (int i = 0; i < tableCount; i++) {
changeTables[i] = new ChangeTablePointer(tables[i], resultSets[i]);
changeTables[i].next();
}
for (;;) {
ChangeTablePointer tableWithSmallestLsn = null;
for (ChangeTablePointer changeTable: changeTables) {
if (changeTable.isCompleted()) {
continue;
}
if (tableWithSmallestLsn == null || changeTable.compareTo(tableWithSmallestLsn) < 0) {
tableWithSmallestLsn = changeTable;
}
}
if (tableWithSmallestLsn == null) {
// No more LSNs available
break;
}
if (!tableWithSmallestLsn.getRowLsn().isAvailable()) {
LOGGER.error("Skipping change {} as its LSN is NULL which is not expected", tableWithSmallestLsn);
tableWithSmallestLsn.next();
continue;
}
if (tableWithSmallestLsn.getRowLsn().compareTo(lastProcessedLsnOnStart) <= 0) {
LOGGER.info("Skipping change {} as its LSN is smaller than the last recorded LSN {}", tableWithSmallestLsn, lastProcessedLsnOnStart);
tableWithSmallestLsn.next();
continue;
}
if (tableWithSmallestLsn.getChangeTable().getStopLsn().isAvailable() &&
tableWithSmallestLsn.getChangeTable().getStopLsn().compareTo(tableWithSmallestLsn.getRowLsn()) <= 0) {
LOGGER.debug("Skipping table change {} as its stop LSN is smaller than the last recorded LSN {}", tableWithSmallestLsn, tableWithSmallestLsn.getRowLsn());
tableWithSmallestLsn.next();
continue;
}
LOGGER.trace("Processing change {}", tableWithSmallestLsn);
if (!schemaChangeCheckpoints.isEmpty()) {
if (tableWithSmallestLsn.getRowLsn().compareTo(schemaChangeCheckpoints.peek().getStopLsn()) >= 0) {
migrateTable(schemaChangeCheckpoints);
}
}
final TableId tableId = tableWithSmallestLsn.getChangeTable().getSourceTableId();
final Lsn commitLsn = tableWithSmallestLsn.getCommitLsn();
final Lsn rowLsn = tableWithSmallestLsn.getRowLsn();
final int operation = tableWithSmallestLsn.getOperation();
final Object[] data = tableWithSmallestLsn.getData();
// UPDATE consists of two consecutive events, first event contains
// the row before it was updated and the second the row after
// it was updated
if (operation == SqlServerChangeRecordEmitter.OP_UPDATE_BEFORE) {
if (!tableWithSmallestLsn.next() || tableWithSmallestLsn.getOperation() != SqlServerChangeRecordEmitter.OP_UPDATE_AFTER) {
throw new IllegalStateException("The update before event at " + rowLsn + " for table " + tableId + " was not followed by an after event.\n Please report this as a bug together with the events around the given LSN.");
}
}
final Object[] dataNext = (operation == SqlServerChangeRecordEmitter.OP_UPDATE_BEFORE) ? tableWithSmallestLsn.getData() : null;
offsetContext.setChangeLsn(rowLsn);
offsetContext.setCommitLsn(commitLsn);
offsetContext.setSourceTime(connection.timestampOfLsn(commitLsn));
dispatcher
.dispatchDataChangeEvent(
tableId,
new SqlServerChangeRecordEmitter(
offsetContext,
operation,
data,
dataNext,
schema.tableFor(tableId),
clock
)
);
tableWithSmallestLsn.next();
}
});
lastProcessedLsn = currentMaxLsn;
// Terminate the transaction, otherwise CDC cannot be disabled for tables
connection.rollback();
}
catch (SQLException e) {
tablesSlot.set(processErrorFromChangeTableQuery(e, tablesSlot.get()));
}
}
}
catch (Exception e) {
errorHandler.setProducerThrowable(e);
}
}
private void migrateTable(final Queue<ChangeTable> schemaChangeCheckpoints)
throws InterruptedException, SQLException {
final ChangeTable newTable = schemaChangeCheckpoints.poll();
LOGGER.info("Migrating schema to {}", newTable);
dispatcher.dispatchSchemaChangeEvent(newTable.getSourceTableId(), new SqlServerSchemaChangeEventEmitter(offsetContext, newTable, connection.getTableSchemaFromTable(newTable)));
}
private ChangeTable[] processErrorFromChangeTableQuery(SQLException exception, ChangeTable[] currentChangeTables) throws Exception {
final Matcher m = MISSING_CDC_FUNCTION_CHANGES_ERROR.matcher(exception.getMessage());
if (m.matches()) {
final String captureName = m.group(1);
LOGGER.info("Table is no longer captured with capture instance {}", captureName);
return Arrays.asList(currentChangeTables).stream()
.filter(x -> !x.getCaptureInstance().equals(captureName))
.collect(Collectors.toList()).toArray(new ChangeTable[0]);
}
throw exception;
}
private ChangeTable[] getCdcTablesToQuery() throws SQLException, InterruptedException {
final Set<ChangeTable> cdcEnabledTables = connection.listOfChangeTables();
final Map<TableId, List<ChangeTable>> whitelistedCdcEnabledTables = cdcEnabledTables.stream()
.filter(changeTable -> {
if (connectorConfig.getTableFilters().dataCollectionFilter().isIncluded(changeTable.getSourceTableId())) {
return true;
}
else {
LOGGER.info("CDC is enabled for table {} but the table is not whitelisted by connector", changeTable);
return false;
}
})
.collect(Collectors.groupingBy(x -> x.getSourceTableId()));
final List<ChangeTable> tables = new ArrayList<>();
for (List<ChangeTable> captures: whitelistedCdcEnabledTables.values()) {
ChangeTable currentTable = captures.get(0);
if (captures.size() > 1) {
ChangeTable futureTable;
if (captures.get(0).getStartLsn().compareTo(captures.get(1).getStartLsn()) < 0) {
futureTable = captures.get(1);
}
else {
currentTable = captures.get(1);
futureTable = captures.get(0);
}
currentTable.setStopLsn(futureTable.getStartLsn());
tables.add(futureTable);
LOGGER.info("Multiple capture instances {} and {} present for the same table", currentTable, futureTable);
}
if (schema.tableFor(currentTable.getSourceTableId()) == null) {
LOGGER.info("Table {} is new to be monitored by capture instance {}", currentTable.getSourceTableId(), currentTable.getCaptureInstance());
// We need to read the source table schema - primary key information cannot be obtained from change table
dispatcher.dispatchSchemaChangeEvent(
currentTable.getSourceTableId(),
new SqlServerSchemaChangeEventEmitter(
offsetContext,
currentTable,
connection.getTableSchemaFromTable(currentTable)
)
);
}
tables.add(currentTable);
}
return tables.toArray(new ChangeTable[tables.size()]);
}
@Override
public void commitOffset(Map<String, ?> offset) {
}
/**
* The logical representation of a position for the change in the transaction log.
* During each sourcing cycle it is necessary to query all change tables and then
* make a total order of changes across all tables.<br>
* This class represents an open database cursor over the change table that is
* able to move the cursor forward and report the LSN for the change to which the cursor
* now points.
*
* @author Jiri Pechanec
*
*/
private static class ChangeTablePointer {
private final ChangeTable changeTable;
private final ResultSet resultSet;
private boolean completed = false;
private Lsn currentChangeLsn;
public ChangeTablePointer(ChangeTable changeTable, ResultSet resultSet) {
this.changeTable = changeTable;
this.resultSet = resultSet;
}
public ChangeTable getChangeTable() {
return changeTable;
}
public Lsn getCommitLsn() throws SQLException {
return Lsn.valueOf(resultSet.getBytes(COL_COMMIT_LSN));
}
public Lsn getRowLsn() throws SQLException {
return currentChangeLsn;
}
public int getOperation() throws SQLException {
return resultSet.getInt(COL_OPERATION);
}
public Object[] getData() throws SQLException {
final int dataColumnCount = resultSet.getMetaData().getColumnCount() - (COL_DATA - 1);
final Object[] data = new Object[dataColumnCount];
for (int i = 0; i < dataColumnCount; i++) {
data[i] = resultSet.getObject(COL_DATA + i);
}
return data;
}
public boolean next() throws SQLException {
completed = !resultSet.next();
currentChangeLsn = completed ? Lsn.NULL : Lsn.valueOf(resultSet.getBytes(COL_ROW_LSN));
if (completed) {
LOGGER.trace("Closing result set of change tables for table {}", changeTable);
resultSet.close();
}
return !completed;
}
public boolean isCompleted() {
return completed;
}
public int compareTo(ChangeTablePointer o) throws SQLException {
return getRowLsn().compareTo(o.getRowLsn());
}
@Override
public String toString() {
return "ChangeTablePointer [changeTable=" + changeTable + ", resultSet=" + resultSet + ", completed="
+ completed + ", currentChangeLsn=" + currentChangeLsn + "]";
}
}
}
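
Editorial note, not part of the removed sources: the core of the streaming loop above is a merge across the per-table change cursors — on each step the pointer with the smallest row LSN is picked, dispatched and advanced, yielding one totally ordered stream of changes across all captured tables. A standalone sketch of that merge, with in-memory iterators and plain long values standing in for the binary SQL Server LSNs:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class LsnMergeSketch {

    // Simplified stand-in for ChangeTablePointer: a table name plus an iterator of row LSNs
    static class Pointer {
        final String table;
        final Iterator<Long> rows;
        Long current;

        Pointer(String table, List<Long> rowLsns) {
            this.table = table;
            this.rows = rowLsns.iterator();
            next();
        }

        boolean completed() {
            return current == null;
        }

        void next() {
            current = rows.hasNext() ? rows.next() : null;
        }
    }

    public static void main(String[] args) {
        List<Pointer> pointers = Arrays.asList(
                new Pointer("dbo.orders", Arrays.asList(10L, 40L, 70L)),
                new Pointer("dbo.customers", Arrays.asList(20L, 30L, 80L)));

        for (;;) {
            Pointer smallest = null;
            for (Pointer p : pointers) {
                if (!p.completed() && (smallest == null || p.current < smallest.current)) {
                    smallest = p;
                }
            }
            if (smallest == null) {
                break; // all cursors exhausted
            }
            System.out.println("emit change at LSN " + smallest.current + " from " + smallest.table);
            smallest.next();
        }
    }
}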

View File

@ -1,21 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.connector.common.CdcSourceTaskContext;
/**
* A state (context) associated with a SQL Server task
*
* @author Jiri Pechanec
*
*/
public class SqlServerTaskContext extends CdcSourceTaskContext {
public SqlServerTaskContext(SqlServerConnectorConfig config, SqlServerDatabaseSchema schema) {
super("SQL_Server", config.getLogicalName(), schema::tableIds);
}
}

View File

@ -1,23 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import io.debezium.relational.TableId;
import io.debezium.schema.TopicSelector;
/**
* The topic naming strategy based on connector configuration and table name
*
* @author Jiri Pechanec
*
*/
public class SqlServerTopicSelector {
public static TopicSelector<TableId> defaultSelector(SqlServerConnectorConfig connectorConfig) {
return TopicSelector.defaultSelector(connectorConfig,
(tableId, prefix, delimiter) -> String.join(delimiter, prefix, tableId.schema(), tableId.table()));
}
}
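
Editorial note, not part of the removed sources: the resulting topic name is the logical server name, schema and table joined by the delimiter (a dot by default), which is where names such as server1.dbo.table1 in the integration tests below come from. A trivial sketch of that naming function:

public class TopicNameSketch {
    static String topicFor(String serverName, String schema, String table) {
        return String.join(".", serverName, schema, table);
    }

    public static void main(String[] args) {
        System.out.println(topicFor("server1", "dbo", "table1")); // prints server1.dbo.table1
    }
}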

View File

@ -1,111 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.Timestamp;
import java.sql.Types;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.SchemaBuilder;
import io.debezium.data.SpecialValueDecimal;
import io.debezium.jdbc.JdbcValueConverters;
import io.debezium.jdbc.TemporalPrecisionMode;
import io.debezium.relational.Column;
import io.debezium.relational.ValueConverter;
import io.debezium.time.ZonedTimestamp;
import microsoft.sql.DateTimeOffset;
/**
* Conversion of SQL Server specific datatypes.
*
* @author Jiri Pechanec
*
*/
public class SqlServerValueConverters extends JdbcValueConverters {
public SqlServerValueConverters() {
}
/**
* Create a new instance that always uses UTC for the default time zone when
* converting values without timezone information to values that require
* timezones.
* <p>
*
* @param decimalMode
* how {@code DECIMAL} and {@code NUMERIC} values should be
* treated; may be null if
* {@link io.debezium.jdbc.JdbcValueConverters.DecimalMode#PRECISE}
* is to be used
*/
public SqlServerValueConverters(DecimalMode decimalMode) {
super(decimalMode, TemporalPrecisionMode.ADAPTIVE_TIME_MICROSECONDS, ZoneOffset.UTC, null, null);
}
@Override
public SchemaBuilder schemaBuilder(Column column) {
switch (column.jdbcType()) {
// Numeric integers
case Types.TINYINT:
// values are 8-bit unsigned integers between 0 and 255, so they need to be stored in a 16-bit signed int
return SchemaBuilder.int16();
// Floating point
case microsoft.sql.Types.SMALLMONEY:
case microsoft.sql.Types.MONEY:
return SpecialValueDecimal.builder(decimalMode, column.length(), column.scale().get());
case microsoft.sql.Types.DATETIMEOFFSET:
return ZonedTimestamp.builder();
default:
return super.schemaBuilder(column);
}
}
@Override
public ValueConverter converter(Column column, Field fieldDefn) {
switch (column.jdbcType()) {
// Numeric integers
case Types.TINYINT:
// values are 8-bit unsigned integers between 0 and 255, so they need to be stored in a 16-bit signed int
return (data) -> convertSmallInt(column, fieldDefn, data);
// Floating point
case microsoft.sql.Types.SMALLMONEY:
case microsoft.sql.Types.MONEY:
return (data) -> convertDecimal(column, fieldDefn, data);
case microsoft.sql.Types.DATETIMEOFFSET:
return (data) -> convertTimestampWithZone(column, fieldDefn, data);
// TODO Geometry and geography supported since 6.5.0
default:
return super.converter(column, fieldDefn);
}
}
/**
* Time precision in SQL Server is defined by the scale; the default is 7.
*/
@Override
protected int getTimePrecision(Column column) {
return column.scale().get();
}
protected Object convertTimestampWithZone(Column column, Field fieldDefn, Object data) {
if (!(data instanceof DateTimeOffset)) {
return super.convertTimestampWithZone(column, fieldDefn, data);
}
final DateTimeOffset dto = (DateTimeOffset)data;
// Timestamp is provided in UTC time
final Timestamp utc = dto.getTimestamp();
final ZoneOffset offset = ZoneOffset.ofTotalSeconds(dto.getMinutesOffset() * 60);
return super.convertTimestampWithZone(column, fieldDefn, LocalDateTime.ofEpochSecond(utc.getTime() / 1000, utc.getNanos(), offset).atOffset(offset));
}
}
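
Editorial note, not part of the removed sources: convertTimestampWithZone above rebuilds an offset timestamp from the UTC java.sql.Timestamp and the minute offset reported by the driver. A standalone sketch of the same arithmetic using only java.time, with the driver's DateTimeOffset replaced by a plain instant and offset (values taken from the datatype tests later in this diff):

import java.sql.Timestamp;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;

public class DateTimeOffsetSketch {
    public static void main(String[] args) {
        // Stand-ins for DateTimeOffset.getTimestamp() / getMinutesOffset():
        // '2018-07-13 12:23:45.456 +11:00' is 2018-07-13T01:23:45.456Z in UTC
        Timestamp utc = Timestamp.from(Instant.parse("2018-07-13T01:23:45.456Z"));
        int minutesOffset = 11 * 60;

        ZoneOffset offset = ZoneOffset.ofTotalSeconds(minutesOffset * 60);
        OffsetDateTime result = LocalDateTime
                .ofEpochSecond(utc.getTime() / 1000, utc.getNanos(), offset)
                .atOffset(offset);

        System.out.println(result); // 2018-07-13T12:23:45.456+11:00
    }
}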

View File

@ -1,253 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import static org.fest.assertions.Assertions.assertThat;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.data.SchemaAndValueField;
import io.debezium.data.VerifyRecord;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.time.Date;
import io.debezium.time.MicroTime;
import io.debezium.time.NanoTimestamp;
import io.debezium.time.Timestamp;
import io.debezium.time.ZonedTimestamp;
import io.debezium.util.Testing;
/**
* Integration test to verify different SQL Server datatypes.
*
* @author Jiri Pechanec
*/
public abstract class AbstractSqlServerDatatypesTest extends AbstractConnectorTest {
/**
* Key for schema parameter used to store DECIMAL/NUMERIC columns' precision.
*/
static final String PRECISION_PARAMETER_KEY = "connect.decimal.precision";
private static final String DDL_STRING = "create table type_string (" +
" id int not null, " +
" val_char char(3), " +
" val_varchar varchar(1000), " +
" val_text text, " +
" val_nchar nchar(3), " +
" val_nvarchar nvarchar(1000), " +
" val_ntext ntext, " +
" primary key (id)" +
")";
private static final String DDL_FP = "create table type_fp (" +
" id int not null, " +
" val_decimal decimal(6,3), " +
" val_numeric numeric, " +
" val_float float, " +
" val_real real, " +
" val_smallmoney smallmoney, " +
" val_money money " +
" primary key (id)" +
")";
private static final String DDL_INT = "create table type_int (" +
" id int not null, " +
" val_bit bit, " +
" val_tinyint tinyint, " +
" val_smallint smallint, " +
" val_int int, " +
" val_bigint bigint, " +
" primary key (id)" +
")";
private static final String DDL_TIME = "create table type_time (" +
" id int not null, " +
" val_date date, " +
" val_time time(4), " +
" val_datetime2 datetime2, " +
" val_datetimeoffset datetimeoffset, " +
" val_datetime datetime, " +
" val_smalldatetime smalldatetime, " +
" primary key (id)" +
")";
private static final String DDL_XML = "create table type_xml (" +
" id int not null, " +
" val_xml xml, " +
" primary key (id)" +
")";
private static final List<SchemaAndValueField> EXPECTED_INT = Arrays.asList(
new SchemaAndValueField("val_bit", Schema.OPTIONAL_BOOLEAN_SCHEMA, true),
new SchemaAndValueField("val_tinyint", Schema.OPTIONAL_INT16_SCHEMA, (short)22),
new SchemaAndValueField("val_smallint", Schema.OPTIONAL_INT16_SCHEMA, (short)333),
new SchemaAndValueField("val_int", Schema.OPTIONAL_INT32_SCHEMA, 4444),
new SchemaAndValueField("val_bigint", Schema.OPTIONAL_INT64_SCHEMA, 55555l)
);
private static final List<SchemaAndValueField> EXPECTED_FP = Arrays.asList(
new SchemaAndValueField("val_decimal",Decimal.builder(3).parameter(PRECISION_PARAMETER_KEY, "6").optional().build(), new BigDecimal("1.123")),
new SchemaAndValueField("val_numeric", Decimal.builder(0).parameter(PRECISION_PARAMETER_KEY, "18").optional().build(), new BigDecimal("2")),
new SchemaAndValueField("val_float", Schema.OPTIONAL_FLOAT64_SCHEMA, 3.323),
new SchemaAndValueField("val_real", Schema.OPTIONAL_FLOAT32_SCHEMA, 4.323f),
new SchemaAndValueField("val_smallmoney", Decimal.builder(4).parameter(PRECISION_PARAMETER_KEY, "10").optional().build(), new BigDecimal("5.3230")),
new SchemaAndValueField("val_money", Decimal.builder(4).parameter(PRECISION_PARAMETER_KEY, "19").optional().build(), new BigDecimal("6.3230"))
);
private static final List<SchemaAndValueField> EXPECTED_STRING = Arrays.asList(
new SchemaAndValueField("val_char", Schema.OPTIONAL_STRING_SCHEMA, "cc "),
new SchemaAndValueField("val_varchar", Schema.OPTIONAL_STRING_SCHEMA, "vcc"),
new SchemaAndValueField("val_text", Schema.OPTIONAL_STRING_SCHEMA, "tc"),
new SchemaAndValueField("val_nchar", Schema.OPTIONAL_STRING_SCHEMA, "c\u010d "),
new SchemaAndValueField("val_nvarchar", Schema.OPTIONAL_STRING_SCHEMA, "vc\u010d"),
new SchemaAndValueField("val_ntext", Schema.OPTIONAL_STRING_SCHEMA, "t\u010d")
);
private static final List<SchemaAndValueField> EXPECTED_DATE_TIME = Arrays.asList(
new SchemaAndValueField("val_date", Date.builder().optional().build(), 17_725),
new SchemaAndValueField("val_time", MicroTime.builder().optional().build(), 37_425_000_000l),
new SchemaAndValueField("val_datetime2", NanoTimestamp.builder().optional().build(), 1_531_481_025_340_000_000l),
new SchemaAndValueField("val_datetimeoffset", ZonedTimestamp.builder().optional().build(), "2018-07-13T12:23:45.456+11:00"),
new SchemaAndValueField("val_datetime", Timestamp.builder().optional().build(), 1_531_488_225_780l),
new SchemaAndValueField("val_smalldatetime", Timestamp.builder().optional().build(), 1_531_491_840_000l)
);
private static final List<SchemaAndValueField> EXPECTED_XML = Arrays.asList(
new SchemaAndValueField("val_xml", Schema.OPTIONAL_STRING_SCHEMA, "<a>b</a>")
);
private static final String[] ALL_TABLES = {
"type_int",
"type_fp",
"type_string",
"type_time",
"type_xml"
};
private static final String[] ALL_DDLS = {
DDL_INT,
DDL_FP,
DDL_STRING,
DDL_TIME,
DDL_XML
};
private static final int EXPECTED_RECORD_COUNT = ALL_DDLS.length;
@AfterClass
public static void dropTables() throws SQLException {
TestHelper.dropTestDatabase();
}
@BeforeClass
public static void createTables() throws SQLException {
TestHelper.createTestDatabase();
try (SqlServerConnection connection = TestHelper.testConnection()) {
connection.execute(ALL_DDLS);
for (String table: ALL_TABLES) {
TestHelper.enableTableCdc(connection, table);
}
connection.execute(
"INSERT INTO type_int VALUES (0, 1, 22, 333, 4444, 55555)",
"INSERT INTO type_fp VALUES (0, 1.123, 2, 3.323, 4.323, 5.323, 6.323)",
"INSERT INTO type_string VALUES (0, 'c\u010d', 'vc\u010d', 't\u010d', N'c\u010d', N'vc\u010d', N't\u010d')",
"INSERT INTO type_time VALUES (0, '2018-07-13', '10:23:45', '2018-07-13 11:23:45.34', '2018-07-13 12:23:45.456+11:00', '2018-07-13 13:23:45.78', '2018-07-13 14:23:45')",
"INSERT INTO type_xml VALUES (0, '<a>b</a>')"
);
}
}
@Test
public void intTypes() throws Exception {
Testing.debug("Inserted");
final SourceRecords records = consumeRecordsByTopic(EXPECTED_RECORD_COUNT);
List<SourceRecord> testTableRecords = records.recordsForTopic("server1.dbo.type_int");
assertThat(testTableRecords).hasSize(1);
// insert
VerifyRecord.isValidRead(testTableRecords.get(0));
Struct after = (Struct) ((Struct)testTableRecords.get(0).value()).get("after");
assertRecord(after, EXPECTED_INT);
}
@Test
public void fpTypes() throws Exception {
Testing.debug("Inserted");
final SourceRecords records = consumeRecordsByTopic(EXPECTED_RECORD_COUNT);
List<SourceRecord> testTableRecords = records.recordsForTopic("server1.dbo.type_fp");
assertThat(testTableRecords).hasSize(1);
// insert
VerifyRecord.isValidRead(testTableRecords.get(0));
Struct after = (Struct) ((Struct)testTableRecords.get(0).value()).get("after");
assertRecord(after, EXPECTED_FP);
}
@Test
public void stringTypes() throws Exception {
Testing.debug("Inserted");
final SourceRecords records = consumeRecordsByTopic(EXPECTED_RECORD_COUNT);
List<SourceRecord> testTableRecords = records.recordsForTopic("server1.dbo.type_string");
assertThat(testTableRecords).hasSize(1);
// insert
VerifyRecord.isValidRead(testTableRecords.get(0));
Struct after = (Struct) ((Struct)testTableRecords.get(0).value()).get("after");
assertRecord(after, EXPECTED_STRING);
}
@Test
public void dateTimeTypes() throws Exception {
Testing.debug("Inserted");
final SourceRecords records = consumeRecordsByTopic(EXPECTED_RECORD_COUNT);
List<SourceRecord> testTableRecords = records.recordsForTopic("server1.dbo.type_time");
assertThat(testTableRecords).hasSize(1);
// insert
VerifyRecord.isValidRead(testTableRecords.get(0));
Struct after = (Struct) ((Struct)testTableRecords.get(0).value()).get("after");
assertRecord(after, EXPECTED_DATE_TIME);
}
@Test
public void otherTypes() throws Exception {
Testing.debug("Inserted");
final SourceRecords records = consumeRecordsByTopic(EXPECTED_RECORD_COUNT);
List<SourceRecord> testTableRecords = records.recordsForTopic("server1.dbo.type_xml");
assertThat(testTableRecords).hasSize(1);
// insert
VerifyRecord.isValidRead(testTableRecords.get(0));
Struct after = (Struct) ((Struct)testTableRecords.get(0).value()).get("after");
assertRecord(after, EXPECTED_XML);
}
private void assertRecord(Struct record, List<SchemaAndValueField> expected) {
expected.forEach(schemaAndValueField -> schemaAndValueField.assertFor(record));
}
}
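
Editorial note, not part of the removed sources: the temporal constants in EXPECTED_DATE_TIME are the Debezium semantic-type encodings of the inserted values — days since the epoch for date, microseconds since midnight for time, and nanosecond/millisecond epoch timestamps for datetime2/datetime. A small sketch that recomputes the magic numbers with java.time:

import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneOffset;

public class ExpectedTemporalValuesSketch {
    public static void main(String[] args) {
        // io.debezium.time.Date: days since the epoch for '2018-07-13'
        long days = LocalDate.of(2018, 7, 13).toEpochDay();                        // 17725

        // io.debezium.time.MicroTime: microseconds since midnight for '10:23:45'
        long micros = Duration.between(LocalTime.MIDNIGHT, LocalTime.of(10, 23, 45)).toNanos() / 1_000;
        // 37_425_000_000

        // io.debezium.time.NanoTimestamp: nanoseconds since the epoch for '2018-07-13 11:23:45.34'
        long nanos = LocalDateTime.of(2018, 7, 13, 11, 23, 45, 340_000_000)
                .toInstant(ZoneOffset.UTC).toEpochMilli() * 1_000_000L;             // 1_531_481_025_340_000_000

        // io.debezium.time.Timestamp: milliseconds since the epoch for '2018-07-13 13:23:45.78'
        long millis = LocalDateTime.of(2018, 7, 13, 13, 23, 45, 780_000_000)
                .toInstant(ZoneOffset.UTC).toEpochMilli();                          // 1_531_488_225_780

        System.out.printf("date=%d time=%d datetime2=%d datetime=%d%n", days, micros, nanos, millis);
    }
}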

View File

@ -1,151 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.SQLException;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.fest.assertions.Assertions;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.doc.FixFor;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.util.Testing;
/**
* Integration test to verify behaviour of database with and without case sensitive names.
*
* @author Jiri Pechanec
*/
public class CaseSensitivenessIT extends AbstractConnectorTest {
private SqlServerConnection connection;
@Before
public void before() throws SQLException {
TestHelper.createTestDatabase();
connection = TestHelper.testConnection();
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
@After
public void after() throws SQLException {
if (connection != null) {
connection.close();
}
}
@Test
@FixFor("DBZ-1051")
public void caseInsensitiveDatabase() throws Exception {
connection.execute(
"CREATE TABLE MyTableOne (Id int primary key, ColA varchar(30))",
"INSERT INTO MyTableOne VALUES(1, 'a')"
);
TestHelper.enableTableCdc(connection, "MyTableOne");
testDatabase();
}
@Test
@FixFor("DBZ-1051")
public void caseSensitiveDatabase() throws Exception {
connection.execute(
"ALTER DATABASE testDB COLLATE Latin1_General_BIN",
"CREATE TABLE MyTableOne (Id int primary key, ColA varchar(30))",
"INSERT INTO MyTableOne VALUES(1, 'a')"
);
TestHelper.enableTableCdc(connection, "MyTableOne");
testDatabase();
}
private void testDatabase() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
SourceRecords records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.dbo.MyTableOne")).hasSize(1);
SourceRecord record = records.recordsForTopic("server1.dbo.MyTableOne").get(0);
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.MyTableOne.Value")
.field("Id", Schema.INT32_SCHEMA)
.field("ColA", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
assertSchemaMatchesStruct(
(Struct)record.key(),
SchemaBuilder.struct()
.name("server1.dbo.MyTableOne.Key")
.field("Id", Schema.INT32_SCHEMA)
.build()
);
Assertions.assertThat(((Struct)((Struct)record.value()).get("after")).getInt32("Id")).isEqualTo(1);
connection.execute("INSERT INTO MyTableOne VALUES(2, 'b')");
records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.dbo.MyTableOne")).hasSize(1);
record = records.recordsForTopic("server1.dbo.MyTableOne").get(0);
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.MyTableOne.Value")
.field("Id", Schema.INT32_SCHEMA)
.field("ColA", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
assertSchemaMatchesStruct(
(Struct)record.key(),
SchemaBuilder.struct()
.name("server1.dbo.MyTableOne.Key")
.field("Id", Schema.INT32_SCHEMA)
.build()
);
Assertions.assertThat(((Struct)((Struct)record.value()).get("after")).getInt32("Id")).isEqualTo(2);
connection.execute(
"CREATE TABLE MyTableTwo (Id int primary key, ColB varchar(30))"
);
TestHelper.enableTableCdc(connection, "MyTableTwo");
connection.execute("INSERT INTO MyTableTwo VALUES(3, 'b')");
records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.dbo.MyTableTwo")).hasSize(1);
record = records.recordsForTopic("server1.dbo.MyTableTwo").get(0);
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.MyTableTwo.Value")
.field("Id", Schema.INT32_SCHEMA)
.field("ColB", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
assertSchemaMatchesStruct(
(Struct)record.key(),
SchemaBuilder.struct()
.name("server1.dbo.MyTableTwo.Key")
.field("Id", Schema.INT32_SCHEMA)
.build()
);
Assertions.assertThat(((Struct)((Struct)record.value()).get("after")).getInt32("Id")).isEqualTo(3);
}
}

View File

@ -1,44 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.BeforeClass;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.util.Testing;
/**
* Integration test to verify different SQL Server datatypes.
* The types are discovered during snapshotting phase.
*
* @author Jiri Pechanec
*/
public class DatatypesFromSnapshotIT extends AbstractSqlServerDatatypesTest {
@BeforeClass
public static void beforeClass() throws SQLException {
createTables();
}
@Before
public void before() throws Exception {
initializeConnectorTestFramework();
Testing.Debug.enable();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
Thread.sleep(1000);
}
}

View File

@ -1,171 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.List;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.fest.assertions.Assertions;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.relational.RelationalDatabaseConnectorConfig.DecimalHandlingMode;
import io.debezium.util.Testing;
/**
* Tests for numeric/decimal columns with the precise, string and double decimal handling modes
*
* @author Pradeep Mamillapalli
*
*/
public class SQLServerNumericColumnIT extends AbstractConnectorTest {
private SqlServerConnection connection;
/**
* Creates 4 tables. Each table has 4 columns:
* cola: DECIMAL(8,4) - precision 8, scale 4
* colb: DECIMAL - default precision (18) and default scale (0)
* colc: NUMERIC(8,1) - precision 8, scale 1
* cold: NUMERIC - default precision (18) and default scale (0)
*
* @throws SQLException
*/
@Before
public void before() throws SQLException {
TestHelper.createTestDatabase();
connection = TestHelper.testConnection();
connection.execute(
"CREATE TABLE tablenuma (id int IDENTITY(1,1) primary key, cola DECIMAL(8, 4),colb DECIMAL, colc numeric(8,1), cold numeric)",
"CREATE TABLE tablenumb (id int IDENTITY(1,1) primary key, cola DECIMAL(8, 4),colb DECIMAL, colc numeric(8,1), cold numeric)",
"CREATE TABLE tablenumc (id int IDENTITY(1,1) primary key, cola DECIMAL(8, 4),colb DECIMAL, colc numeric(8,1), cold numeric)",
"CREATE TABLE tablenumd (id int IDENTITY(1,1) primary key, cola DECIMAL(8, 4),colb DECIMAL, colc numeric(8,1), cold numeric)");
TestHelper.enableTableCdc(connection, "tablea");
TestHelper.enableTableCdc(connection, "tableb");
TestHelper.enableTableCdc(connection, "tablec");
TestHelper.enableTableCdc(connection, "tabled");
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
@After
public void after() throws SQLException {
if (connection != null) {
connection.close();
}
}
/**
* Insert 1 Record into tablenuma with {@code DecimalHandlingMode.STRING}
* mode Assertions: - Connector is running - 1 Record are streamed out of
* cdc - Assert cola, colb, colc, cold are exactly equal to the input
* values.
*
* @throws Exception
*/
@Test
public void decimalModeConfigString() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenuma")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.STRING).build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
connection.execute("INSERT INTO tablenuma VALUES (111.1111, 1111111, 1111111.1, 1111111 );");
final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablenuma");
Assertions.assertThat(tableA).hasSize(1);
final Struct valueA = (Struct) tableA.get(0).value();
assertSchema(valueA, Schema.OPTIONAL_STRING_SCHEMA);
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo("111.1111");
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo("1111111");
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo("1111111.1");
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo("1111111");
stopConnector();
}
/**
* Inserts 1 record into tablenumb with {@code DecimalHandlingMode.DOUBLE} mode.
* Assertions:
* - the connector is running
* - 1 record is streamed out of CDC
* - cola, colb, colc and cold are exactly equal to the input values in double format
*
* @throws Exception
*/
@Test
public void decimalModeConfigDouble() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenumb")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE).build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
connection.execute("INSERT INTO tablenumb VALUES (222.2222, 22222, 22222.2, 2222222 );");
final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.dbo.tablenumb");
Assertions.assertThat(results).hasSize(1);
final Struct valueA = (Struct) results.get(0).value();
assertSchema(valueA, Schema.OPTIONAL_FLOAT64_SCHEMA);
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(222.2222d);
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(22222d);
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(22222.2d);
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(2222222d);
stopConnector();
}
/**
* Inserts 1 record into tablenumc with {@code DecimalHandlingMode.PRECISE} mode.
* Assertions:
* - the connector is running
* - 1 record is streamed out of CDC
* - cola, colb, colc and cold are bytes (Kafka Connect Decimal values)
*
* @throws Exception
*/
@Test
public void decimalModeConfigPrecise() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenumc")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.PRECISE).build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
connection.execute("INSERT INTO tablenumc VALUES (333.3333, 3333, 3333.3, 33333333 );");
final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.dbo.tablenumc");
Assertions.assertThat(results).hasSize(1);
final Struct valueA = (Struct) results.get(0).value();
Assertions.assertThat(valueA.schema().field("after").schema().field("cola").schema()).isEqualTo(Decimal.builder(4).parameter("connect.decimal.precision", "8").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("colb").schema()).isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("colc").schema()).isEqualTo(Decimal.builder(1).parameter("connect.decimal.precision", "8").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("cold").schema()).isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema());
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(BigDecimal.valueOf(333.3333));
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(BigDecimal.valueOf(3333));
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(BigDecimal.valueOf(3333.3));
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(BigDecimal.valueOf(33333333));
stopConnector();
}
private void assertSchema(Struct valueA, Schema expected) {
Assertions.assertThat(valueA.schema().field("after").schema().field("cola").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("colb").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("colc").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("cold").schema()).isEqualTo(expected);
}
}
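
Editorial note, not part of the removed sources: the three tests above correspond to the three decimal handling modes — string keeps the exact textual value, double converts to a Java double (possibly losing precision), and precise keeps a BigDecimal that Kafka Connect's Decimal logical type serializes as the unscaled value in bytes with the scale carried in the schema. A standalone sketch of the three representations for the DECIMAL(8,4) value 111.1111:

import java.math.BigDecimal;
import java.util.Arrays;

public class DecimalModesSketch {
    public static void main(String[] args) {
        BigDecimal value = new BigDecimal("111.1111"); // a DECIMAL(8,4) column value

        // decimal.handling.mode=string
        String asString = value.toPlainString();                // "111.1111"

        // decimal.handling.mode=double (may lose precision for large or very precise values)
        double asDouble = value.doubleValue();                  // 111.1111

        // decimal.handling.mode=precise: Connect's Decimal logical type stores the
        // unscaled value as bytes and carries scale=4 in the schema
        byte[] unscaled = value.unscaledValue().toByteArray();  // 1111111 as two's-complement bytes
        int scale = value.scale();                              // 4

        System.out.println(asString + " / " + asDouble + " / scale=" + scale
                + " unscaled=" + Arrays.toString(unscaled));
    }
}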

View File

@ -1,322 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import static io.debezium.connector.sqlserver.SqlServerConnectorConfig.SNAPSHOT_LOCKING_MODE;
import static org.fest.assertions.Assertions.assertThat;
import static org.junit.Assert.assertNull;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.fest.assertions.Assertions;
import org.fest.assertions.MapAssert;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotLockingMode;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.data.SchemaAndValueField;
import io.debezium.data.SourceRecordAssert;
import io.debezium.doc.FixFor;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.heartbeat.Heartbeat;
import io.debezium.time.Timestamp;
import io.debezium.util.Testing;
/**
* Integration test for the Debezium SQL Server connector.
*
* @author Jiri Pechanec
*/
public class SnapshotIT extends AbstractConnectorTest {
private static final int INITIAL_RECORDS_PER_TABLE = 500;
private static final int STREAMING_RECORDS_PER_TABLE = 500;
private SqlServerConnection connection;
@Before
public void before() throws SQLException {
TestHelper.createTestDatabase();
connection = TestHelper.testConnection();
connection.execute(
"CREATE TABLE table1 (id int, name varchar(30), price decimal(8,2), ts datetime2(0), primary key(id))"
);
// Populate database
for (int i = 0; i < INITIAL_RECORDS_PER_TABLE; i++) {
connection.execute(
String.format("INSERT INTO table1 VALUES(%s, '%s', %s, '%s')", i, "name" + i, new BigDecimal(i + ".23"), "2018-07-18 13:28:56")
);
}
TestHelper.enableTableCdc(connection, "table1");
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
@After
public void after() throws SQLException {
if (connection != null) {
connection.close();
}
// TestHelper.dropTestDatabase();
}
@Test
public void takeSnapshotInExclusiveMode() throws Exception {
takeSnapshot(SnapshotLockingMode.EXCLUSIVE);
}
@Test
public void takeSnapshotInSnapshotMode() throws Exception {
takeSnapshot(SnapshotLockingMode.SNAPSHOT);
}
@Test
public void takeSnapshotInNoneMode() throws Exception {
takeSnapshot(SnapshotLockingMode.NONE);
}
private void takeSnapshot(SnapshotLockingMode lockingMode) throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SNAPSHOT_LOCKING_MODE.name(), lockingMode.getValue())
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords records = consumeRecordsByTopic(INITIAL_RECORDS_PER_TABLE);
final List<SourceRecord> table1 = records.recordsForTopic("server1.dbo.table1");
assertThat(table1).hasSize(INITIAL_RECORDS_PER_TABLE);
for (int i = 0; i < INITIAL_RECORDS_PER_TABLE; i++) {
final SourceRecord record1 = table1.get(i);
final List<SchemaAndValueField> expectedKey1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i)
);
final List<SchemaAndValueField> expectedRow1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i),
new SchemaAndValueField("name", Schema.OPTIONAL_STRING_SCHEMA, "name" + i),
new SchemaAndValueField("price", Decimal.builder(2).parameter("connect.decimal.precision", "8").optional().build(), new BigDecimal(i + ".23")),
new SchemaAndValueField("ts", Timestamp.builder().optional().schema(), 1_531_920_536_000l)
);
final Struct key1 = (Struct)record1.key();
final Struct value1 = (Struct)record1.value();
assertRecord(key1, expectedKey1);
assertRecord((Struct)value1.get("after"), expectedRow1);
assertThat(record1.sourceOffset()).includes(
MapAssert.entry("snapshot", true),
MapAssert.entry("snapshot_completed", i == INITIAL_RECORDS_PER_TABLE - 1));
assertNull(value1.get("before"));
}
}
@Test
public void takeSnapshotAndStartStreaming() throws Exception {
final Configuration config = TestHelper.defaultConfig().build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
// Ignore initial records
consumeRecordsByTopic(INITIAL_RECORDS_PER_TABLE);
testStreaming();
}
private void testStreaming() throws SQLException, InterruptedException {
for (int i = 0; i < STREAMING_RECORDS_PER_TABLE; i++) {
final int id = i + INITIAL_RECORDS_PER_TABLE;
connection.execute(
String.format("INSERT INTO table1 VALUES(%s, '%s', %s, '%s')", id, "name" + id, new BigDecimal(id + ".23"), "2018-07-18 13:28:56")
);
}
final SourceRecords records = consumeRecordsByTopic(STREAMING_RECORDS_PER_TABLE);
final List<SourceRecord> table1 = records.recordsForTopic("server1.dbo.table1");
assertThat(table1).hasSize(STREAMING_RECORDS_PER_TABLE);
for (int i = 0; i < STREAMING_RECORDS_PER_TABLE; i++) {
final int id = i + INITIAL_RECORDS_PER_TABLE;
final SourceRecord record1 = table1.get(i);
final List<SchemaAndValueField> expectedKey1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id)
);
final List<SchemaAndValueField> expectedRow1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id),
new SchemaAndValueField("name", Schema.OPTIONAL_STRING_SCHEMA, "name" + id),
new SchemaAndValueField("price", Decimal.builder(2).parameter("connect.decimal.precision", "8").optional().build(), new BigDecimal(id + ".23")),
new SchemaAndValueField("ts", Timestamp.builder().optional().schema(), 1_531_920_536_000l)
);
final Struct key1 = (Struct)record1.key();
final Struct value1 = (Struct)record1.value();
assertRecord(key1, expectedKey1);
assertRecord((Struct)value1.get("after"), expectedRow1);
assertThat(record1.sourceOffset()).hasSize(1);
Assert.assertTrue(record1.sourceOffset().containsKey("change_lsn"));
assertNull(value1.get("before"));
}
}
@Test
public void takeSchemaOnlySnapshotAndStartStreaming() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
testStreaming();
}
@Test
@FixFor("DBZ-1031")
public void takeSnapshotFromTableWithReservedName() throws Exception {
connection.execute(
"CREATE TABLE [User] (id int, name varchar(30), primary key(id))"
);
for (int i = 0; i < INITIAL_RECORDS_PER_TABLE; i++) {
connection.execute(
String.format("INSERT INTO [User] VALUES(%s, '%s')", i, "name" + i)
);
}
TestHelper.enableTableCdc(connection, "User");
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
final Configuration config = TestHelper.defaultConfig()
.with("table.whitelist", "dbo.User")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords records = consumeRecordsByTopic(INITIAL_RECORDS_PER_TABLE);
final List<SourceRecord> user = records.recordsForTopic("server1.dbo.User");
assertThat(user).hasSize(INITIAL_RECORDS_PER_TABLE);
for (int i = 0; i < INITIAL_RECORDS_PER_TABLE; i++) {
final SourceRecord record1 = user.get(i);
final List<SchemaAndValueField> expectedKey1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i)
);
final List<SchemaAndValueField> expectedRow1 = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i),
new SchemaAndValueField("name", Schema.OPTIONAL_STRING_SCHEMA, "name" + i)
);
final Struct key1 = (Struct)record1.key();
final Struct value1 = (Struct)record1.value();
assertRecord(key1, expectedKey1);
assertRecord((Struct)value1.get("after"), expectedRow1);
assertThat(record1.sourceOffset()).includes(
MapAssert.entry("snapshot", true),
MapAssert.entry("snapshot_completed", i == INITIAL_RECORDS_PER_TABLE - 1));
assertNull(value1.get("before"));
}
}
@Test
public void takeSchemaOnlySnapshotAndSendHeartbeat() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.with(Heartbeat.HEARTBEAT_INTERVAL, 300_000)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecord record = consumeRecord();
Assertions.assertThat(record.topic()).startsWith("__debezium-heartbeat");
}
@Test
@FixFor("DBZ-1067")
public void blacklistColumn() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))"
);
connection.execute("INSERT INTO blacklist_column_table_a VALUES(10, 'some_name', 120)");
connection.execute("INSERT INTO blacklist_column_table_b VALUES(11, 'some_name', 447)");
TestHelper.enableTableCdc(connection, "blacklist_column_table_a");
TestHelper.enableTableCdc(connection, "blacklist_column_table_b");
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.COLUMN_BLACKLIST, "dbo.blacklist_column_table_a.amount")
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.blacklist_column_table_a,dbo.blacklist_column_table_b")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords records = consumeRecordsByTopic(2);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.blacklist_column_table_a");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.blacklist_column_table_b");
Schema expectedSchemaA = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_a.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.build();
Struct expectedValueA = new Struct(expectedSchemaA)
.put("id", 10)
.put("name", "some_name");
Schema expectedSchemaB = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_b.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.field("amount", Schema.OPTIONAL_INT32_SCHEMA)
.build();
Struct expectedValueB = new Struct(expectedSchemaB)
.put("id", 11)
.put("name", "some_name")
.put("amount", 447);
Assertions.assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
stopConnector();
}
private void assertRecord(Struct record, List<SchemaAndValueField> expected) {
expected.forEach(schemaAndValueField -> schemaAndValueField.assertFor(record));
}
}

View File

@ -1,32 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import org.junit.Before;
import org.junit.Test;
import static org.fest.assertions.Assertions.assertThat;
public class SourceInfoTest {
private SourceInfo source;
@Before
public void beforeEach() {
source = new SourceInfo("serverX");
source.setChangeLsn(Lsn.NULL);
}
@Test
public void versionIsPresent() {
assertThat(source.struct().getString(SourceInfo.DEBEZIUM_VERSION_KEY)).isEqualTo(Module.version());
}
@Test
public void connectorIsPresent() {
assertThat(source.struct().getString(SourceInfo.DEBEZIUM_CONNECTOR_KEY)).isEqualTo(Module.name());
}
}

View File

@ -1,668 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.sql.SQLException;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.fest.assertions.Assertions;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.util.Testing;
/**
* Integration test for the Debezium SQL Server connector.
*
* @author Jiri Pechanec
*/
public class SqlServerChangeTableSetIT extends AbstractConnectorTest {
private SqlServerConnection connection;
@Before
public void before() throws SQLException {
TestHelper.createTestDatabase();
connection = TestHelper.testConnection();
connection.execute(
"CREATE TABLE tablea (id int primary key, cola varchar(30))",
"CREATE TABLE tableb (id int primary key, colb varchar(30))",
"CREATE TABLE tablec (id int primary key, colc varchar(30))"
);
TestHelper.enableTableCdc(connection, "tablea");
TestHelper.enableTableCdc(connection, "tableb");
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
@After
public void after() throws SQLException {
if (connection != null) {
connection.close();
}
}
@Test
public void addTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Enable CDC for already existing table
TestHelper.enableTableCdc(connection, "tablec");
// CDC for newly added table
connection.execute(
"CREATE TABLE tabled (id int primary key, cold varchar(30))"
);
TestHelper.enableTableCdc(connection, "tabled");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablec VALUES(" + id + ", 'c')"
);
connection.execute(
"INSERT INTO tabled VALUES(" + id + ", 'd')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablec")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tabled")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tablec").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tablec.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colc", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
records.recordsForTopic("server1.dbo.tabled").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tabled.Value")
.field("id", Schema.INT32_SCHEMA)
.field("cold", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
}
@Test
public void removeTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Disable CDC for a table
TestHelper.disableTableCdc(connection, "tableb");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b2')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).isNullOrEmpty();
}
@Test
public void addColumnToTableEndOfBatch() throws Exception {
addColumnToTable(true);
}
@Test
public void addColumnToTableMiddleOfBatch() throws Exception {
addColumnToTable(false);
}
private void addColumnToTable(boolean pauseAfterCaptureChange) throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final int ID_START_3 = 1000;
final int ID_START_4 = 10000;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
// Enable a second capture instance
connection.execute("ALTER TABLE dbo.tableb ADD newcol INT NOT NULL DEFAULT 0");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b2', 2)"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
TestHelper.enableTableCdc(connection, "tableb", "after_change");
if (pauseAfterCaptureChange) {
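// when requested, pause so the new capture instance is picked up before the next inserts (end-of-batch case)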
Thread.sleep(5_000);
}
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a3')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b3', 3)"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.field("newcol", Schema.INT32_SCHEMA)
.build()
);
});
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_4 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a4')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b4', 4)"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.field("newcol", Schema.INT32_SCHEMA)
.build()
);
});
}
@Test
public void removeColumnFromTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final int ID_START_3 = 1000;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
// Enable a second capture instance
connection.execute("ALTER TABLE dbo.tableb DROP COLUMN colb");
TestHelper.enableTableCdc(connection, "tableb", "after_change");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ")"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.build()
);
});
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a3')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ")"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.build()
);
});
}
@Test
public void readHistoryAfterRestart() throws Exception {
final int RECORDS_PER_TABLE = 1;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final int ID_START_3 = 1000;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Enable a second capture instance
connection.execute("ALTER TABLE dbo.tableb DROP COLUMN colb");
TestHelper.enableTableCdc(connection, "tableb", "after_change");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ")"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
stopConnector();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a3')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ")"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.build()
);
});
}
@Test
public void renameColumn() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final int ID_START_3 = 1000;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
// CDC must be disabled, otherwise rename fails
TestHelper.disableTableCdc(connection, "tableb");
// Enable a second capture instance
connection.execute("exec sp_rename 'tableb.colb', 'newcolb';");
TestHelper.enableTableCdc(connection, "tableb", "after_change");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb(id,newcolb) VALUES(" + id + ", 'b2')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("newcolb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a3')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b3')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("newcolb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
});
}
@Test
public void changeColumn() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START_1 = 10;
final int ID_START_2 = 100;
final int ID_START_3 = 1000;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_1 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", '" + id + "')"
);
}
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_STRING_SCHEMA)
.build()
);
final Struct value = ((Struct)record.value()).getStruct("after");
final int id = value.getInt32("id");
final String colb = value.getString("colb");
Assertions.assertThat(Integer.toString(id)).isEqualTo(colb);
});
// Enable a second capture instance
connection.execute("ALTER TABLE dbo.tableb ALTER COLUMN colb INT");
TestHelper.enableTableCdc(connection, "tableb", "after_change");
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_2 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a2')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", '" + id + " ')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_INT32_SCHEMA)
.build()
);
final Struct value = ((Struct)record.value()).getStruct("after");
final int id = value.getInt32("id");
final int colb = value.getInt32("colb");
Assertions.assertThat(id).isEqualTo(colb);
});
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a3')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", '" + id + " ')"
);
}
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct(
(Struct)((Struct)record.value()).get("after"),
SchemaBuilder.struct()
.optional()
.name("server1.dbo.tableb.Value")
.field("id", Schema.INT32_SCHEMA)
.field("colb", Schema.OPTIONAL_INT32_SCHEMA)
.build()
);
final Struct value = ((Struct)record.value()).getStruct("after");
final int id = value.getInt32("id");
final int colb = value.getInt32("colb");
Assertions.assertThat(id).isEqualTo(colb);
});
}
}

View File

@ -1,84 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.math.BigInteger;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.util.Testing;
/**
* Integration test for {@link SqlServerConnection}
*
* @author Horia Chiorean (hchiorea@redhat.com)
*/
public class SqlServerConnectionIT {
@Before
public void before() throws SQLException {
TestHelper.dropTestDatabase();
}
@Test
public void shouldEnableCdcForDatabase() throws Exception {
try (SqlServerConnection connection = TestHelper.adminConnection()) {
connection.connect();
connection.execute("CREATE DATABASE testDB");
connection.execute("USE testDB");
// NOTE: you cannot enable CDC on master
TestHelper.enableDbCdc(connection, "testDB");
}
}
@Test
public void shouldEnableCdcWithWrapperFunctionsForTable() throws Exception {
try (SqlServerConnection connection = TestHelper.adminConnection()) {
connection.connect();
connection.execute("CREATE DATABASE testDB");
connection.execute("USE testDB");
// NOTE: you cannot enable CDC on master
TestHelper.enableDbCdc(connection, "testDB");
// drop the table if it already exists, then (re-)create it
String sql = "IF EXISTS (select 1 from sys.objects where name = 'testTable' and type = 'u')\n"
+ "DROP TABLE testTable\n"
+ "CREATE TABLE testTable (ID int not null identity(1, 1) primary key, NUMBER int, TEXT text)";
connection.execute(sql);
// then enable CDC and wrapper functions
TestHelper.enableTableCdc(connection, "testTable");
// insert some data
connection.execute("INSERT INTO testTable (NUMBER, TEXT) values (1, 'aaa')\n"
+ "INSERT INTO testTable (NUMBER, TEXT) values (2, 'bbb')");
// and issue a test call to a CDC wrapper function
Thread.sleep(5_000); // Need to wait to make sure the min_lsn is available
Testing.Print.enable();
connection.query(
"select * from cdc.fn_cdc_get_all_changes_dbo_testTable(sys.fn_cdc_get_min_lsn('dbo_testTable'), sys.fn_cdc_get_max_lsn(), N'all')",
rs -> {
while (rs.next()) {
final BigInteger lsn = new BigInteger(rs.getBytes(1));
final StringBuilder sb = new StringBuilder(lsn.toString());
for (int col = 1; col <= rs.getMetaData().getColumnCount(); col++) {
sb.append(rs.getObject(col)).append(' ');
}
Testing.print(sb.toString());
}
});
Testing.Print.disable();
}
}
}

View File

@ -1,508 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import static org.fest.assertions.Assertions.assertThat;
import static org.junit.Assert.assertNull;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.fest.assertions.Assertions;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig.SnapshotMode;
import io.debezium.connector.sqlserver.util.TestHelper;
import io.debezium.data.SchemaAndValueField;
import io.debezium.data.SourceRecordAssert;
import io.debezium.doc.FixFor;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.util.Testing;
/**
* Integration test for the Debezium SQL Server connector.
*
* @author Jiri Pechanec
*/
public class SqlServerConnectorIT extends AbstractConnectorTest {
private SqlServerConnection connection;
@Before
public void before() throws SQLException {
TestHelper.createTestDatabase();
connection = TestHelper.testConnection();
connection.execute(
"CREATE TABLE tablea (id int primary key, cola varchar(30))",
"CREATE TABLE tableb (id int primary key, colb varchar(30))",
"INSERT INTO tablea VALUES(1, 'a')"
);
TestHelper.enableTableCdc(connection, "tablea");
TestHelper.enableTableCdc(connection, "tableb");
initializeConnectorTestFramework();
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
@After
public void after() throws SQLException {
if (connection != null) {
connection.close();
}
}
@Test
public void createAndDelete() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i);
final List<SchemaAndValueField> expectedRowA = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("cola", Schema.OPTIONAL_STRING_SCHEMA, "a"));
final List<SchemaAndValueField> expectedRowB = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct keyA = (Struct)recordA.key();
final Struct valueA = (Struct)recordA.value();
assertRecord((Struct)valueA.get("after"), expectedRowA);
assertNull(valueA.get("before"));
final Struct keyB = (Struct)recordB.key();
final Struct valueB = (Struct)recordB.value();
assertRecord((Struct)valueB.get("after"), expectedRowB);
assertNull(valueB.get("before"));
}
connection.execute("DELETE FROM tableB");
final SourceRecords deleteRecords = consumeRecordsByTopic(2 * RECORDS_PER_TABLE);
final List<SourceRecord> deleteTableA = deleteRecords.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> deleteTableB = deleteRecords.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(deleteTableA).isNullOrEmpty();
Assertions.assertThat(deleteTableB).hasSize(2 * RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord deleteRecord = deleteTableB.get(i * 2);
final SourceRecord tombstoneRecord = deleteTableB.get(i * 2 + 1);
final List<SchemaAndValueField> expectedDeleteRow = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct deleteKey = (Struct)deleteRecord.key();
final Struct deleteValue = (Struct)deleteRecord.value();
assertRecord((Struct)deleteValue.get("before"), expectedDeleteRow);
assertNull(deleteValue.get("after"));
final Struct tombstoneKey = (Struct)tombstoneRecord.key();
final Struct tombstoneValue = (Struct)tombstoneRecord.value();
assertNull(tombstoneValue);
}
stopConnector();
}
@Test
public void update() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
connection.setAutoCommit(false);
final String[] tableBInserts = new String[RECORDS_PER_TABLE];
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
tableBInserts[i] = "INSERT INTO tableb VALUES(" + id + ", 'b')";
}
connection.execute(tableBInserts);
connection.setAutoCommit(true);
connection.execute("UPDATE tableb SET colb='z'");
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE * 2);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordB = tableB.get(i);
final List<SchemaAndValueField> expectedRowB = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct keyB = (Struct)recordB.key();
final Struct valueB = (Struct)recordB.value();
assertRecord((Struct)valueB.get("after"), expectedRowB);
assertNull(valueB.get("before"));
}
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordB = tableB.get(i + RECORDS_PER_TABLE);
final List<SchemaAndValueField> expectedBefore = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final List<SchemaAndValueField> expectedAfter = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, i + ID_START),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "z"));
final Struct keyB = (Struct)recordB.key();
final Struct valueB = (Struct)recordB.value();
assertRecord((Struct)valueB.get("before"), expectedBefore);
assertRecord((Struct)valueB.get("after"), expectedAfter);
}
stopConnector();
}
@Test
public void streamChangesWhileStopped() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START = 10;
final int ID_RESTART = 100;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
stopConnector();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_RESTART + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = i + ID_RESTART;
final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i);
final List<SchemaAndValueField> expectedRowA = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id),
new SchemaAndValueField("cola", Schema.OPTIONAL_STRING_SCHEMA, "a"));
final List<SchemaAndValueField> expectedRowB = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct valueA = (Struct)recordA.value();
assertRecord((Struct)valueA.get("after"), expectedRowA);
assertNull(valueA.get("before"));
final Struct valueB = (Struct)recordB.value();
assertRecord((Struct)valueB.get("after"), expectedRowB);
assertNull(valueB.get("before"));
}
}
@Test
@FixFor("DBZ-1069")
public void verifyOffsets() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 2;
final int ID_START = 10;
final int ID_RESTART = 100;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.build();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
for (int i = 0; !connection.getMaxLsn().isAvailable(); i++) {
if (i == 30) {
org.junit.Assert.fail("Initial changes not writtent to CDC structures");
}
Testing.debug("Waiting for initial changes to be propagated to CDC structures");
Thread.sleep(1000);
}
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
List<SourceRecord> records = consumeRecordsByTopic(1 + RECORDS_PER_TABLE * TABLES).allRecordsInOrder();
records = records.subList(1, records.size());
for (Iterator<SourceRecord> it = records.iterator(); it.hasNext();) {
SourceRecord record = it.next();
assertThat(record.sourceOffset().get("snapshot")).as("Snapshot phase").isEqualTo(true);
if (it.hasNext()) {
assertThat(record.sourceOffset().get("snapshot_completed")).as("Snapshot in progress").isEqualTo(false);
}
else {
assertThat(record.sourceOffset().get("snapshot_completed")).as("Snapshot completed").isEqualTo(true);
}
}
stopConnector();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_RESTART + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords sourceRecords = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = sourceRecords.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = sourceRecords.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = i + ID_RESTART;
final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i);
final List<SchemaAndValueField> expectedRowA = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id),
new SchemaAndValueField("cola", Schema.OPTIONAL_STRING_SCHEMA, "a"));
final List<SchemaAndValueField> expectedRowB = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, id),
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct valueA = (Struct)recordA.value();
assertRecord((Struct)valueA.get("after"), expectedRowA);
assertNull(valueA.get("before"));
final Struct valueB = (Struct)recordB.value();
assertRecord((Struct)valueB.get("after"), expectedRowB);
assertNull(valueB.get("before"));
assertThat(recordA.sourceOffset().get("snapshot")).as("Streaming phase").isNull();
assertThat(recordA.sourceOffset().get("snapshot_completed")).as("Streaming phase").isNull();
assertThat(recordA.sourceOffset().get("change_lsn")).as("LSN present").isNotNull();
assertThat(recordB.sourceOffset().get("snapshot")).as("Streaming phase").isNull();
assertThat(recordB.sourceOffset().get("snapshot_completed")).as("Streaming phase").isNull();
assertThat(recordB.sourceOffset().get("change_lsn")).as("LSN present").isNotNull();
}
}
@Test
public void whitelistTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tableb")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA).isNullOrEmpty();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector();
}
@Test
public void blacklistTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.with(SqlServerConnectorConfig.TABLE_BLACKLIST, "dbo.tablea")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')"
);
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')"
);
}
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA).isNullOrEmpty();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector();
}
@Test
@FixFor("DBZ-1067")
public void blacklistColumn() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))"
);
TestHelper.enableTableCdc(connection, "blacklist_column_table_a");
TestHelper.enableTableCdc(connection, "blacklist_column_table_b");
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.with(SqlServerConnectorConfig.COLUMN_BLACKLIST, "dbo.blacklist_column_table_a.amount")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
connection.execute("INSERT INTO blacklist_column_table_a VALUES(10, 'some_name', 120)");
connection.execute("INSERT INTO blacklist_column_table_b VALUES(11, 'some_name', 447)");
final SourceRecords records = consumeRecordsByTopic(2);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.blacklist_column_table_a");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.blacklist_column_table_b");
Schema expectedSchemaA = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_a.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.build();
Struct expectedValueA = new Struct(expectedSchemaA)
.put("id", 10)
.put("name", "some_name");
Schema expectedSchemaB = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_b.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.field("amount", Schema.OPTIONAL_INT32_SCHEMA)
.build();
Struct expectedValueB = new Struct(expectedSchemaB)
.put("id", 11)
.put("name", "some_name")
.put("amount", 447);
Assertions.assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
stopConnector();
}
/**
* Passing the "applicationName" property which can be asserted from the connected sessions".
*/
@Test
@FixFor("DBZ-964")
public void shouldPropagateDatabaseDriverProperties() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
.with("database.applicationName", "Debezium App DBZ-964")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
// consuming one record to make sure the connector establishes the DB connection which happens asynchronously
// after the start() call
connection.execute("INSERT INTO tablea VALUES(964, 'a')");
consumeRecordsByTopic(1);
connection.query("select count(1) from sys.dm_exec_sessions where program_name = 'Debezium App DBZ-964'", rs -> {
rs.next();
assertThat(rs.getInt(1)).isEqualTo(1);
});
}
private void assertRecord(Struct record, List<SchemaAndValueField> expected) {
expected.forEach(schemaAndValueField -> schemaAndValueField.assertFor(record));
}
}

View File

@ -1,207 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver.util;
import java.nio.file.Path;
import java.sql.SQLException;
import java.util.Objects;
import io.debezium.config.Configuration;
import io.debezium.connector.sqlserver.SqlServerConnection;
import io.debezium.connector.sqlserver.SqlServerConnectorConfig;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.relational.history.FileDatabaseHistory;
import io.debezium.util.IoUtil;
import io.debezium.util.Testing;
/**
* @author Horia Chiorean (hchiorea@redhat.com)
*/
public class TestHelper {
public static final Path DB_HISTORY_PATH = Testing.Files.createTestingPath("file-db-history-connect.txt").toAbsolutePath();
public static final String TEST_DATABASE = "testdb";
private static final String STATEMENTS_PLACEHOLDER = "#";
private static final String ENABLE_DB_CDC = "IF EXISTS(select 1 from sys.databases where name='#' AND is_cdc_enabled=0)\n"
+ "EXEC sys.sp_cdc_enable_db";
private static final String DISABLE_DB_CDC = "IF EXISTS(select 1 from sys.databases where name='#' AND is_cdc_enabled=1)\n"
+ "EXEC sys.sp_cdc_disable_db";
private static final String ENABLE_TABLE_CDC = "IF EXISTS(select 1 from sys.tables where name = '#' AND is_tracked_by_cdc=0)\n"
+ "EXEC sys.sp_cdc_enable_table @source_schema = N'dbo', @source_name = N'#', @role_name = NULL, @supports_net_changes = 0";
private static final String ENABLE_TABLE_CDC_WITH_CUSTOM_CAPTURE = "EXEC sys.sp_cdc_enable_table @source_schema = N'dbo', @source_name = N'%s', @capture_instance = N'%s', @role_name = NULL, @supports_net_changes = 0";
private static final String DISABLE_TABLE_CDC = "EXEC sys.sp_cdc_disable_table @source_schema = N'dbo', @source_name = N'#', @capture_instance = 'all'";
private static final String CDC_WRAPPERS_DML;
static {
try {
ClassLoader classLoader = TestHelper.class.getClassLoader();
CDC_WRAPPERS_DML = IoUtil.read(classLoader.getResourceAsStream("generate_cdc_wrappers.sql"));
}
catch (Exception e) {
throw new RuntimeException("Cannot load SQL Server statements", e);
}
}
public static JdbcConfiguration adminJdbcConfig() {
return JdbcConfiguration.copy(Configuration.fromSystemProperties("database."))
.withDefault(JdbcConfiguration.DATABASE, "master")
.withDefault(JdbcConfiguration.HOSTNAME, "localhost")
.withDefault(JdbcConfiguration.PORT, 1433)
.withDefault(JdbcConfiguration.USER, "sa")
.withDefault(JdbcConfiguration.PASSWORD, "Password!")
.build();
}
public static JdbcConfiguration defaultJdbcConfig() {
return JdbcConfiguration.copy(Configuration.fromSystemProperties("database."))
.withDefault(JdbcConfiguration.DATABASE, TEST_DATABASE)
.withDefault(JdbcConfiguration.HOSTNAME, "localhost")
.withDefault(JdbcConfiguration.PORT, 1433)
.withDefault(JdbcConfiguration.USER, "sa")
.withDefault(JdbcConfiguration.PASSWORD, "Password!")
.build();
}
/**
* Returns a default configuration suitable for most test cases. Can be amended/overridden in individual tests as
* needed.
*/
public static Configuration.Builder defaultConfig() {
JdbcConfiguration jdbcConfiguration = defaultJdbcConfig();
Configuration.Builder builder = Configuration.create();
jdbcConfiguration.forEach(
(field, value) -> builder.with(SqlServerConnectorConfig.DATABASE_CONFIG_PREFIX + field, value)
);
return builder.with(SqlServerConnectorConfig.LOGICAL_NAME, "server1")
.with(SqlServerConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH);
}
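// Illustrative usage, mirroring the integration tests above: a test amends the returned
// builder before building the final configuration, e.g.
//
//     final Configuration config = TestHelper.defaultConfig()
//             .with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_SCHEMA_ONLY)
//             .build();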
public static void createTestDatabase() {
// NOTE: you cannot enable CDC for the "master" db (the default one) so
// all tests must use a separate database...
try (SqlServerConnection connection = adminConnection()) {
connection.connect();
try {
connection.execute("USE testDB");
disableDbCdc(connection, "testDB");
}
catch (SQLException e) {
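// ignored - the test database may not exist yet, in which case there is nothing to disable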
}
connection.execute("USE master");
String sql = "IF EXISTS(select 1 from sys.databases where name='testDB') DROP DATABASE testDB\n"
+ "CREATE DATABASE testDB\n";
connection.execute(sql);
connection.execute("USE testDB");
connection.execute("ALTER DATABASE testDB SET ALLOW_SNAPSHOT_ISOLATION ON");
// NOTE: you cannot enable CDC on master
enableDbCdc(connection, "testDB");
}
catch (SQLException e) {
throw new IllegalStateException("Error while initiating test database", e);
}
}
public static void dropTestDatabase() {
try (SqlServerConnection connection = adminConnection()) {
connection.connect();
try {
connection.execute("USE testDB");
disableDbCdc(connection, "testDB");
}
catch (SQLException e) {
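// ignored - the test database may not exist, in which case there is nothing to disable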
}
connection.execute("USE master");
String sql = "IF EXISTS(select 1 from sys.databases where name='testDB') DROP DATABASE testDB";
connection.execute(sql);
}
catch (SQLException e) {
throw new IllegalStateException("Error while dropping test database", e);
}
}
public static SqlServerConnection adminConnection() {
return new SqlServerConnection(TestHelper.adminJdbcConfig());
}
public static SqlServerConnection testConnection() {
return new SqlServerConnection(TestHelper.defaultJdbcConfig());
}
/**
* Enables CDC for a given database, if not already enabled.
*
* @param name
* the name of the DB, may not be {@code null}
* @throws SQLException
* if anything unexpected fails
*/
public static void enableDbCdc(SqlServerConnection connection, String name) throws SQLException {
Objects.requireNonNull(name);
connection.execute(ENABLE_DB_CDC.replace(STATEMENTS_PLACEHOLDER, name));
}
/**
* Disables CDC for a given database, if not already disabled.
*
* @param name
* the name of the DB, may not be {@code null}
* @throws SQLException
* if anything unexpected fails
*/
protected static void disableDbCdc(SqlServerConnection connection, String name) throws SQLException {
Objects.requireNonNull(name);
connection.execute(DISABLE_DB_CDC.replace(STATEMENTS_PLACEHOLDER, name));
}
/**
* Enables CDC for a table if not already enabled and generates the wrapper
* functions for that table.
*
* @param name
* the name of the table, may not be {@code null}
* @throws SQLException if anything unexpected fails
*/
public static void enableTableCdc(SqlServerConnection connection, String name) throws SQLException {
Objects.requireNonNull(name);
String enableCdcForTableStmt = ENABLE_TABLE_CDC.replace(STATEMENTS_PLACEHOLDER, name);
String generateWrapperFunctionsStmts = CDC_WRAPPERS_DML.replaceAll(STATEMENTS_PLACEHOLDER, name);
connection.execute(enableCdcForTableStmt, generateWrapperFunctionsStmts);
}
/**
* Enables CDC for a table, using a custom capture instance name.
*
* @param tableName
*            the name of the table, may not be {@code null}
* @param captureName
*            the name of the capture instance, may not be {@code null}
* @throws SQLException if anything unexpected fails
*/
public static void enableTableCdc(SqlServerConnection connection, String tableName, String captureName) throws SQLException {
Objects.requireNonNull(tableName);
Objects.requireNonNull(captureName);
String enableCdcForTableStmt = String.format(ENABLE_TABLE_CDC_WITH_CUSTOM_CAPTURE, tableName, captureName);
connection.execute(enableCdcForTableStmt);
}
/**
* Disables CDC for a table for which it was enabled before.
*
* @param name
* the name of the table, may not be {@code null}
* @throws SQLException if anything unexpected fails
*/
public static void disableTableCdc(SqlServerConnection connection, String name) throws SQLException {
Objects.requireNonNull(name);
String disableCdcForTableStmt = DISABLE_TABLE_CDC.replace(STATEMENTS_PLACEHOLDER, name);
connection.execute(disableCdcForTableStmt);
}
}

View File

@ -1,33 +0,0 @@
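-- CDC wrapper-function generation script; TestHelper substitutes the table name for the
-- placeholder character before executing it. If the table is already tracked by CDC the
-- script returns immediately; otherwise it runs every create script produced by
-- sys.sp_cdc_generate_wrapper_function via sp_executesql.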
IF EXISTS(select 1 from sys.tables where name = '#' AND is_tracked_by_cdc=1)
BEGIN
RETURN
END
ELSE
BEGIN
DECLARE @wrapper_functions TABLE(
function_name SYSNAME,
create_script NVARCHAR(MAX))
INSERT INTO @wrapper_functions
EXEC sys.sp_cdc_generate_wrapper_function
DECLARE @create_script NVARCHAR(MAX)
DECLARE #hfunctions CURSOR LOCAL FAST_FORWARD
FOR
SELECT create_script
FROM @wrapper_functions
OPEN #hfunctions
FETCH #hfunctions
INTO @create_script
WHILE (@@fetch_status <> -1)
BEGIN
EXEC sp_executesql @create_script
FETCH #hfunctions
INTO @create_script
END
CLOSE #hfunctions
DEALLOCATE #hfunctions
END

View File

@ -1,15 +0,0 @@
# Direct log messages to stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n
# Root logger option
log4j.rootLogger=INFO, stdout
# Set up the default logging to be INFO level, then override specific units
log4j.logger.io.debezium=INFO
log4j.logger.io.debezium.embedded.EmbeddedEngine$EmbeddedConfig=WARN
#log4j.logger.io.debezium.embedded.EmbeddedEngine=DEBUG
log4j.logger.io.debezium.core=DEBUG
log4j.logger.io.debezium.connector.sqlserver=TRACE

View File

@ -24,7 +24,6 @@
<properties>
<!-- Databases -->
<version.oracle.driver>12.1.0.2</version.oracle.driver>
<version.sqlserver.driver>6.4.0.jre8</version.sqlserver.driver>
<!-- Debezium parent -->
<version.debezium>${project.version}</version.debezium>
@ -32,13 +31,6 @@
<dependencyManagement>
<dependencies>
<!--SQL Server -->
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${version.sqlserver.driver}</version>
</dependency>
<!-- Debezium artifacts -->
<dependency>
<groupId>io.debezium</groupId>
@ -80,7 +72,6 @@
</dependencyManagement>
<modules>
<module>debezium-connector-sqlserver</module>
</modules>
<build>