DBZ-2171 avoid divisive language

rkerner 2020-08-11 17:19:44 +02:00 committed by Jiri Pechanec
parent 4c3cf9f93e
commit 2c5c093587
112 changed files with 2883 additions and 602 deletions

View File

@@ -93,7 +93,7 @@ public FieldSelectorBuilder renameFields(String fullyQualifiedFieldReplacements)
/**
* Builds the filter selector that returns the field filter for a given collection identifier, using the comma-separated
* list of fully-qualified field names (for details, see {@link MongoDbConnectorConfig#FIELD_BLACKLIST}) defining
* list of fully-qualified field names (for details, see {@link MongoDbConnectorConfig#FIELD_EXCLUDE_LIST}) defining
* which fields (if any) should be excluded, and using the comma-separated list of fully-qualified field replacements
* (for details, see {@link MongoDbConnectorConfig#FIELD_RENAMES}) defining which fields (if any) should be
* renamed.
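For orientation, here is a minimal sketch of wiring up the builder this Javadoc describes. It is illustrative only: the collection namespace, the field paths, and the fieldFilterFor(...)/CollectionId signatures are assumptions, not read from this diff.

// Hypothetical usage of the FieldSelector builder; identifiers below are invented.
FieldSelector selector = FieldSelector.builder()
        .excludeFields("dbA.c1.password,dbA.c1.ssn") // fully-qualified fields to drop
        .renameFields("dbA.c1.phone:telephone") // <old>:<new> replacement pairs
        .build();
// Assumed accessor: returns the field filter for one collection identifier.
FieldSelector.FieldFilter filter = selector.fieldFilterFor(new CollectionId("rs0", "dbA", "c1"));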

View File

@@ -32,26 +32,26 @@ public final class Filters {
* @param config the configuration; may not be null
*/
public Filters(Configuration config) {
String dbWhitelist = config.getString(MongoDbConnectorConfig.DATABASE_WHITELIST);
String dbBlacklist = config.getString(MongoDbConnectorConfig.DATABASE_BLACKLIST);
if (dbWhitelist != null && !dbWhitelist.trim().isEmpty()) {
databaseFilter = Predicates.includes(dbWhitelist);
String dbIncludeList = config.getFallbackStringProperty(MongoDbConnectorConfig.DATABASE_INCLUDE_LIST, MongoDbConnectorConfig.DATABASE_WHITELIST);
String dbExcludeList = config.getFallbackStringProperty(MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST, MongoDbConnectorConfig.DATABASE_BLACKLIST);
if (dbIncludeList != null && !dbIncludeList.trim().isEmpty()) {
databaseFilter = Predicates.includes(dbIncludeList);
}
else if (dbBlacklist != null && !dbBlacklist.trim().isEmpty()) {
databaseFilter = Predicates.excludes(dbBlacklist);
else if (dbExcludeList != null && !dbExcludeList.trim().isEmpty()) {
databaseFilter = Predicates.excludes(dbExcludeList);
}
else {
databaseFilter = (db) -> true;
}
String collectionWhitelist = config.getString(MongoDbConnectorConfig.COLLECTION_WHITELIST);
String collectionBlacklist = config.getString(MongoDbConnectorConfig.COLLECTION_BLACKLIST);
String collectionIncludeList = config.getFallbackStringProperty(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, MongoDbConnectorConfig.COLLECTION_WHITELIST);
String collectionExcludeList = config.getFallbackStringProperty(MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST, MongoDbConnectorConfig.COLLECTION_BLACKLIST);
final Predicate<CollectionId> collectionFilter;
if (collectionWhitelist != null && !collectionWhitelist.trim().isEmpty()) {
collectionFilter = Predicates.includes(collectionWhitelist, CollectionId::namespace);
if (collectionIncludeList != null && !collectionIncludeList.trim().isEmpty()) {
collectionFilter = Predicates.includes(collectionIncludeList, CollectionId::namespace);
}
else if (collectionBlacklist != null && !collectionBlacklist.trim().isEmpty()) {
collectionFilter = Predicates.excludes(collectionBlacklist, CollectionId::namespace);
else if (collectionExcludeList != null && !collectionExcludeList.trim().isEmpty()) {
collectionFilter = Predicates.excludes(collectionExcludeList, CollectionId::namespace);
}
else {
collectionFilter = (id) -> true;
@@ -61,7 +61,7 @@ else if (collectionBlacklist != null && !collectionBlacklist.trim().isEmpty()) {
// Define the field selector that provides the field filter to exclude or rename fields in a document ...
fieldSelector = FieldSelector.builder()
.excludeFields(config.getString(MongoDbConnectorConfig.FIELD_BLACKLIST))
.excludeFields(config.getFallbackStringProperty(MongoDbConnectorConfig.FIELD_EXCLUDE_LIST, MongoDbConnectorConfig.FIELD_BLACKLIST))
.renameFields(config.getString(MongoDbConnectorConfig.FIELD_RENAMES))
.build();
}
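The substantive change in this constructor is that every lookup now goes through getFallbackStringProperty, preferring the new property name and consulting the deprecated one only when the new one is unset. A rough sketch of those semantics as implied by the call sites (the real io.debezium.config.Configuration method may differ, for instance by logging a deprecation warning):

// Sketch only: resolve the preferred property first, then fall back to the
// deprecated alias when the preferred one is not set.
String getFallbackStringProperty(Field preferred, Field deprecated) {
    String value = getString(preferred);
    return value != null ? value : getString(deprecated);
}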

View File

@@ -26,6 +26,9 @@
*/
public class MongoDbConnectorConfig extends CommonConnectorConfig {
protected static final String COLLECTION_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"collection.include.list\" or \"collection.whitelist\" is already specified";
protected static final String DATABASE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"database.include.list\" or \"database.whitelist\" is already specified";
/**
* The set of predefined SnapshotMode options or aliases.
*/
@@ -236,48 +239,94 @@ public static SnapshotMode parse(String value, String defaultValue) {
/**
* A comma-separated list of regular expressions that match the databases to be monitored.
* May not be used with {@link #DATABASE_BLACKLIST}.
* Must not be used with {@link #DATABASE_EXCLUDE_LIST}.
*/
public static final Field DATABASE_WHITELIST = Field.create("database.whitelist")
.withDisplayName("DB Whitelist")
public static final Field DATABASE_INCLUDE_LIST = Field.create("database.include.list")
.withDisplayName("Include Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withValidation(Field::isListOfRegex,
MongoDbConnectorConfig::validateDatabaseBlacklist)
.withValidation(Field::isListOfRegex, MongoDbConnectorConfig::validateDatabaseExcludeList)
.withDescription("The databases for which changes are to be captured");
/**
* A comma-separated list of regular expressions that match the databases to be excluded.
* May not be used with {@link #DATABASE_WHITELIST}.
* Old, backwards-compatible "whitelist" property.
*/
public static final Field DATABASE_BLACKLIST = Field.create("database.blacklist")
.withDisplayName("DB Blacklist")
@Deprecated
public static final Field DATABASE_WHITELIST = Field.create("database.whitelist")
.withDisplayName("Deprecated: Include Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(Field::isListOfRegex, MongoDbConnectorConfig::validateDatabaseExcludeList)
.withInvisibleRecommender()
.withDescription("The databases for which changes are to be captured (deprecated, use \"" + DATABASE_INCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match the databases to be excluded.
* Must not be used with {@link #DATABASE_INCLUDE_LIST}.
*/
public static final Field DATABASE_EXCLUDE_LIST = Field.create("database.exclude.list")
.withDisplayName("Exclude Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withValidation(Field::isListOfRegex)
.withDescription("The databases for which changes are to be excluded");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field DATABASE_BLACKLIST = Field.create("database.blacklist")
.withDisplayName("Deprecated: Exclude Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(Field::isListOfRegex)
.withInvisibleRecommender()
.withDescription("The databases for which changes are to be excluded (deprecated, use \"" + DATABASE_EXCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match the fully-qualified namespaces of collections to be monitored.
* Fully-qualified namespaces for collections are of the form {@code <databaseName>.<collectionName>}.
* May not be used with {@link #COLLECTION_BLACKLIST}.
* Must not be used with {@link #COLLECTION_EXCLUDE_LIST}.
*/
public static final Field COLLECTION_WHITELIST = Field.create("collection.whitelist")
.withDisplayName("Collections")
public static final Field COLLECTION_INCLUDE_LIST = Field.create("collection.include.list")
.withDisplayName("Include Collections")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withValidation(Field::isListOfRegex,
MongoDbConnectorConfig::validateCollectionBlacklist)
MongoDbConnectorConfig::validateCollectionExcludeList)
.withDescription("The collections for which changes are to be captured");
/**
* Old, backwards-compatible "whitelist" property.
*/
@Deprecated
public static final Field COLLECTION_WHITELIST = Field.create("collection.whitelist")
.withDisplayName("Deprecated: Include Collections")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(Field::isListOfRegex, MongoDbConnectorConfig::validateCollectionExcludeList)
.withInvisibleRecommender()
.withDescription("The collections for which changes are to be captured (deprecated, use \"" + COLLECTION_INCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match the fully-qualified namespaces of collections to be excluded from
* monitoring. Fully-qualified namespaces for collections are of the form {@code <databaseName>.<collectionName>}.
* May not be used with {@link #COLLECTION_WHITELIST}.
* Must not be used with {@link #COLLECTION_INCLUDE_LIST}.
*/
public static final Field COLLECTION_EXCLUDE_LIST = Field.create("collection.exclude.list")
.withValidation(Field::isListOfRegex)
.withInvisibleRecommender();
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field COLLECTION_BLACKLIST = Field.create("collection.blacklist")
.withValidation(Field::isListOfRegex)
.withInvisibleRecommender();
@@ -288,12 +337,25 @@ public static SnapshotMode parse(String value, String defaultValue) {
* <databaseName>.<collectionName>.<fieldName>.<nestedFieldName>}, where {@code <databaseName>} and
* {@code <collectionName>} may contain the wildcard ({@code *}) which matches any characters.
*/
public static final Field FIELD_BLACKLIST = Field.create("field.blacklist")
public static final Field FIELD_EXCLUDE_LIST = Field.create("field.exclude.list")
.withDisplayName("Exclude Fields")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withDescription("");
.withDescription("A comma-separated list of the fully-qualified names of fields that should be excluded from change event message values");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field FIELD_BLACKLIST = Field.create("field.blacklist")
.withDisplayName("Deprecated: Exclude Fields")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withInvisibleRecommender()
.withDescription("A comma-separated list of the fully-qualified names of fields that should be excluded from change event message values (deprecated, use \""
+ FIELD_EXCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of the fully-qualified replacements of fields that should be used to rename fields in change
@@ -369,10 +431,15 @@ public static SnapshotMode parse(String value, String defaultValue) {
SSL_ALLOW_INVALID_HOSTNAMES)
.events(
DATABASE_WHITELIST,
DATABASE_INCLUDE_LIST,
DATABASE_BLACKLIST,
DATABASE_EXCLUDE_LIST,
COLLECTION_WHITELIST,
COLLECTION_INCLUDE_LIST,
COLLECTION_BLACKLIST,
COLLECTION_EXCLUDE_LIST,
FIELD_BLACKLIST,
FIELD_EXCLUDE_LIST,
FIELD_RENAMES)
.connector(
MAX_COPY_THREADS,
@@ -413,19 +480,21 @@ private static int validateHosts(Configuration config, Field field, ValidationOu
return count;
}
private static int validateCollectionBlacklist(Configuration config, Field field, ValidationOutput problems) {
return validateBlacklistField(config, problems, COLLECTION_WHITELIST, COLLECTION_BLACKLIST);
private static int validateCollectionExcludeList(Configuration config, Field field, ValidationOutput problems) {
String includeList = config.getFallbackStringProperty(COLLECTION_INCLUDE_LIST, COLLECTION_WHITELIST);
String excludeList = config.getFallbackStringProperty(COLLECTION_EXCLUDE_LIST, COLLECTION_BLACKLIST);
if (includeList != null && excludeList != null) {
problems.accept(COLLECTION_EXCLUDE_LIST, excludeList, COLLECTION_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
return 0;
}
private static int validateDatabaseBlacklist(Configuration config, Field field, ValidationOutput problems) {
return validateBlacklistField(config, problems, DATABASE_WHITELIST, DATABASE_BLACKLIST);
}
private static int validateBlacklistField(Configuration config, ValidationOutput problems, Field fieldWhitelist, Field fieldBlacklist) {
String whitelist = config.getString(fieldWhitelist);
String blacklist = config.getString(fieldBlacklist);
if (whitelist != null && blacklist != null) {
problems.accept(fieldBlacklist, blacklist, "Whitelist is already specified");
private static int validateDatabaseExcludeList(Configuration config, Field field, ValidationOutput problems) {
String includeList = config.getFallbackStringProperty(DATABASE_INCLUDE_LIST, DATABASE_WHITELIST);
String excludeList = config.getFallbackStringProperty(DATABASE_EXCLUDE_LIST, DATABASE_BLACKLIST);
if (includeList != null && excludeList != null) {
problems.accept(DATABASE_EXCLUDE_LIST, excludeList, DATABASE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
return 0;
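Taken together, these validators reject any configuration that sets both an include and an exclude list for the same scope, regardless of whether the old or the new property names are used. An illustrative, made-up configuration that the database validator would flag:

// Hypothetical: both lists are set, so validateDatabaseExcludeList(...) reports
// DATABASE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG against the exclude list and
// returns 1; with only one of the two lists set it would return 0.
Configuration config = Configuration.create()
        .with(MongoDbConnectorConfig.DATABASE_INCLUDE_LIST, "dbA")
        .with(MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST, "dbB")
        .build();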

View File

@@ -113,7 +113,7 @@ public boolean tableInformationComplete() {
@Override
public void assureNonEmptySchema() {
if (collections.isEmpty()) {
LOGGER.warn("After applying blacklist/whitelist filters there are no tables to monitor, please check your configuration");
LOGGER.warn(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING);
}
}

View File

@@ -7,6 +7,7 @@
import io.debezium.config.Configuration;
import io.debezium.config.Field;
import io.debezium.util.Testing;
/**
* A helper for easily building connector configurations for testing.
@@ -45,23 +46,48 @@ public Configurator maxBatchSize(int maxBatchSize) {
}
public Configurator includeDatabases(String regexList) {
return with(MongoDbConnectorConfig.DATABASE_WHITELIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MongoDbConnectorConfig.DATABASE_WHITELIST.name() + "\" config property");
return with(MongoDbConnectorConfig.DATABASE_WHITELIST, regexList);
}
Testing.debug("Using \"" + MongoDbConnectorConfig.DATABASE_INCLUDE_LIST.name() + "\" config property");
return with(MongoDbConnectorConfig.DATABASE_INCLUDE_LIST, regexList);
}
public Configurator excludeDatabases(String regexList) {
return with(MongoDbConnectorConfig.DATABASE_BLACKLIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MongoDbConnectorConfig.DATABASE_BLACKLIST.name() + "\" config property");
return with(MongoDbConnectorConfig.DATABASE_BLACKLIST, regexList);
}
Testing.debug("Using \"" + MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST.name() + "\" config property");
return with(MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST, regexList);
}
public Configurator includeCollections(String regexList) {
return with(MongoDbConnectorConfig.COLLECTION_WHITELIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MongoDbConnectorConfig.COLLECTION_WHITELIST.name() + "\" config property");
return with(MongoDbConnectorConfig.COLLECTION_WHITELIST, regexList);
}
Testing.debug("Using \"" + MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST.name() + "\" config property");
return with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, regexList);
}
public Configurator excludeCollections(String regexList) {
return with(MongoDbConnectorConfig.COLLECTION_BLACKLIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MongoDbConnectorConfig.COLLECTION_BLACKLIST.name() + "\" config property");
return with(MongoDbConnectorConfig.COLLECTION_BLACKLIST, regexList);
}
Testing.debug("Using \"" + MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST.name() + "\" config property");
return with(MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST, regexList);
}
public Configurator excludeFields(String blacklist) {
return with(MongoDbConnectorConfig.FIELD_BLACKLIST, blacklist);
public Configurator excludeFields(String excludeList) {
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MongoDbConnectorConfig.FIELD_BLACKLIST.name() + "\" config property");
return with(MongoDbConnectorConfig.FIELD_BLACKLIST, excludeList);
}
Testing.debug("Using \"" + MongoDbConnectorConfig.FIELD_EXCLUDE_LIST.name() + "\" config property");
return with(MongoDbConnectorConfig.FIELD_EXCLUDE_LIST, excludeList);
}
public Configurator renameFields(String renames) {

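The coin flip in these helpers is a deliberate design choice: each run randomly picks either the deprecated property name or its replacement, so both code paths stay exercised across the suite without duplicating every test, at the cost of per-run determinism. A sketch of a typical call site; the terminal createFilters() method name is an assumption, not read from this diff:

// Hypothetical usage: whichever property name the coin flip lands on, the
// resulting filters must behave identically.
Filters filters = new Configurator()
        .includeDatabases("dbA,dbB")
        .excludeCollections("dbB.moviesB")
        .createFilters();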
View File

@@ -40,8 +40,8 @@ public void setUp() {
@Test
public void shouldCreateMovieDatabase() {
useConfiguration(config.edit()
.with(MongoDbConnectorConfig.DATABASE_WHITELIST, "dbA,dbB")
.with(MongoDbConnectorConfig.COLLECTION_BLACKLIST, "dbB.moviesB")
.with(MongoDbConnectorConfig.DATABASE_INCLUDE_LIST, "dbA,dbB")
.with(MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST, "dbB.moviesB")
.build());
Testing.print("Configuration: " + config);

View File

@@ -0,0 +1,1274 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.mongodb;
import static io.debezium.connector.mongodb.MongoDbSchema.COMPACT_JSON_SETTINGS;
import static io.debezium.data.Envelope.FieldName.AFTER;
import static org.fest.assertions.Assertions.assertThat;
import static org.fest.assertions.Fail.fail;
import java.util.Arrays;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.InsertOneOptions;
import io.debezium.config.Configuration;
import io.debezium.connector.mongodb.ConnectionContext.MongoPrimary;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.util.Testing;
// todo: extend AbstractMongoConnectorIT?
public class FieldExcludeListIT extends AbstractConnectorTest {
private static final String SERVER_NAME = "serverX";
private static final String PATCH = "patch";
private Configuration config;
private MongoDbTaskContext context;
@Before
public void beforeEach() {
Debug.disable();
Print.disable();
stopConnector();
initializeConnectorTestFramework();
}
@After
public void afterEach() {
try {
stopConnector();
}
finally {
if (context != null) {
context.getConnectionContext().shutdown();
}
}
}
@Test
public void shouldNotExcludeFieldsForEventOfOtherCollection() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
assertReadRecord("*.c2.name,*.c2.active", obj, AFTER, obj.toJson(COMPACT_JSON_SETTINGS));
}
@Test
public void shouldExcludeFieldsForReadEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"_id\": {\"$oid\": \"" + objId + "\"},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}";
// @formatter:on
assertReadRecord("*.c1.name,*.c1.active", obj, AFTER, expected);
}
@Test
public void shouldNotExcludeMissingFieldsForReadEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
assertReadRecord("*.c1.missing", obj, AFTER, obj.toJson(COMPACT_JSON_SETTINGS));
}
@Test
public void shouldExcludeNestedFieldsForReadEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"_id\": {\"$oid\": \"" + objId + "\"},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"address\": {"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}";
// @formatter:on
assertReadRecord("*.c1.name,*.c1.active,*.c1.address.number", obj, AFTER, expected);
}
@Test
public void shouldNotExcludeNestedMissingFieldsForReadEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
assertReadRecord("*.c1.address.missing", obj, AFTER, obj.toJson(COMPACT_JSON_SETTINGS));
}
@Test
public void shouldExcludeFieldsForInsertEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"_id\": {\"$oid\": \"" + objId + "\"},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}";
// @formatter:on
assertInsertRecord("*.c1.name,*.c1.active", obj, AFTER, expected);
}
@Test
public void shouldNotExcludeMissingFieldsForInsertEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
assertInsertRecord("*.c1.missing", obj, AFTER, obj.toJson(COMPACT_JSON_SETTINGS));
}
@Test
public void shouldExcludeNestedFieldsForInsertEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"_id\": {\"$oid\": \"" + objId + "\"},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"address\": {"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}";
// @formatter:on
assertInsertRecord("*.c1.name,*.c1.active,*.c1.address.number", obj, AFTER, expected);
}
@Test
public void shouldNotExcludeNestedMissingFieldsForInsertEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
assertInsertRecord("*.c1.address.missing", obj, AFTER, obj.toJson(COMPACT_JSON_SETTINGS));
}
@Test
public void shouldExcludeFieldsForUpdateEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("phone", 123L)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.active", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldNotExcludeMissingFieldsForUpdateEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("phone", 123L)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.missing", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForUpdateEventWithEmbeddedDocument() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("address", new Document()
.append("number", 35L)
.append("street", "Claude Debussylaane")
.append("city", "Amsterdame"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"address\": {"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.active,*.c1.address.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldNotExcludeNestedMissingFieldsForUpdateEventWithEmbeddedDocument() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("address", new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"))
.append("active", false)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"active\": true,"
+ "\"address\": {"
+ "\"number\": {\"$numberLong\": \"34\"},"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.address.missing", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("addresses", Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"),
new Document()
.append("number", 8L)
.append("street", "Fragkokklisiass")
.append("city", "Athense")))
.append("active", false)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"),
new Document()
.append("number", 7L)
.append("street", "Fragkokklisias")
.append("city", "Athens")))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"active\": true,"
+ "\"addresses\": ["
+ "{"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "{"
+ "\"street\": \"Fragkokklisias\","
+ "\"city\": \"Athens\""
+ "}"
+ "],"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldNotExcludeNestedFieldsForUpdateEventWithArrayOfArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally Mae")
.append("phone", 456L)
.append("addresses", Arrays.asList(
Collections.singletonList(new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")),
Collections.singletonList(new Document()
.append("number", 8L)
.append("street", "Fragkokklisiass")
.append("city", "Athenss"))))
.append("active", false)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
Collections.singletonList(new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam")),
Collections.singletonList(new Document()
.append("number", 7L)
.append("street", "Fragkokklisias")
.append("city", "Athens"))))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"active\": true,"
+ "\"addresses\": ["
+ "["
+ "{"
+ "\"number\": {\"$numberLong\": \"34\"},"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "}"
+ "],"
+ "["
+ "{"
+ "\"number\": {\"$numberLong\": \"7\"},"
+ "\"street\": \"Fragkokklisias\","
+ "\"city\": \"Athens\""
+ "}"
+ "]"
+ "],"
+ "\"phone\": {\"$numberLong\": \"123\"},"
+ "\"scores\": [1.2,3.4,5.6]"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForSetTopLevelFieldUpdateEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("phone", 456L);
Document updateObj = new Document()
.append("name", "Sally")
.append("phone", 123L);
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"phone\": {\"$numberLong\": \"123\"}"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForUnsetTopLevelFieldUpdateEvent() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
Document updateObj = new Document()
.append("name", "")
.append("phone", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"phone\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForSetTopLevelFieldUpdateEventWithEmbeddedDocument() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("phone", 456L)
.append("address", new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"));
Document updateObj = new Document()
.append("name", "Sally")
.append("phone", 123L)
.append("address", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"address\": {"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "\"phone\": {\"$numberLong\": \"123\"}"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.address.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForSetTopLevelFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("phone", 456L)
.append("addresses", Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"),
new Document()
.append("number", 8L)
.append("street", "Fragkokklisiass")
.append("city", "Athense")));
Document updateObj = new Document()
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"),
new Document()
.append("number", 7L)
.append("street", "Fragkokklisias")
.append("city", "Athens")));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"addresses\": ["
+ "{"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "},"
+ "{"
+ "\"street\": \"Fragkokklisias\","
+ "\"city\": \"Athens\""
+ "}"
+ "],"
+ "\"phone\": {\"$numberLong\": \"123\"}"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldNotExcludeNestedFieldsForSetTopLevelFieldUpdateEventWithArrayOfArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("phone", 456L)
.append("addresses", Arrays.asList(
Collections.singletonList(new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")),
Collections.singletonList(new Document()
.append("number", 8L)
.append("street", "Fragkokklisiass")
.append("city", "Athense"))));
Document updateObj = new Document()
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
Collections.singletonList(new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam")),
Collections.singletonList(new Document()
.append("number", 7L)
.append("street", "Fragkokklisias")
.append("city", "Athens"))));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"addresses\": ["
+ "["
+ "{"
+ "\"number\": {\"$numberLong\": \"34\"},"
+ "\"street\": \"Claude Debussylaan\","
+ "\"city\": \"Amsterdam\""
+ "}"
+ "],"
+ "["
+ "{"
+ "\"number\": {\"$numberLong\": \"7\"},"
+ "\"street\": \"Fragkokklisias\","
+ "\"city\": \"Athens\""
+ "}"
+ "]"
+ "],"
+ "\"phone\": {\"$numberLong\": \"123\"}"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForSetNestedFieldUpdateEventWithEmbeddedDocument() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("phone", 456L)
.append("address", new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"));
Document updateObj = new Document()
.append("name", "Sally")
.append("address.number", 34L)
.append("address.street", "Claude Debussylaan")
.append("address.city", "Amsterdam");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"address.city\": \"Amsterdam\","
+ "\"address.street\": \"Claude Debussylaan\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.address.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForSetNestedFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("addresses", Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")));
Document updateObj = new Document()
.append("name", "Sally")
.append("addresses.0.number", 34L)
.append("addresses.0.street", "Claude Debussylaan")
.append("addresses.0.city", "Amsterdam");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"addresses.0.city\": \"Amsterdam\","
+ "\"addresses.0.street\": \"Claude Debussylaan\","
+ "\"name\": \"Sally\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldNotExcludeNestedFieldsForSetNestedFieldUpdateEventWithArrayOfArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("addresses", Arrays.asList(
Collections.singletonList(new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")),
Collections.singletonList(new Document()
.append("number", 8L)
.append("street", "Fragkokklisiass")
.append("city", "Athense"))));
Document updateObj = new Document()
.append("name", "Sally")
.append("addresses.0.0.number", 34L)
.append("addresses.0.0.street", "Claude Debussylaan")
.append("addresses.0.0.city", "Amsterdam");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"addresses.0.0.city\": \"Amsterdam\","
+ "\"addresses.0.0.number\": {\"$numberLong\": \"34\"},"
+ "\"addresses.0.0.street\": \"Claude Debussylaan\","
+ "\"name\": \"Sally\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForSetNestedFieldUpdateEventWithSeveralArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("addresses", Arrays.asList(Collections.singletonMap("second",
Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")))));
Document updateObj = new Document()
.append("name", "Sally")
.append("addresses.0.second.0.number", 34L)
.append("addresses.0.second.0.street", "Claude Debussylaan")
.append("addresses.0.second.0.city", "Amsterdam");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"addresses.0.second.0.city\": \"Amsterdam\","
+ "\"addresses.0.second.0.street\": \"Claude Debussylaan\","
+ "\"name\": \"Sally\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.second.number", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForSetNestedFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("addresses", Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")));
Document updateObj = new Document()
.append("name", "Sally")
.append("addresses.0.0.number", 34L)
.append("addresses.0.0.street", "Claude Debussylaan")
.append("addresses.0.0.city", "Amsterdam");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"name\": \"Sally\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForSetToArrayFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally May")
.append("addresses", Arrays.asList(
new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame")));
Document updateObj = new Document()
.append("name", "Sally")
.append("addresses.0", new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"));
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$set\": {"
+ "\"name\": \"Sally\""
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses", objId, obj, updateObj, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForUnsetNestedFieldUpdateEventWithEmbeddedDocument() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 456L)
.append("address", new Document()
.append("number", 45L)
.append("street", "Claude Debussylaann")
.append("city", "Amsterdame"))
.append("active", false)
.append("scores", Arrays.asList(1.2, 3.4, 5.6, 7.8));
Document updateObj = new Document()
.append("name", "")
.append("address.number", "")
.append("address.street", "")
.append("address.city", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"address.city\": true,"
+ "\"address.street\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.name,*.c1.address.number", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForUnsetNestedFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"),
new Document()
.append("number", 7L)
.append("street", "Fragkokklisias")
.append("city", "Athens")))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
Document updateObj = new Document()
.append("name", "")
.append("addresses.0.number", "")
.append("addresses.0.street", "")
.append("addresses.0.city", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"addresses.0.city\": true,"
+ "\"addresses.0.street\": true,"
+ "\"name\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.number", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldNotExcludeNestedFieldsForUnsetNestedFieldUpdateEventWithArrayOfArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("phone", 123L)
.append("addresses", Arrays.asList(
Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam"))))
.append("active", true)
.append("scores", Arrays.asList(1.2, 3.4, 5.6));
Document updateObj = new Document()
.append("name", "")
.append("addresses.0.0.number", "")
.append("addresses.0.0.street", "")
.append("addresses.0.0.city", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"addresses.0.0.city\": true,"
+ "\"addresses.0.0.number\": true,"
+ "\"addresses.0.0.street\": true,"
+ "\"name\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.number", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldExcludeNestedFieldsForUnsetNestedFieldUpdateEventWithSeveralArrays() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("addresses", Arrays.asList(Collections.singletonMap("second",
Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam")))));
Document updateObj = new Document()
.append("name", "")
.append("addresses.0.second.0.number", "")
.append("addresses.0.second.0.street", "")
.append("addresses.0.second.0.city", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"addresses.0.second.0.city\": true,"
+ "\"addresses.0.second.0.street\": true,"
+ "\"name\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses.second.number", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForUnsetNestedFieldUpdateEventWithArrayOfEmbeddedDocuments() throws InterruptedException {
ObjectId objId = new ObjectId();
Document obj = new Document()
.append("_id", objId)
.append("name", "Sally")
.append("addresses", Arrays.asList(
new Document()
.append("number", 34L)
.append("street", "Claude Debussylaan")
.append("city", "Amsterdam")));
Document updateObj = new Document()
.append("name", "")
.append("addresses.0.number", "")
.append("addresses.0.street", "")
.append("addresses.0.city", "");
// @formatter:off
String expected = "{"
+ "\"$v\": 1,"
+ "\"$unset\": {"
+ "\"name\": true"
+ "}"
+ "}";
// @formatter:on
assertUpdateRecord("*.c1.addresses", objId, obj, updateObj, false, PATCH, expected);
}
@Test
public void shouldExcludeFieldsForDeleteEvent() throws InterruptedException {
config = getConfiguration("*.c1.name,*.c1.active");
context = new MongoDbTaskContext(config);
TestHelper.cleanDatabase(primary(), "dbA");
ObjectId objId = new ObjectId();
Document obj = new Document("_id", objId);
storeDocuments("dbA", "c1", obj);
start(MongoDbConnector.class, config);
SourceRecords snapshotRecords = consumeRecordsByTopic(1);
assertThat(snapshotRecords.topics().size()).isEqualTo(1);
assertThat(snapshotRecords.allRecordsInOrder().size()).isEqualTo(1);
// Wait for streaming to start and perform a delete
waitForStreamingRunning("mongodb", SERVER_NAME);
deleteDocuments("dbA", "c1", objId);
// Get the delete records (1 delete and 1 tombstone)
SourceRecords deleteRecords = consumeRecordsByTopic(2);
assertThat(deleteRecords.topics().size()).isEqualTo(1);
assertThat(deleteRecords.allRecordsInOrder().size()).isEqualTo(2);
// Only validating delete record, non-tombstone
SourceRecord record = deleteRecords.allRecordsInOrder().get(0);
Struct value = getValue(record);
String json = value.getString(AFTER);
if (json == null) {
json = value.getString(PATCH);
}
assertThat(json).isNull();
}
@Test
public void shouldExcludeFieldsForDeleteTombstoneEvent() throws InterruptedException {
config = getConfiguration("*.c1.name,*.c1.active");
context = new MongoDbTaskContext(config);
TestHelper.cleanDatabase(primary(), "dbA");
ObjectId objId = new ObjectId();
Document obj = new Document("_id", objId);
storeDocuments("dbA", "c1", obj);
start(MongoDbConnector.class, config);
SourceRecords snapshotRecords = consumeRecordsByTopic(1);
assertThat(snapshotRecords.topics().size()).isEqualTo(1);
assertThat(snapshotRecords.allRecordsInOrder().size()).isEqualTo(1);
// Wait for streaming to start and perform a delete
waitForStreamingRunning("mongodb", SERVER_NAME);
deleteDocuments("dbA", "c1", objId);
// Get the delete records (1 delete and 1 tombstone)
SourceRecords deleteRecords = consumeRecordsByTopic(2);
assertThat(deleteRecords.topics().size()).isEqualTo(1);
assertThat(deleteRecords.allRecordsInOrder().size()).isEqualTo(2);
// Only validating tombstone record, non-delete
SourceRecord record = deleteRecords.allRecordsInOrder().get(1);
Struct value = getValue(record);
assertThat(value).isNull();
}
private Configuration getConfiguration(String blackList) {
return TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.FIELD_EXCLUDE_LIST, blackList)
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbA.c1")
.with(MongoDbConnectorConfig.LOGICAL_NAME, SERVER_NAME)
.build();
}
private Struct getValue(SourceRecord record) {
return (Struct) record.value();
}
private BiConsumer<String, Throwable> connectionErrorHandler(int numErrorsBeforeFailing) {
AtomicInteger attempts = new AtomicInteger();
return (desc, error) -> {
if (attempts.incrementAndGet() > numErrorsBeforeFailing) {
fail("Unable to connect to primary after " + numErrorsBeforeFailing + " errors trying to " + desc + ": " + error);
}
logger.error("Error while attempting to {}: {}", desc, error.getMessage(), error);
};
}
private MongoPrimary primary() {
ReplicaSet replicaSet = ReplicaSet.parse(context.getConnectionContext().hosts());
return context.getConnectionContext().primaryFor(replicaSet, context.filters(), connectionErrorHandler(3));
}
private void storeDocuments(String dbName, String collectionName, Document... documents) {
primary().execute("store documents", mongo -> {
Testing.debug("Storing in '" + dbName + "." + collectionName + "' document");
MongoDatabase db = mongo.getDatabase(dbName);
MongoCollection<Document> coll = db.getCollection(collectionName);
coll.drop();
for (Document document : documents) {
InsertOneOptions insertOptions = new InsertOneOptions().bypassDocumentValidation(true);
assertThat(document).isNotNull();
assertThat(document.size()).isGreaterThan(0);
coll.insertOne(document, insertOptions);
}
});
}
private void updateDocuments(String dbName, String collectionName, ObjectId objId, Document document, boolean doSet) {
primary().execute("update", mongo -> {
MongoDatabase db = mongo.getDatabase(dbName);
MongoCollection<Document> coll = db.getCollection(collectionName);
Document filter = Document.parse("{\"_id\": {\"$oid\": \"" + objId + "\"}}");
coll.updateOne(filter, new Document().append(doSet ? "$set" : "$unset", document));
});
}
private void deleteDocuments(String dbName, String collectionName, ObjectId objId) {
primary().execute("delete", mongo -> {
MongoDatabase db = mongo.getDatabase(dbName);
MongoCollection<Document> coll = db.getCollection(collectionName);
Document filter = Document.parse("{\"_id\": {\"$oid\": \"" + objId + "\"}}");
coll.deleteOne(filter);
});
}
private void assertReadRecord(String blackList, Document snapshotRecord, String field, String expected) throws InterruptedException {
config = getConfiguration(blackList);
context = new MongoDbTaskContext(config);
TestHelper.cleanDatabase(primary(), "dbA");
storeDocuments("dbA", "c1", snapshotRecord);
start(MongoDbConnector.class, config);
SourceRecords snapshotRecords = consumeRecordsByTopic(1);
assertThat(snapshotRecords.topics().size()).isEqualTo(1);
assertThat(snapshotRecords.allRecordsInOrder().size()).isEqualTo(1);
SourceRecord record = snapshotRecords.allRecordsInOrder().get(0);
Struct value = getValue(record);
assertThat(value.get(field)).isEqualTo(expected);
}
private void assertInsertRecord(String blackList, Document insertRecord, String field, String expected) throws InterruptedException {
config = getConfiguration(blackList);
context = new MongoDbTaskContext(config);
TestHelper.cleanDatabase(primary(), "dbA");
start(MongoDbConnector.class, config);
waitForSnapshotToBeCompleted("mongodb", SERVER_NAME);
storeDocuments("dbA", "c1", insertRecord);
// Get the insert records
SourceRecords insertRecords = consumeRecordsByTopic(1);
assertThat(insertRecords.topics().size()).isEqualTo(1);
assertThat(insertRecords.allRecordsInOrder().size()).isEqualTo(1);
SourceRecord record = insertRecords.allRecordsInOrder().get(0);
Struct value = getValue(record);
assertThat(value.get(field)).isEqualTo(expected);
}
private void assertUpdateRecord(String blackList, ObjectId objectId, Document snapshotRecord, Document updateRecord,
String field, String expected)
throws InterruptedException {
assertUpdateRecord(blackList, objectId, snapshotRecord, updateRecord, true, field, expected);
}
private void assertUpdateRecord(String blackList, ObjectId objectId, Document snapshotRecord, Document updateRecord,
boolean doSet, String field, String expected)
throws InterruptedException {
config = getConfiguration(blackList);
context = new MongoDbTaskContext(config);
TestHelper.cleanDatabase(primary(), "dbA");
storeDocuments("dbA", "c1", snapshotRecord);
start(MongoDbConnector.class, config);
// Get the snapshot records
SourceRecords snapshotRecords = consumeRecordsByTopic(1);
assertThat(snapshotRecords.topics().size()).isEqualTo(1);
assertThat(snapshotRecords.allRecordsInOrder().size()).isEqualTo(1);
// Wait for streaming to start and perform an update
waitForStreamingRunning("mongodb", SERVER_NAME);
updateDocuments("dbA", "c1", objectId, updateRecord, doSet);
// Get the update records
SourceRecords updateRecords = consumeRecordsByTopic(1);
assertThat(updateRecords.topics().size()).isEqualTo(1);
assertThat(updateRecords.allRecordsInOrder().size()).isEqualTo(1);
SourceRecord record = updateRecords.allRecordsInOrder().get(0);
Struct value = getValue(record);
Document expectedDoc = TestHelper.getDocumentWithoutLanguageVersion(expected);
Document actualDoc = TestHelper.getDocumentWithoutLanguageVersion(value.getString(field));
assertThat(actualDoc).isEqualTo(expectedDoc);
}
}
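For reference, a configuration in the style this test builds internally. Exclude paths use the fully-qualified <databaseName>.<collectionName>.<fieldName> form, with * standing in for any database or collection name and dotted suffixes addressing nested fields; the values below mirror the tests above but are otherwise illustrative:

// Excludes "name" and the nested "address.number" from documents of any
// database's "c1" collection, while capturing only dbA.c1.
Configuration config = TestHelper.getConfiguration().edit()
        .with(MongoDbConnectorConfig.FIELD_EXCLUDE_LIST, "*.c1.name,*.c1.address.number")
        .with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbA.c1")
        .with(MongoDbConnectorConfig.LOGICAL_NAME, "serverX")
        .build();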

View File

@@ -1413,7 +1413,7 @@ private static Configuration getConfiguration(String fieldRenames) {
private static Configuration getConfiguration(String fieldRenames, String database, String collection) {
Configuration.Builder builder = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, database + "." + collection)
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, database + "." + collection)
.with(MongoDbConnectorConfig.LOGICAL_NAME, SERVER_NAME);
if (fieldRenames != null && !"".equals(fieldRenames.trim())) {

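For context, the renames passed through this helper use the same fully-qualified path syntax as the exclude list, given as comma-separated <fully-qualified-field>:<new-name> pairs; the concrete values below are invented for illustration:

// Hypothetical invocation: rename "phone" to "telephone" in dbA.c1 change events.
Configuration config = getConfiguration("dbA.c1.phone:telephone", "dbA", "c1");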
View File

@@ -59,6 +59,7 @@
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.heartbeat.Heartbeat;
import io.debezium.junit.logging.LogInterceptor;
import io.debezium.schema.DatabaseSchema;
import io.debezium.util.Collect;
import io.debezium.util.IoUtil;
import io.debezium.util.Testing;
@@ -122,9 +123,13 @@ public void shouldFailToValidateInvalidConfiguration() {
assertNoConfigurationErrors(result, MongoDbConnectorConfig.PASSWORD);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.AUTO_DISCOVER_MEMBERS);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_WHITELIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_BLACKLIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_WHITELIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_BLACKLIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_COPY_THREADS);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_QUEUE_SIZE);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_BATCH_SIZE);
@@ -155,9 +160,13 @@ public void shouldValidateAcceptableConfiguration() {
assertNoConfigurationErrors(result, MongoDbConnectorConfig.PASSWORD);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.AUTO_DISCOVER_MEMBERS);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_WHITELIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_BLACKLIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.DATABASE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_WHITELIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_BLACKLIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.COLLECTION_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_COPY_THREADS);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_QUEUE_SIZE);
assertNoConfigurationErrors(result, MongoDbConnectorConfig.MAX_BATCH_SIZE);
@@ -176,7 +185,7 @@ public void shouldConsumeAllEventsFromDatabase() throws InterruptedException, IO
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@@ -365,7 +374,7 @@ public void shouldConsumeAllEventsFromDatabaseWithSkippedOperations() throws Int
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(MongoDbConnectorConfig.SKIPPED_OPERATIONS, "u")
.build();
@@ -453,7 +462,7 @@ public void shouldConsumeAllEventsFromDatabaseWithCustomAuthSource() throws Inte
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@@ -485,7 +494,7 @@ public void shouldConsumeAllEventsFromDatabaseWithCustomAuthSource() throws Inte
.with(MongoDbConnectorConfig.PASSWORD, "pass")
.with(MongoDbConnectorConfig.AUTH_SOURCE, authDbName)
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -544,7 +553,7 @@ public void shouldSupportDbRef() throws InterruptedException, IOException {
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -614,7 +623,7 @@ public void shouldConsumeEventsFromCollectionWithReplacedTopicName() throws Inte
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.dbz865.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.dbz865.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -656,7 +665,7 @@ public void shouldConsumeEventsFromCollectionWithReplacedTopicName() throws Inte
// ---------------------------------------------------------------------------------------------------------------
// Stop the connector
// ---------------------------------------------------------------------------------------------------------------
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isFalse());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse());
}
@Test
@ -668,7 +677,7 @@ public void testEmptySchemaWarningAfterApplyingCollectionFilters() throws Except
// Use the DB configuration to define the connector's configuration...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.dbz865.my_products")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.dbz865.my_products")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -694,7 +703,7 @@ public void testEmptySchemaWarningAfterApplyingCollectionFilters() throws Except
// Consume all records
consumeRecordsByTopic(12);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isTrue());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
protected void verifyFromInitialSync(SourceRecord record, AtomicBoolean foundLast) {
@ -718,7 +727,7 @@ public void shouldConsumeTransaction() throws InterruptedException, IOException
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -812,7 +821,7 @@ public void shouldResumeTransactionInMiddle() throws InterruptedException, IOExc
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -912,7 +921,7 @@ public void shouldSnapshotDocumentContainingFieldNamedOp() throws Exception {
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1084,7 +1093,7 @@ public void shouldUseSSL() throws InterruptedException, IOException {
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(MongoDbConnectorConfig.MAX_FAILED_CONNECTIONS, 0)
.with(MongoDbConnectorConfig.SSL_ENABLED, true)
@ -1106,7 +1115,7 @@ public void shouldEmitHeartbeatMessages() throws InterruptedException, IOExcepti
// Use the DB configuration to define the connector's configuration ...
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.mhb")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.mhb")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(Heartbeat.HEARTBEAT_INTERVAL, "1")
.build();
@ -1183,7 +1192,7 @@ public void shouldEmitHeartbeatMessages() throws InterruptedException, IOExcepti
public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1218,7 +1227,7 @@ public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
@Test
public void shouldGenerateRecordForInsertEvent() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1261,7 +1270,7 @@ public void shouldGenerateRecordForInsertEvent() throws Exception {
@Test
public void shouldGenerateRecordForUpdateEvent() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1318,7 +1327,7 @@ public void shouldGenerateRecordForUpdateEvent() throws Exception {
@Test
public void shouldGeneratorRecordForDeleteEvent() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1376,7 +1385,7 @@ public void shouldGeneratorRecordForDeleteEvent() throws Exception {
@FixFor("DBZ-582")
public void shouldGenerateRecordForDeleteEventWithoutTombstone() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(MongoDbConnectorConfig.TOMBSTONES_ON_DELETE, false)
.build();
@ -1427,7 +1436,7 @@ public void shouldGenerateRecordForDeleteEventWithoutTombstone() throws Exceptio
@Test
public void shouldGenerateRecordsWithCorrectlySerializedId() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1496,7 +1505,7 @@ private static void assertSourceRecordKeyFieldIsEqualTo(SourceRecord record, Str
@Test
public void shouldSupportDbRef2() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();
@ -1548,7 +1557,7 @@ public void shouldSupportDbRef2() throws Exception {
@Test
public void shouldReplicateContent() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbA.contacts")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbA.contacts")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, MongoDbConnectorConfig.SnapshotMode.INITIAL)
.build();
@ -1701,7 +1710,7 @@ public void shouldReplicateContent() throws Exception {
public void shouldNotReplicateSnapshot() throws Exception {
// todo: this configuration causes NPE at MongoDbStreamingChangeEventSource.java:143
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbA.contacts")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbA.contacts")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, MongoDbConnectorConfig.SnapshotMode.NEVER)
.build();
@ -1754,7 +1763,7 @@ public void shouldNotReplicateSnapshot() throws Exception {
@FixFor("DBZ-1880")
public void shouldGenerateRecordForUpdateEventUsingLegacyV1SourceInfo() throws Exception {
config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.with(CommonConnectorConfig.SOURCE_STRUCT_MAKER_VERSION, "v1")
.with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo")
.build();

View File

@ -27,7 +27,7 @@ public void testLifecycle() throws Exception {
this.config = TestHelper.getConfiguration()
.edit()
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, MongoDbConnectorConfig.SnapshotMode.INITIAL)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.build();
this.context = new MongoDbTaskContext(config);
@ -68,7 +68,7 @@ public void testSnapshotOnlyMetrics() throws Exception {
this.config = TestHelper.getConfiguration()
.edit()
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, MongoDbConnectorConfig.SnapshotMode.INITIAL)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.build();
this.context = new MongoDbTaskContext(config);
@ -108,7 +108,7 @@ public void testStreamingOnlyMetrics() throws Exception {
this.config = TestHelper.getConfiguration()
.edit()
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, MongoDbConnectorConfig.SnapshotMode.NEVER)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbit.*")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbit.*")
.build();
this.context = new MongoDbTaskContext(config);

View File

@ -27,7 +27,7 @@ public class TransactionMetadataIT extends AbstractMongoConnectorIT {
public void transactionMetadata() throws Exception {
config = TestHelper.getConfiguration()
.edit()
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, "dbA.c1")
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, "dbA.c1")
.with(MongoDbConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(MongoDbConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
.build();

View File

@ -51,7 +51,7 @@ public void beforeEach() {
// Use the DB configuration to define the connector's configuration ...
Configuration config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, DB_NAME + "." + this.getCollectionName())
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, DB_NAME + "." + this.getCollectionName())
.with(MongoDbConnectorConfig.LOGICAL_NAME, SERVER_NAME)
.build();
@ -97,7 +97,7 @@ protected void restartConnectorWithoutEmittingTombstones() {
// reconfigure and restart
Configuration config = TestHelper.getConfiguration().edit()
.with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MongoDbConnectorConfig.COLLECTION_WHITELIST, DB_NAME + "." + this.getCollectionName())
.with(MongoDbConnectorConfig.COLLECTION_INCLUDE_LIST, DB_NAME + "." + this.getCollectionName())
.with(MongoDbConnectorConfig.LOGICAL_NAME, SERVER_NAME)
.with(MongoDbConnectorConfig.TOMBSTONES_ON_DELETE, false)
.build();

View File

@ -3,6 +3,7 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n
log4j.appender.stdout.threshold=WARN
# Root logger option
log4j.rootLogger=INFO, stdout

View File

@ -115,7 +115,7 @@
<mysql.port>3306</mysql.port>
<mysql.gtid.port>3306</mysql.gtid.port>
<mysql.gtid.replica.port>3306</mysql.gtid.replica.port>
<mysql.replica.port>3306</mysql.replica.port> <!-- by default use master as 'replica' -->
<mysql.replica.port>3306</mysql.replica.port> <!-- by default use primary as 'replica' -->
<mysql.init.timeout>60000</mysql.init.timeout> <!-- 60 seconds -->
<!--
By default, we should use the docker image maintained by the MySQL team. This property is changed with different profiles.

View File

@ -636,7 +636,7 @@ protected void handleServerStop(Event event) {
}
/**
* Handle the supplied event that is sent by a master to a slave to let the slave know that the master is still alive. Not
* Handle the supplied event that is sent by a primary to a replica to let the replica know that the primary is still alive. Not
* written to a binary log.
*
* @param event the server heartbeat event to be processed; may not be null
@ -646,8 +646,8 @@ protected void handleServerHeartbeat(Event event) {
}
/**
* Handle the supplied event that signals that an out of the ordinary event that occurred on the master. It notifies the slave
* that something happened on the master that might cause data to be in an inconsistent state.
* Handle the supplied event that signals that an out-of-the-ordinary event occurred on the primary. It notifies the replica
* that something happened on the primary that might cause data to be in an inconsistent state.
*
* @param event the incident event to be processed; may not be null
*/

View File

@ -109,13 +109,15 @@ public static class Builder {
*/
public Builder(Configuration config) {
this.config = config;
setFiltersFromStrings(config.getString(MySqlConnectorConfig.DATABASE_WHITELIST),
config.getString(MySqlConnectorConfig.DATABASE_BLACKLIST),
config.getString(MySqlConnectorConfig.TABLE_WHITELIST),
config.getString(MySqlConnectorConfig.TABLE_BLACKLIST));
setFiltersFromStrings(
config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, MySqlConnectorConfig.DATABASE_WHITELIST),
config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, MySqlConnectorConfig.DATABASE_BLACKLIST),
config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_INCLUDE_LIST, MySqlConnectorConfig.TABLE_WHITELIST),
config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, MySqlConnectorConfig.TABLE_BLACKLIST));
// Define the filter that excludes blacklisted columns, truncated columns, and masked columns ...
this.columnFilter = ColumnNameFilterFactory.createBlacklistFilter(config.getString(MySqlConnectorConfig.COLUMN_BLACKLIST));
this.columnFilter = ColumnNameFilterFactory
.createExcludeListFilter(config.getFallbackStringProperty(MySqlConnectorConfig.COLUMN_EXCLUDE_LIST, MySqlConnectorConfig.COLUMN_BLACKLIST));
}
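The resolution rule applied throughout this change is: the new property wins, the deprecated one fills in. A minimal sketch of the semantics assumed for Configuration.getFallbackStringProperty (illustrative only; the real method lives in io.debezium.config.Configuration):
// Hypothetical shape of the fallback lookup: prefer the new field,
// fall back to the deprecated field, which may itself be unset.
public String getFallbackStringProperty(Field preferredField, Field deprecatedField) {
    String value = getString(preferredField);
    return value != null ? value : getString(deprecatedField);
}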
/**
@ -125,26 +127,41 @@ public Builder(Configuration config) {
* @return this
*/
public Builder setFiltersFromOffsets(Map<String, ?> offsets) {
setFiltersFromStrings((String) offsets.get(SourceInfo.DATABASE_WHITELIST_KEY), (String) offsets.get(SourceInfo.DATABASE_BLACKLIST_KEY),
(String) offsets.get(SourceInfo.TABLE_WHITELIST_KEY), (String) offsets.get(SourceInfo.TABLE_BLACKLIST_KEY));
String dbIncludeList = (String) offsets.get(SourceInfo.DATABASE_INCLUDE_LIST_KEY);
if (null == dbIncludeList) {
dbIncludeList = (String) offsets.get(SourceInfo.DATABASE_WHITELIST_KEY);
}
String dbExcludeList = (String) offsets.get(SourceInfo.DATABASE_EXCLUDE_LIST_KEY);
if (null == dbExcludeList) {
dbExcludeList = (String) offsets.get(SourceInfo.DATABASE_BLACKLIST_KEY);
}
String tableIncludeList = (String) offsets.get(SourceInfo.TABLE_INCLUDE_LIST_KEY);
if (null == tableIncludeList) {
tableIncludeList = (String) offsets.get(SourceInfo.TABLE_WHITELIST_KEY);
}
String tableExcludeList = (String) offsets.get(SourceInfo.TABLE_EXCLUDE_LIST_KEY);
if (null == tableExcludeList) {
tableExcludeList = (String) offsets.get(SourceInfo.TABLE_BLACKLIST_KEY);
}
setFiltersFromStrings(dbIncludeList, dbExcludeList, tableIncludeList, tableExcludeList);
return this;
}
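The four null-check branches above repeat the same new-key-first lookup; a hedged sketch of how it could be factored into one helper (fallbackOffsetValue is a hypothetical name, not part of this commit):
// Prefer the value stored under the new key; otherwise use the legacy key (may be null).
private static String fallbackOffsetValue(Map<String, ?> offsets, String newKey, String legacyKey) {
    Object value = offsets.get(newKey);
    return (String) (value != null ? value : offsets.get(legacyKey));
}
// Usage: fallbackOffsetValue(offsets, SourceInfo.DATABASE_INCLUDE_LIST_KEY, SourceInfo.DATABASE_WHITELIST_KEY)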
private void setFiltersFromStrings(String dbWhitelist,
String dbBlacklist,
String tableWhitelist,
String tableBlacklist) {
private void setFiltersFromStrings(String dbIncludeList,
String dbExcludeList,
String tableIncludeList,
String tableExcludeList) {
Predicate<String> dbFilter = Selectors.databaseSelector()
.includeDatabases(dbWhitelist)
.excludeDatabases(dbBlacklist)
.includeDatabases(dbIncludeList)
.excludeDatabases(dbExcludeList)
.build();
// Define the filter using the whitelists and blacklists for tables and database names ...
// Define the filter using the include and exclude lists for tables and database names ...
Predicate<TableId> tableFilter = Selectors.tableSelector()
.includeDatabases(dbWhitelist)
.excludeDatabases(dbBlacklist)
.includeTables(tableWhitelist)
.excludeTables(tableBlacklist)
.includeDatabases(dbIncludeList)
.excludeDatabases(dbExcludeList)
.includeTables(tableIncludeList)
.excludeTables(tableExcludeList)
.build();
// Ignore built-in databases and tables ...

View File

@ -36,6 +36,7 @@
public class MySqlConnectorConfig extends RelationalDatabaseConnectorConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MySqlConnectorConfig.class);
protected static final String DATABASE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"database.include.list\" or \"database.whitelist\" is already specified";
/**
* The set of predefined BigIntUnsignedHandlingMode options or aliases.
@ -482,7 +483,9 @@ public static GtidNewChannelPosition parse(String value, String defaultValue) {
protected static final int DEFAULT_SNAPSHOT_FETCH_SIZE = Integer.MIN_VALUE;
private static final String DATABASE_WHITELIST_NAME = "database.whitelist";
private static final String DATABASE_INCLUDE_LIST_NAME = "database.include.list";
private static final String DATABASE_BLACKLIST_NAME = "database.blacklist";
private static final String DATABASE_EXCLUDE_LIST_NAME = "database.exclude.list";
/**
* Default size of the binlog buffer used for examining transactions and
@ -606,7 +609,7 @@ public static GtidNewChannelPosition parse(String value, String defaultValue) {
"Password to unlock the keystore file (store password) specified by 'ssl.trustore' configuration property or the 'javax.net.ssl.trustStore' system or JVM property.");
public static final Field TABLES_IGNORE_BUILTIN = RelationalDatabaseConnectorConfig.TABLE_IGNORE_BUILTIN
.withDependents(DATABASE_WHITELIST_NAME);
.withDependents(DATABASE_INCLUDE_LIST_NAME, DATABASE_WHITELIST_NAME);
public static final Field JDBC_DRIVER = Field.create("database.jdbc.driver")
.withDisplayName("Jdbc Driver Class Name")
@ -616,50 +619,78 @@ public static GtidNewChannelPosition parse(String value, String defaultValue) {
.withImportance(Importance.LOW)
.withValidation(Field::isClassName)
.withDescription("JDBC Driver class name used to connect to the MySQL database server.");
/**
* A comma-separated list of regular expressions that match database names to be monitored.
* May not be used with {@link #DATABASE_BLACKLIST}.
* Must not be used with {@link #DATABASE_EXCLUDE_LIST}.
*/
public static final Field DATABASE_WHITELIST = Field.create(DATABASE_WHITELIST_NAME)
.withDisplayName("Databases")
public static final Field DATABASE_INCLUDE_LIST = Field.create(DATABASE_INCLUDE_LIST_NAME)
.withDisplayName("Include Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withDependents(TABLE_WHITELIST_NAME)
.withDependents(TABLE_INCLUDE_LIST_NAME, TABLE_WHITELIST_NAME)
.withDescription("The databases for which changes are to be captured");
/**
* A comma-separated list of regular expressions that match database names to be excluded from monitoring.
* May not be used with {@link #DATABASE_WHITELIST}.
* Old, backwards-compatible "whitelist" property.
*/
public static final Field DATABASE_BLACKLIST = Field.create(DATABASE_BLACKLIST_NAME)
@Deprecated
public static final Field DATABASE_WHITELIST = Field.create(DATABASE_WHITELIST_NAME)
.withDisplayName("Deprecated: Include Databases")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withInvisibleRecommender()
.withDependents(TABLE_INCLUDE_LIST_NAME, TABLE_WHITELIST_NAME)
.withDescription("The databases for which changes are to be captured (deprecated, use \"" + DATABASE_INCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match database names to be excluded from monitoring.
* Must not be used with {@link #DATABASE_INCLUDE_LIST}.
*/
public static final Field DATABASE_EXCLUDE_LIST = Field.create(DATABASE_EXCLUDE_LIST_NAME)
.withDisplayName("Exclude Databases")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withValidation(MySqlConnectorConfig::validateDatabaseBlacklist)
.withValidation(MySqlConnectorConfig::validateDatabaseExcludeList)
.withInvisibleRecommender()
.withDescription("");
.withDescription("A comma-separated list of regular expressions that match database names to be excluded from monitoring");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field DATABASE_BLACKLIST = Field.create(DATABASE_BLACKLIST_NAME)
.withDisplayName("Deprecated: Exclude Databases")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(MySqlConnectorConfig::validateDatabaseExcludeList)
.withInvisibleRecommender()
.withDescription("A comma-separated list of regular expressions that match database names to be excluded from monitoring (deprecated, use \""
+ DATABASE_EXCLUDE_LIST.name() + "\" instead)");
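From a user's perspective the deprecated names remain working aliases; for illustration (values made up), these two configurations resolve to the same database filter:
Configuration newStyle = Configuration.create()
        .with(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, "test,tmp")
        .build();
Configuration oldStyle = Configuration.create()
        .with(MySqlConnectorConfig.DATABASE_BLACKLIST, "test,tmp") // deprecated alias, still honored
        .build();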
/**
* A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog
* position in the MySQL server. Only the GTID ranges that have sources matching one of these include patterns will
* be used.
* May not be used with {@link #GTID_SOURCE_EXCLUDES}.
* Must not be used with {@link #GTID_SOURCE_EXCLUDES}.
*/
public static final Field GTID_SOURCE_INCLUDES = Field.create("gtid.source.includes")
.withDisplayName("Include GTID sources")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withDependents(TABLE_WHITELIST_NAME)
.withDependents(TABLE_INCLUDE_LIST_NAME, TABLE_WHITELIST_NAME)
.withDescription("The source UUIDs used to include GTID ranges when determine the starting position in the MySQL server's binlog.");
/**
* A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog
* position in the MySQL server. Only the GTID ranges that have sources matching none of these exclude patterns will
* be used.
* May not be used with {@link #GTID_SOURCE_INCLUDES}.
* Must not be used with {@link #GTID_SOURCE_INCLUDES}.
*/
public static final Field GTID_SOURCE_EXCLUDES = Field.create("gtid.source.excludes")
.withDisplayName("Exclude GTID sources")
@ -884,9 +915,9 @@ public static GtidNewChannelPosition parse(String value, String defaultValue) {
CommonConnectorConfig.POLL_INTERVAL_MS,
BUFFER_SIZE_FOR_BINLOG_READER, Heartbeat.HEARTBEAT_INTERVAL,
Heartbeat.HEARTBEAT_TOPICS_PREFIX, DATABASE_HISTORY, INCLUDE_SCHEMA_CHANGES, INCLUDE_SQL_QUERY,
TABLE_WHITELIST, TABLE_BLACKLIST, TABLES_IGNORE_BUILTIN,
DATABASE_WHITELIST, DATABASE_BLACKLIST,
COLUMN_BLACKLIST, MSG_KEY_COLUMNS,
TABLE_WHITELIST, TABLE_INCLUDE_LIST, TABLE_BLACKLIST, TABLE_EXCLUDE_LIST, TABLES_IGNORE_BUILTIN,
DATABASE_WHITELIST, DATABASE_INCLUDE_LIST, DATABASE_BLACKLIST, DATABASE_EXCLUDE_LIST,
COLUMN_BLACKLIST, COLUMN_EXCLUDE_LIST, MSG_KEY_COLUMNS,
RelationalDatabaseConnectorConfig.MASK_COLUMN_WITH_HASH,
RelationalDatabaseConnectorConfig.MASK_COLUMN,
RelationalDatabaseConnectorConfig.TRUNCATE_COLUMN,
@ -970,8 +1001,10 @@ protected static ConfigDef configDef() {
KafkaDatabaseHistory.RECOVERY_POLL_INTERVAL_MS, DATABASE_HISTORY,
DatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS, DatabaseHistory.DDL_FILTER,
DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL);
Field.group(config, "Events", INCLUDE_SCHEMA_CHANGES, INCLUDE_SQL_QUERY, TABLES_IGNORE_BUILTIN, DATABASE_WHITELIST, TABLE_WHITELIST,
COLUMN_BLACKLIST, TABLE_BLACKLIST, DATABASE_BLACKLIST, MSG_KEY_COLUMNS,
Field.group(config, "Events", INCLUDE_SCHEMA_CHANGES, INCLUDE_SQL_QUERY, TABLES_IGNORE_BUILTIN,
DATABASE_WHITELIST, DATABASE_INCLUDE_LIST, TABLE_WHITELIST, TABLE_INCLUDE_LIST,
COLUMN_BLACKLIST, COLUMN_EXCLUDE_LIST, TABLE_BLACKLIST, TABLE_EXCLUDE_LIST,
DATABASE_BLACKLIST, DATABASE_EXCLUDE_LIST, MSG_KEY_COLUMNS,
RelationalDatabaseConnectorConfig.MASK_COLUMN_WITH_HASH,
RelationalDatabaseConnectorConfig.MASK_COLUMN,
RelationalDatabaseConnectorConfig.TRUNCATE_COLUMN,
@ -1020,21 +1053,11 @@ private static int validateInconsistentSchemaHandlingModeNotIgnore(Configuration
return 0;
}
private static int validateDatabaseBlacklist(Configuration config, Field field, ValidationOutput problems) {
String whitelist = config.getString(DATABASE_WHITELIST);
String blacklist = config.getString(DATABASE_BLACKLIST);
if (whitelist != null && blacklist != null) {
problems.accept(DATABASE_BLACKLIST, blacklist, "Whitelist is already specified");
return 1;
}
return 0;
}
private static int validateTableBlacklist(Configuration config, Field field, ValidationOutput problems) {
String whitelist = config.getString(TABLE_WHITELIST);
String blacklist = config.getString(TABLE_BLACKLIST);
if (whitelist != null && blacklist != null) {
problems.accept(TABLE_BLACKLIST, blacklist, "Whitelist is already specified");
private static int validateDatabaseExcludeList(Configuration config, Field field, ValidationOutput problems) {
String includeList = config.getFallbackStringProperty(DATABASE_INCLUDE_LIST, DATABASE_WHITELIST);
String excludeList = config.getFallbackStringProperty(DATABASE_EXCLUDE_LIST, DATABASE_BLACKLIST);
if (includeList != null && excludeList != null) {
problems.accept(DATABASE_EXCLUDE_LIST, excludeList, DATABASE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
return 0;
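For illustration, mixing an include list with an exclude list now fails validation even when old and new property names are combined (hypothetical snippet, not a test from this commit):
Configuration bad = Configuration.create()
        .with(MySqlConnectorConfig.DATABASE_WHITELIST, "inventory") // counted as the include list
        .with(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, "test")
        .build();
// validateDatabaseExcludeList(...) reports:
// "database.include.list" or "database.whitelist" is already specified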

View File

@ -385,19 +385,25 @@ else if (b == null || b.isEmpty()) {
return false;
}
// otherwise, we have filter info
// if either whitelist has been added to, then we may have new tables
// if either include list has been added to, then we may have new tables
if (hasExclusiveElements.apply(config.getString(MySqlConnectorConfig.DATABASE_WHITELIST), sourceInfo.getDatabaseWhitelist())) {
if (hasExclusiveElements.apply(
config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, MySqlConnectorConfig.DATABASE_WHITELIST),
sourceInfo.getDatabaseIncludeList())) {
return true;
}
if (hasExclusiveElements.apply(config.getString(MySqlConnectorConfig.TABLE_WHITELIST), sourceInfo.getTableWhitelist())) {
if (hasExclusiveElements.apply(
config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_INCLUDE_LIST, MySqlConnectorConfig.TABLE_WHITELIST),
sourceInfo.getTableIncludeList())) {
return true;
}
// if elements have been removed from either exclude list, then we may have new tables
if (hasExclusiveElements.apply(sourceInfo.getDatabaseBlacklist(), config.getString(MySqlConnectorConfig.DATABASE_BLACKLIST))) {
if (hasExclusiveElements.apply(sourceInfo.getDatabaseExcludeList(),
config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, MySqlConnectorConfig.DATABASE_BLACKLIST))) {
return true;
}
if (hasExclusiveElements.apply(sourceInfo.getTableBlacklist(), config.getString(MySqlConnectorConfig.TABLE_BLACKLIST))) {
if (hasExclusiveElements.apply(sourceInfo.getTableExcludeList(),
config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, MySqlConnectorConfig.TABLE_BLACKLIST))) {
return true;
}
// otherwise, false.

View File

@ -47,7 +47,7 @@
/**
* Component that records the schema history for databases hosted by a MySQL database server. The schema information includes
* the {@link Tables table definitions} and the Kafka Connect {@link #schemaFor(TableId) Schema}s for each table, where the
* {@link Schema} excludes any columns that have been {@link MySqlConnectorConfig#COLUMN_BLACKLIST specified} in the
* {@link Schema} excludes any columns that have been {@link MySqlConnectorConfig#COLUMN_EXCLUDE_LIST specified} in the
* configuration.
* <p>
* The history is changed by {@link #applyDdl(SourceInfo, String, String, DatabaseStatementStringConsumer) applying DDL

View File

@ -393,7 +393,7 @@ protected void execute() {
while (rs.next() && isRunning()) {
TableId id = new TableId(dbName, null, rs.getString(1));
final boolean shouldRecordTableSchema = shouldRecordTableSchema(schema, filters, id);
// Apply only when the whitelist table list is not dynamically reconfigured
// Apply only when the table include list is not dynamically reconfigured
if ((createTableFilters == filters && shouldRecordTableSchema) || createTableFilters.tableFilter().test(id)) {
createTablesMap.computeIfAbsent(dbName, k -> new ArrayList<>()).add(id);
}
@ -425,9 +425,11 @@ protected void execute() {
* + and then sort the tableIds list based on the above list
* +
*/
List<Pattern> tableWhitelistPattern = Strings.listOfRegex(context.config().getString(MySqlConnectorConfig.TABLE_WHITELIST), Pattern.CASE_INSENSITIVE);
List<Pattern> tableIncludeListPattern = Strings.listOfRegex(
context.config().getFallbackStringProperty(MySqlConnectorConfig.TABLE_INCLUDE_LIST, MySqlConnectorConfig.TABLE_WHITELIST),
Pattern.CASE_INSENSITIVE);
List<TableId> tableIdsSorted = new ArrayList<>();
tableWhitelistPattern.forEach(pattern -> {
tableIncludeListPattern.forEach(pattern -> {
List<TableId> tablesMatchedByPattern = capturedTableIds.stream().filter(t -> pattern.asPredicate().test(t.toString()))
.collect(Collectors.toList());
tablesMatchedByPattern.forEach(t -> {
@ -888,7 +890,7 @@ protected void readBinlogPosition(int step, SourceInfo source, JdbcConnection my
source.startSnapshot();
}
else {
logger.info("Step {}: read binlog position of MySQL master", step);
logger.info("Step {}: read binlog position of MySQL primary server", step);
String showMasterStmt = "SHOW MASTER STATUS";
sql.set(showMasterStmt);
mysql.query(sql.get(), rs -> {

View File

@ -112,9 +112,13 @@ final class SourceInfo extends AbstractSourceInfo {
public static final String THREAD_KEY = "thread";
public static final String QUERY_KEY = "query";
public static final String DATABASE_WHITELIST_KEY = "database_whitelist";
public static final String DATABASE_INCLUDE_LIST_KEY = "database_include_list";
public static final String DATABASE_BLACKLIST_KEY = "database_blacklist";
public static final String DATABASE_EXCLUDE_LIST_KEY = "database_exclude_list";
public static final String TABLE_WHITELIST_KEY = "table_whitelist";
public static final String TABLE_INCLUDE_LIST_KEY = "table_include_list";
public static final String TABLE_BLACKLIST_KEY = "table_blacklist";
public static final String TABLE_EXCLUDE_LIST_KEY = "table_exclude_list";
public static final String RESTART_PREFIX = "RESTART_";
private String currentGtidSet;
@ -136,10 +140,10 @@ final class SourceInfo extends AbstractSourceInfo {
private boolean lastSnapshot = true;
private boolean nextSnapshot = false;
private String currentQuery = null;
private String databaseWhitelist;
private String databaseBlacklist;
private String tableWhitelist;
private String tableBlacklist;
private String databaseIncludeList;
private String databaseExcludeList;
private String tableIncludeList;
private String tableExcludeList;
private Set<TableId> tableIds;
private String databaseName;
@ -275,10 +279,10 @@ private Map<String, Object> offsetUsingPosition(long rowsToSkip) {
map.put(SNAPSHOT_KEY, true);
}
if (hasFilterInfo()) {
map.put(DATABASE_WHITELIST_KEY, databaseWhitelist);
map.put(DATABASE_BLACKLIST_KEY, databaseBlacklist);
map.put(TABLE_WHITELIST_KEY, tableWhitelist);
map.put(TABLE_BLACKLIST_KEY, tableBlacklist);
map.put(DATABASE_INCLUDE_LIST_KEY, databaseIncludeList);
map.put(DATABASE_EXCLUDE_LIST_KEY, databaseExcludeList);
map.put(TABLE_INCLUDE_LIST_KEY, tableIncludeList);
map.put(TABLE_EXCLUDE_LIST_KEY, tableExcludeList);
}
return map;
}
@ -434,10 +438,10 @@ public void completeSnapshot() {
* @param config the configuration
*/
public void setFilterDataFromConfig(Configuration config) {
this.databaseWhitelist = config.getString(MySqlConnectorConfig.DATABASE_WHITELIST);
this.databaseBlacklist = config.getString(MySqlConnectorConfig.DATABASE_BLACKLIST);
this.tableWhitelist = config.getString(MySqlConnectorConfig.TABLE_WHITELIST);
this.tableBlacklist = config.getString(MySqlConnectorConfig.TABLE_BLACKLIST);
this.databaseIncludeList = config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, MySqlConnectorConfig.DATABASE_WHITELIST);
this.databaseExcludeList = config.getFallbackStringProperty(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, MySqlConnectorConfig.DATABASE_BLACKLIST);
this.tableIncludeList = config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_INCLUDE_LIST, MySqlConnectorConfig.TABLE_WHITELIST);
this.tableExcludeList = config.getFallbackStringProperty(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, MySqlConnectorConfig.TABLE_BLACKLIST);
}
/**
@ -461,24 +465,24 @@ public boolean hasFilterInfo() {
* 2. The initial snapshot occurred in a version of Debezium that did not store the filter information in the
* offsets / the connector was not configured to store filter information.
*/
return databaseWhitelist != null || databaseBlacklist != null ||
tableWhitelist != null || tableBlacklist != null;
return databaseIncludeList != null || databaseExcludeList != null ||
tableIncludeList != null || tableExcludeList != null;
}
public String getDatabaseWhitelist() {
return databaseWhitelist;
public String getDatabaseIncludeList() {
return databaseIncludeList;
}
public String getDatabaseBlacklist() {
return databaseBlacklist;
public String getDatabaseExcludeList() {
return databaseExcludeList;
}
public String getTableWhitelist() {
return tableWhitelist;
public String getTableIncludeList() {
return tableIncludeList;
}
public String getTableBlacklist() {
return tableBlacklist;
public String getTableExcludeList() {
return tableExcludeList;
}
/**
@ -501,19 +505,42 @@ public void setOffset(Map<String, ?> sourceOffset) {
this.restartEventsToSkip = longOffsetValue(sourceOffset, EVENTS_TO_SKIP_OFFSET_KEY);
nextSnapshot = booleanOffsetValue(sourceOffset, SNAPSHOT_KEY);
lastSnapshot = nextSnapshot;
this.databaseWhitelist = (String) sourceOffset.get(DATABASE_WHITELIST_KEY);
this.databaseBlacklist = (String) sourceOffset.get(DATABASE_BLACKLIST_KEY);
this.tableWhitelist = (String) sourceOffset.get(TABLE_WHITELIST_KEY);
this.tableBlacklist = (String) sourceOffset.get(TABLE_BLACKLIST_KEY);
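// Restore filter info preferring the new include/exclude keys, falling back to the
// legacy whitelist/blacklist keys written into offsets by older connector versions.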
if (sourceOffset.containsKey(DATABASE_INCLUDE_LIST_KEY)) {
this.databaseIncludeList = (String) sourceOffset.get(DATABASE_INCLUDE_LIST_KEY);
}
else {
this.databaseIncludeList = (String) sourceOffset.get(DATABASE_WHITELIST_KEY);
}
if (sourceOffset.containsKey(DATABASE_EXCLUDE_LIST_KEY)) {
this.databaseExcludeList = (String) sourceOffset.get(DATABASE_EXCLUDE_LIST_KEY);
}
else {
this.databaseExcludeList = (String) sourceOffset.get(DATABASE_BLACKLIST_KEY);
}
if (sourceOffset.containsKey(TABLE_INCLUDE_LIST_KEY)) {
this.tableIncludeList = (String) sourceOffset.get(TABLE_INCLUDE_LIST_KEY);
}
else {
this.tableIncludeList = (String) sourceOffset.get(TABLE_WHITELIST_KEY);
}
if (sourceOffset.containsKey(TABLE_EXCLUDE_LIST_KEY)) {
this.tableExcludeList = (String) sourceOffset.get(TABLE_EXCLUDE_LIST_KEY);
}
else {
this.tableExcludeList = (String) sourceOffset.get(TABLE_BLACKLIST_KEY);
}
}
}
public static boolean offsetsHaveFilterInfo(Map<String, ?> sourceOffset) {
return sourceOffset != null &&
sourceOffset.containsKey(DATABASE_BLACKLIST_KEY) ||
return sourceOffset != null && (sourceOffset.containsKey(DATABASE_BLACKLIST_KEY) ||
sourceOffset.containsKey(DATABASE_EXCLUDE_LIST_KEY) ||
sourceOffset.containsKey(DATABASE_WHITELIST_KEY) ||
sourceOffset.containsKey(DATABASE_INCLUDE_LIST_KEY) ||
sourceOffset.containsKey(TABLE_BLACKLIST_KEY) ||
sourceOffset.containsKey(TABLE_WHITELIST_KEY);
sourceOffset.containsKey(TABLE_EXCLUDE_LIST_KEY) ||
sourceOffset.containsKey(TABLE_WHITELIST_KEY) ||
sourceOffset.containsKey(TABLE_INCLUDE_LIST_KEY));
}
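Besides recognizing the new keys, the rewrite parenthesizes the containsKey chain, so a null sourceOffset now short-circuits the whole expression instead of slipping past the old &&/|| precedence. A quick sanity check (hypothetical assertions, not part of this commit's tests):
assertThat(SourceInfo.offsetsHaveFilterInfo(null)).isFalse(); // safe: && short-circuits
assertThat(SourceInfo.offsetsHaveFilterInfo(Collections.emptyMap())).isFalse();
assertThat(SourceInfo.offsetsHaveFilterInfo(
        Collections.singletonMap(SourceInfo.TABLE_INCLUDE_LIST_KEY, "db.t1"))).isTrue();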
private long longOffsetValue(Map<String, ?> values, String key) {

View File

@ -1,4 +1,4 @@
-- In production you would almost certainly limit the replication user must be on the follower (slave) machine,
-- In production you would almost certainly restrict the replication user to the follower (replica) machine,
-- to prevent other clients accessing the log from other machines. For example, 'replicator'@'follower.acme.com'.
-- However, in this database we'll grant 3 users different privileges:
--
@ -17,5 +17,5 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysqlreplica'@'%';
-- Start the GTID-based replication ...
CHANGE MASTER TO MASTER_HOST='alt-database-gtids', MASTER_PORT=3306, MASTER_USER='replicator', MASTER_PASSWORD = 'replpass', MASTER_AUTO_POSITION=1;
-- And start the slave ...
-- And start the replica ...
START SLAVE;

View File

@ -1,4 +1,4 @@
-- In production you would almost certainly limit the replication user must be on the follower (slave) machine,
-- In production you would almost certainly restrict the replication user to the follower (replica) machine,
-- to prevent other clients accessing the log from other machines. For example, 'replicator'@'follower.acme.com'.
-- However, in this database we'll grant 3 users different privileges:
--
@ -17,5 +17,5 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysqlreplica'@'%';
-- Start the GTID-based replication ...
CHANGE MASTER TO MASTER_HOST='database-gtids', MASTER_PORT=3306, MASTER_USER='replicator', MASTER_PASSWORD = 'replpass', MASTER_AUTO_POSITION=1;
-- And start the slave ...
-- And start the replica ...
START SLAVE;

View File

@ -1,4 +1,4 @@
-- In production you would almost certainly limit the replication user must be on the follower (slave) machine,
-- In production you would almost certainly restrict the replication user to the follower (replica) machine,
-- to prevent other clients accessing the log from other machines. For example, 'replicator'@'follower.acme.com'.
-- However, in this database we'll grant 3 users different privileges:
--

View File

@ -52,7 +52,7 @@ binlog_format = row
[mysqld-5.6]
# ----------------------------------------------
# Enable GTIDs on this master
# Enable GTIDs on this primary server
# ----------------------------------------------
gtid_mode = on
enforce_gtid_consistency = on
@ -69,7 +69,7 @@ default_authentication_plugin = mysql_native_password
[mysqld-5.7]
# ----------------------------------------------
# Enable GTIDs on this master
# Enable GTIDs on this primary server
# ----------------------------------------------
gtid_mode = on
enforce_gtid_consistency = on
@ -82,7 +82,7 @@ default_authentication_plugin = mysql_native_password
[mysqld-8.0]
# ----------------------------------------------
# Enable GTIDs on this master
# Enable GTIDs on this primary server
# ----------------------------------------------
gtid_mode = on
enforce_gtid_consistency = on

View File

@ -34,7 +34,7 @@ symbolic-links=0
#pid-file=/var/run/mysqld/mysqld.pid
# ----------------------------------------------
# Enable GTIDs on this master
# Enable GTIDs on this primary server
# ----------------------------------------------
log_slave_updates = on

View File

@ -81,15 +81,15 @@ protected static void waitForGtidSetsToMatch(Configuration master, Configuration
try {
GtidSet replicaGtidSet = null;
while (true) {
Testing.debug("Checking replica's GTIDs and comparing to master's...");
Testing.debug("Checking replica's GTIDs and comparing to primary's...");
replicaGtidSet = readAvailableGtidSet(replica);
// The replica will have extra sources, so check whether the replica has everything in the master ...
if (masterGtidSet.isContainedWithin(replicaGtidSet)) {
Testing.debug("Replica's GTIDs are caught up to the master's.");
Testing.debug("Replica's GTIDs are caught up to the primary's.");
sw.stop();
return;
}
Testing.debug("Waiting for replica's GTIDs to catch up to master's...");
Testing.debug("Waiting for replica's GTIDs to catch up to primary's...");
Thread.sleep(100);
}
}
@ -108,7 +108,7 @@ protected static void waitForGtidSetsToMatch(Configuration master, Configuration
// Timed out waiting for them to match ...
checker.interrupt();
}
Testing.print("Waited a total of " + sw.durations().statistics().getTotalAsString() + " for the replica to catch up to the master.");
Testing.print("Waited a total of " + sw.durations().statistics().getTotalAsString() + " for the replica to catch up to the primary.");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
@ -136,7 +136,7 @@ protected Map<String, String> readSystemVariables(Configuration config) throws E
// Now get the master GTID source ...
String serverUuid = variables.get("server_uuid");
if (serverUuid != null && !serverUuid.trim().isEmpty()) {
// We are using GTIDs, so look for the known GTID set that has the master and slave GTID sources ...
// We are using GTIDs, so look for the known GTID set that has the primary and replica GTID sources ...
String availableServerGtidStr = context.knownGtidSet();
if (availableServerGtidStr != null && !availableServerGtidStr.trim().isEmpty()) {
GtidSet gtidSet = new GtidSet(availableServerGtidStr);

View File

@ -83,7 +83,7 @@ public void shouldCorrectlyManageRollback() throws SQLException, InterruptedExce
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.BUFFER_SIZE_FOR_BINLOG_READER, 10_000)
@ -152,7 +152,7 @@ public void shouldProcessSavepoint() throws SQLException, InterruptedException {
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
@ -214,7 +214,7 @@ public void shouldProcessLargeTransaction() throws SQLException, InterruptedExce
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.BUFFER_SIZE_FOR_BINLOG_READER, 9)
@ -286,7 +286,7 @@ public void shouldProcessRolledBackSavepoint() throws SQLException, InterruptedE
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)

View File

@ -321,6 +321,46 @@ public void shouldFilterAllRecordsBasedOnDatabaseWhitelistFilter() throws Except
assertThat(reader.getMetrics().getNumberOfSkippedEvents()).isEqualTo(0);
}
/**
* Set up a DATABASE_INCLUDE_LIST filter that filters all events.
* Verify all events are properly filtered.
* Verify numberOfFilteredEvents metric is incremented correctly.
*/
@Test
@FixFor("DBZ-1206")
public void shouldFilterAllRecordsBasedOnDatabaseIncludeListFilter() throws Exception {
// Define a configuration that will ignore all events from the MySQL source.
config = simpleConfig()
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, "db-does-not-exist")
.build();
final Filters filters = new Filters.Builder(config).build();
context = new MySqlTaskContext(config, filters);
context.start();
context.source().setBinlogStartPoint("", 0L); // start from beginning
context.initializeHistory();
reader = new BinlogReader("binlog", context, new AcceptAllPredicate());
// Start reading the binlog ...
reader.start();
// Let's wait for at least 35 events to be filtered.
final int expectedFilterCount = 35;
final long numberFiltered = filterAtLeast(expectedFilterCount, 20, TimeUnit.SECONDS);
// All events should have been filtered.
assertThat(numberFiltered).isGreaterThanOrEqualTo(expectedFilterCount);
// There should be no schema changes
assertThat(schemaChanges.recordCount()).isEqualTo(0);
// There should be no records
assertThat(store.collectionCount()).isEqualTo(0);
// There should be no skipped events
assertThat(reader.getMetrics().getNumberOfSkippedEvents()).isEqualTo(0);
}
@Test
@FixFor("DBZ-183")
public void shouldHandleTimestampTimezones() throws Exception {
@ -330,8 +370,8 @@ public void shouldHandleTimestampTimezones() throws Exception {
String tableName = "dbz_85_fractest";
config = simpleConfig().with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, REGRESSION_DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_WHITELIST, REGRESSION_DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, REGRESSION_DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, REGRESSION_DATABASE.qualifiedTableName(tableName))
.build();
Filters filters = new Filters.Builder(config).build();
context = new MySqlTaskContext(config, filters);
@ -373,8 +413,8 @@ public void shouldHandleMySQLTimeCorrectly() throws Exception {
String tableName = "dbz_342_timetest";
config = simpleConfig().with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, REGRESSION_DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_WHITELIST, REGRESSION_DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, REGRESSION_DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, REGRESSION_DATABASE.qualifiedTableName(tableName))
.build();
Filters filters = new Filters.Builder(config).build();
context = new MySqlTaskContext(config, filters);

View File

@ -10,6 +10,7 @@
import io.debezium.config.Configuration;
import io.debezium.config.Field;
import io.debezium.relational.history.FileDatabaseHistory;
import io.debezium.util.Testing;
/**
* A helper for easily building connector configurations for testing.
@ -35,23 +36,48 @@
}
/* package local */ Configurator includeDatabases(String regexList) {
return with(MySqlConnectorConfig.DATABASE_WHITELIST, regexList);
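// Flip a coin so that, across repeated test runs, both the deprecated and the new
// property name get exercised; the Testing.debug output records which one was used.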
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MySqlConnectorConfig.DATABASE_WHITELIST.name() + "\" config property");
return with(MySqlConnectorConfig.DATABASE_WHITELIST, regexList);
}
Testing.debug("Using \"" + MySqlConnectorConfig.DATABASE_INCLUDE_LIST.name() + "\" config property");
return with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, regexList);
}
/* package local */ Configurator excludeDatabases(String regexList) {
return with(MySqlConnectorConfig.DATABASE_BLACKLIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MySqlConnectorConfig.DATABASE_BLACKLIST.name() + "\" config property");
return with(MySqlConnectorConfig.DATABASE_BLACKLIST, regexList);
}
Testing.debug("Using \"" + MySqlConnectorConfig.DATABASE_EXCLUDE_LIST.name() + "\" config property");
return with(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, regexList);
}
/* package local */ Configurator includeTables(String regexList) {
return with(MySqlConnectorConfig.TABLE_WHITELIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MySqlConnectorConfig.TABLE_WHITELIST.name() + "\" config property");
return with(MySqlConnectorConfig.TABLE_WHITELIST, regexList);
}
Testing.debug("Using \"" + MySqlConnectorConfig.TABLE_INCLUDE_LIST.name() + "\" config property");
return with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, regexList);
}
/* package local */ Configurator excludeTables(String regexList) {
return with(MySqlConnectorConfig.TABLE_BLACKLIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MySqlConnectorConfig.TABLE_BLACKLIST.name() + "\" config property");
return with(MySqlConnectorConfig.TABLE_BLACKLIST, regexList);
}
Testing.debug("Using \"" + MySqlConnectorConfig.TABLE_EXCLUDE_LIST.name() + "\" config property");
return with(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, regexList);
}
/* package local */ Configurator excludeColumns(String regexList) {
return with(MySqlConnectorConfig.COLUMN_BLACKLIST, regexList);
if (Math.random() >= 0.5) {
Testing.debug("Using \"" + MySqlConnectorConfig.COLUMN_BLACKLIST.name() + "\" config property");
return with(MySqlConnectorConfig.COLUMN_BLACKLIST, regexList);
}
Testing.debug("Using \"" + MySqlConnectorConfig.COLUMN_EXCLUDE_LIST.name() + "\" config property");
return with(MySqlConnectorConfig.COLUMN_EXCLUDE_LIST, regexList);
}
/* package local */ Configurator truncateColumns(int length, String fullyQualifiedTableNames) {

View File

@ -158,6 +158,23 @@ public void shouldRecoverSourceInfoFromOffsetWithNonZeroBinlogCoordinatesAndNonZ
@Test
public void shouldRecoverSourceInfoFromOffsetWithFilterData() {
final String databaseIncludeList = "a,b";
final String tableIncludeList = "c.foo,d.bar,d.baz";
Map<String, String> offset = offset(10, 10);
offset.put(SourceInfo.DATABASE_INCLUDE_LIST_KEY, databaseIncludeList);
offset.put(SourceInfo.TABLE_INCLUDE_LIST_KEY, tableIncludeList);
sourceWith(offset);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseIncludeList, source.getDatabaseIncludeList());
assertEquals(tableIncludeList, source.getTableIncludeList());
// confirm other filter info is null
assertThat(source.getDatabaseExcludeList()).isNull();
assertThat(source.getTableExcludeList()).isNull();
}
@Test
public void shouldRecoverSourceInfoFromOffsetWithFilterDataOld() {
final String databaseWhitelist = "a,b";
final String tableWhitelist = "c.foo,d.bar,d.baz";
Map<String, String> offset = offset(10, 10);
@ -166,11 +183,11 @@ public void shouldRecoverSourceInfoFromOffsetWithFilterData() {
sourceWith(offset);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseWhitelist, source.getDatabaseWhitelist());
assertEquals(tableWhitelist, source.getTableWhitelist());
assertEquals(databaseWhitelist, source.getDatabaseIncludeList());
assertEquals(tableWhitelist, source.getTableIncludeList());
// confirm other filter info is null
assertThat(source.getDatabaseBlacklist()).isNull();
assertThat(source.getTableBlacklist()).isNull();
assertThat(source.getDatabaseExcludeList()).isNull();
assertThat(source.getTableExcludeList()).isNull();
}
@Test
@ -182,6 +199,26 @@ public void setOffsetFilterFromFilter() {
sourceWith(offset);
assertThat(source.hasFilterInfo()).isFalse();
final Configuration configuration = Configuration.create()
.with(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, databaseBlacklist)
.with(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, tableBlacklist)
.build();
source.setFilterDataFromConfig(configuration);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseBlacklist, source.getDatabaseExcludeList());
assertEquals(tableBlacklist, source.getTableExcludeList());
}
@Test
public void setOffsetFilterFromFilterOld() {
final String databaseBlacklist = "a,b";
final String tableBlacklist = "c.foo, d.bar, d.baz";
Map<String, String> offset = offset(10, 10);
sourceWith(offset);
assertThat(source.hasFilterInfo()).isFalse();
final Configuration configuration = Configuration.create()
.with(MySqlConnectorConfig.DATABASE_BLACKLIST, databaseBlacklist)
.with(MySqlConnectorConfig.TABLE_BLACKLIST, tableBlacklist)
@ -189,8 +226,8 @@ public void setOffsetFilterFromFilter() {
source.setFilterDataFromConfig(configuration);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseBlacklist, source.getDatabaseBlacklist());
assertEquals(tableBlacklist, source.getTableBlacklist());
assertEquals(databaseBlacklist, source.getDatabaseExcludeList());
assertEquals(tableBlacklist, source.getTableExcludeList());
}
@Test

View File

@ -35,6 +35,7 @@
import io.debezium.config.CommonConnectorConfig;
import io.debezium.config.Configuration;
import io.debezium.config.Field;
import io.debezium.connector.mysql.MySQLConnection.MySqlVersion;
import io.debezium.connector.mysql.MySqlConnectorConfig.SecureConnectionMode;
import io.debezium.connector.mysql.MySqlConnectorConfig.SnapshotLockingMode;
@ -52,6 +53,7 @@
import io.debezium.relational.history.DatabaseHistory;
import io.debezium.relational.history.FileDatabaseHistory;
import io.debezium.relational.history.KafkaDatabaseHistory;
import io.debezium.schema.DatabaseSchema;
import io.debezium.util.Testing;
/**
@ -129,10 +131,15 @@ public void shouldFailToValidateInvalidConfiguration() {
assertNoConfigurationErrors(result, MySqlConnectorConfig.SERVER_ID);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLES_IGNORE_BUILTIN);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.CONNECTION_TIMEOUT_MS);
assertNoConfigurationErrors(result, MySqlConnectorConfig.KEEP_ALIVE);
assertNoConfigurationErrors(result, MySqlConnectorConfig.KEEP_ALIVE_INTERVAL_MS);
@ -180,10 +187,15 @@ public void shouldValidateValidConfigurationWithSSL() {
assertNoConfigurationErrors(result, MySqlConnectorConfig.SERVER_ID);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLES_IGNORE_BUILTIN);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.CONNECTION_TIMEOUT_MS);
assertNoConfigurationErrors(result, MySqlConnectorConfig.KEEP_ALIVE);
assertNoConfigurationErrors(result, MySqlConnectorConfig.KEEP_ALIVE_INTERVAL_MS);
@ -231,10 +243,15 @@ public void shouldValidateAcceptableConfiguration() {
assertNoConfigurationErrors(result, MySqlConnectorConfig.SERVER_ID);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLES_IGNORE_BUILTIN);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.DATABASE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_WHITELIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_INCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.TABLE_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_BLACKLIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.COLUMN_EXCLUDE_LIST);
assertNoConfigurationErrors(result, MySqlConnectorConfig.MSG_KEY_COLUMNS);
assertNoConfigurationErrors(result, MySqlConnectorConfig.CONNECTION_TIMEOUT_MS);
assertNoConfigurationErrors(result, MySqlConnectorConfig.KEEP_ALIVE);
@ -309,6 +326,15 @@ private Optional<Header> getHeaderField(SourceRecord record, String fieldName) {
@Test
public void shouldConsumeAllEventsFromDatabaseUsingSnapshot() throws SQLException, InterruptedException {
shouldConsumeAllEventsFromDatabaseUsingSnapshotByField(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, 18765);
}
@Test
public void shouldConsumeAllEventsFromDatabaseUsingSnapshotOld() throws SQLException, InterruptedException {
shouldConsumeAllEventsFromDatabaseUsingSnapshotByField(MySqlConnectorConfig.DATABASE_WHITELIST, 18775);
}
private void shouldConsumeAllEventsFromDatabaseUsingSnapshotByField(Field dbIncludeListField, int serverId) throws SQLException, InterruptedException {
String masterPort = System.getProperty("database.port", "3306");
String replicaPort = System.getProperty("database.replica.port", "3306");
boolean replicaIsMaster = masterPort.equals(replicaPort);
@ -324,11 +350,11 @@ public void shouldConsumeAllEventsFromDatabaseUsingSnapshot() throws SQLExceptio
.with(MySqlConnectorConfig.PORT, System.getProperty("database.replica.port", "3306"))
.with(MySqlConnectorConfig.USER, "snapper")
.with(MySqlConnectorConfig.PASSWORD, "snapperpass")
.with(MySqlConnectorConfig.SERVER_ID, 18765)
.with(MySqlConnectorConfig.SERVER_ID, serverId)
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(dbIncludeListField, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
@ -499,7 +525,7 @@ public void shouldConsumeAllEventsFromDatabaseUsingSnapshot() throws SQLExceptio
// Change our schema with a fully-qualified name; we should still see this event
// ---------------------------------------------------------------------------------------------------------------
// Add a column with default to the 'products' table and explicitly update one record ...
try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName());) {
try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName())) {
try (JdbcConnection connection = db.connect()) {
connection.execute(String.format(
"ALTER TABLE %s.products ADD COLUMN volume FLOAT, ADD COLUMN alias VARCHAR(30) NULL AFTER description",
@ -738,8 +764,8 @@ public void shouldUseOverriddenSelectStatementDuringSnapshotting() throws SQLExc
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.getDatabaseName() + ".products")
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.getDatabaseName() + ".products")
.with(MySqlConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE, DATABASE.getDatabaseName() + ".products")
.with(MySqlConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE + "." + DATABASE.getDatabaseName() + ".products",
String.format("SELECT * from %s.products where id>=108 order by id", DATABASE.getDatabaseName()))
@ -789,8 +815,8 @@ public void shouldUseMultipleOverriddenSelectStatementsDuringSnapshotting() thro
.with(MySqlConnectorConfig.SERVER_NAME, DATABASE.getServerName())
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.with(MySqlConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE, tables)
.with(MySqlConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE + "." + DATABASE.getDatabaseName() + ".products",
@ -832,7 +858,7 @@ public void shouldIgnoreAlterTableForNonCapturedTablesNotStoredInHistory() throw
final String tables = String.format("%s.customers", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
@ -868,7 +894,7 @@ public void shouldSaveSetCharacterSetWhenStoringOnlyMonitoredTables() throws SQL
Testing.Files.delete(DB_HISTORY_PATH);
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.DATABASE_WHITELIST, "no_" + DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, "no_" + DATABASE.getDatabaseName())
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
@ -892,7 +918,7 @@ public void shouldProcessCreateUniqueIndex() throws SQLException, InterruptedExc
final String tables = String.format("%s.migration_test", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.build();
@ -943,7 +969,7 @@ public void shouldIgnoreAlterTableForNonCapturedTablesStoredInHistory() throws S
final String tables = String.format("%s.customers", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.build();
@ -981,7 +1007,7 @@ public void shouldIgnoreCreateIndexForNonCapturedTablesNotStoredInHistory() thro
final String tables = String.format("%s.customers", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
@ -1020,10 +1046,10 @@ public void shouldReceiveSchemaForNonWhitelistedTablesAndDatabases() throws SQLE
final String tables = String.format("%s.customers,%s.orders", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, tables)
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, ".*")
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, ".*")
.build();
dropDatabases();
@ -1053,6 +1079,32 @@ public void shouldReceiveSchemaForNonWhitelistedTablesAndDatabases() throws SQLE
@Test
@FixFor("DBZ-1546")
public void shouldHandleIncludeListTables() throws SQLException, InterruptedException {
Testing.Files.delete(DB_HISTORY_PATH);
final String tables = String.format("%s.customers, %s.orders", DATABASE.getDatabaseName(), DATABASE.getDatabaseName());
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, tables)
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, ".*")
.build();
dropDatabases();
// Start the connector ...
start(MySqlConnector.class, config);
// Consume the first records due to startup and initialization of the database ...
// Testing.Print.enable();
// Two databases
// SET + USE + DROP DB + CREATE DB + 4 tables (2 in the include list) (DROP + CREATE) TABLE
// USE + DROP DB + CREATE DB + (DROP + CREATE) TABLE
SourceRecords records = consumeRecordsByTopic(1 + 1 + 2 + 2 * 4 + 1 + 2 + 2);
// Records for one of the databases only
assertThat(records.ddlRecordsForDatabase(DATABASE.getDatabaseName()).size()).isEqualTo(1 + 2 + 2 * 4);
stopConnector();
}
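As a reading aid, and not part of the commit itself, the consumed-record arithmetic in the test above decomposes as follows; the variable names are purely illustrative:

```java
// Hypothetical breakdown of consumeRecordsByTopic(1 + 1 + 2 + 2 * 4 + 1 + 2 + 2):
int setStatement = 1;              // SET
int useFirstDb = 1;                // USE for the first database
int dropCreateFirstDb = 2;         // DROP DATABASE + CREATE DATABASE
int dropCreateFirstTables = 2 * 4; // (DROP + CREATE) TABLE for each of its 4 tables
int useSecondDb = 1;               // USE for the second database
int dropCreateSecondDb = 2;        // DROP DATABASE + CREATE DATABASE
int dropCreateSecondTable = 2;     // (DROP + CREATE) TABLE for its single table
int expected = setStatement + useFirstDb + dropCreateFirstDb + dropCreateFirstTables
        + useSecondDb + dropCreateSecondDb + dropCreateSecondTable; // = 17
```

The follow-up assertion on `ddlRecordsForDatabase` then counts only the first database's share: 1 (USE) + 2 (DROP/CREATE DB) + 2 * 4 (tables) = 11.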
@Test
public void shouldHandleWhitelistedTables() throws SQLException, InterruptedException {
Testing.Files.delete(DB_HISTORY_PATH);
@ -1186,7 +1238,7 @@ public void shouldConsumeEventsWithMaskedAndBlacklistedColumns() throws SQLExcep
// Use the DB configuration to define the connector's configuration ...
config = RO_DATABASE.defaultConfig()
.with(MySqlConnectorConfig.COLUMN_BLACKLIST, RO_DATABASE.qualifiedTableName("orders") + ".order_number")
.with(MySqlConnectorConfig.COLUMN_EXCLUDE_LIST, RO_DATABASE.qualifiedTableName("orders") + ".order_number")
.with("column.mask.with.12.chars", RO_DATABASE.qualifiedTableName("customers") + ".email")
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.build();
@ -1482,7 +1534,7 @@ public void shouldNotParseQueryIfServerOptionDisabled() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1534,7 +1586,7 @@ public void shouldNotParseQueryIfConnectorNotConfiguredTo() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector to NOT parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, false)
.build();
@ -1586,7 +1638,7 @@ public void shouldParseQueryIfAvailableAndConnectorOptionEnabled() throws Except
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1638,7 +1690,7 @@ public void parseMultipleInsertStatements() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1701,7 +1753,7 @@ public void parseMultipleRowInsertStatement() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1762,7 +1814,7 @@ public void parseDeleteQuery() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1813,7 +1865,7 @@ public void parseMultiRowDeleteQuery() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1872,7 +1924,7 @@ public void parseUpdateQuery() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1923,7 +1975,7 @@ public void parseMultiRowUpdateQuery() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
.build();
@ -1995,7 +2047,7 @@ public void testEmptySchemaLogWarningWithDatabaseWhitelist() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, "my_database")
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, "my_database")
.build();
start(MySqlConnector.class, config);
@ -2003,7 +2055,7 @@ public void testEmptySchemaLogWarningWithDatabaseWhitelist() throws Exception {
consumeRecordsByTopic(12);
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isTrue());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
@Test
@ -2020,7 +2072,7 @@ public void testNoEmptySchemaLogWarningWithDatabaseWhitelist() throws Exception
consumeRecordsByTopic(12);
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isFalse());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse());
}
@Test
@ -2031,7 +2083,7 @@ public void testEmptySchemaWarningWithTableWhitelist() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("my_products"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("my_products"))
.build();
start(MySqlConnector.class, config);
@ -2041,7 +2093,7 @@ public void testEmptySchemaWarningWithTableWhitelist() throws Exception {
consumeRecordsByTopic(12);
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isTrue());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
@Test
@ -2060,7 +2112,7 @@ public void testNoEmptySchemaWarningWithTableWhitelist() throws Exception {
consumeRecordsByTopic(12);
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isFalse());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse());
}
@Test
@ -2072,7 +2124,7 @@ public void shouldRewriteIdentityKey() throws InterruptedException, SQLException
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
// Explicitly configure connector TO parse query
.with(MySqlConnectorConfig.INCLUDE_SQL_QUERY, true)
// rewrite key from table 'products': from {id} to {id, name}
@ -2103,7 +2155,7 @@ public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(tableName))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(tableName))
.build();
// Start the connector ...

View File

@ -849,7 +849,7 @@ public void shouldConsumeDatesCorrectlyWhenClientTimezonePrecedesServerTimezoneU
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("dbz_85_fractest"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("dbz_85_fractest"))
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.build();
// Start the connector ...
@ -927,7 +927,7 @@ public void shouldConsumeDatesCorrectlyWhenClientTimezonePrecedesServerTimezoneU
public void shouldConsumeAllEventsFromDecimalTableInDatabaseUsingBinlogAndNoSnapshot() throws SQLException, InterruptedException {
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.toString())
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
@ -969,7 +969,7 @@ public void shouldConsumeAllEventsFromDecimalTableInDatabaseUsingBinlogAndNoSnap
public void shouldConsumeDecimalAsStringFromBinlog() throws SQLException, InterruptedException {
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.toString())
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.STRING)
@ -1009,7 +1009,7 @@ public void shouldConsumeDecimalAsStringFromBinlog() throws SQLException, Interr
public void shouldConsumeDecimalAsStringFromSnapshot() throws SQLException, InterruptedException {
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("dbz_147_decimalvalues"))
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.STRING)

View File

@ -67,7 +67,7 @@ public void afterEach() {
public void testPreciseDecimalHandlingMode() throws SQLException, InterruptedException {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, RelationalDatabaseConnectorConfig.DecimalHandlingMode.PRECISE)
.build();
@ -83,7 +83,7 @@ public void testPreciseDecimalHandlingMode() throws SQLException, InterruptedExc
public void testDoubleDecimalHandlingMode() throws SQLException, InterruptedException {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, RelationalDatabaseConnectorConfig.DecimalHandlingMode.DOUBLE)
.build();
@ -99,7 +99,7 @@ public void testDoubleDecimalHandlingMode() throws SQLException, InterruptedExce
public void testStringDecimalHandlingMode() throws SQLException, InterruptedException {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName(TABLE_NAME))
.with(MySqlConnectorConfig.DECIMAL_HANDLING_MODE, RelationalDatabaseConnectorConfig.DecimalHandlingMode.STRING)
.build();

View File

@ -65,7 +65,7 @@ public void shouldAlterEnumColumnCharacterSet() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.NEVER)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("test_stations_10"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("test_stations_10"))
.build();
start(MySqlConnector.class, config);
@ -94,7 +94,7 @@ public void shouldAlterEnumColumnCharacterSet() throws Exception {
public void shouldPropagateColumnSourceType() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.NEVER)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("test_stations_10"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("test_stations_10"))
.with("column.propagate.source.type", DATABASE.qualifiedTableName("test_stations_10") + ".type")
.build();

View File

@ -68,7 +68,7 @@ public void testLifecycle() throws Exception {
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLES_IGNORE_BUILTIN, Boolean.TRUE)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, Boolean.TRUE)
.build());
@ -116,7 +116,7 @@ public void testSnapshotOnlyMetrics() throws Exception {
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLES_IGNORE_BUILTIN, Boolean.TRUE)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, Boolean.TRUE)
.build());
@ -139,7 +139,7 @@ public void testSnapshotAndStreamingMetrics() throws Exception {
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLES_IGNORE_BUILTIN, Boolean.TRUE)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, Boolean.TRUE)
.build());
@ -156,7 +156,7 @@ public void testStreamingOnlyMetrics() throws Exception {
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("simple"))
.with(MySqlConnectorConfig.TABLES_IGNORE_BUILTIN, Boolean.TRUE)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, Boolean.TRUE)
.build());

View File

@ -54,7 +54,7 @@ public void afterEach() {
public void shouldNotDuplicateEventsAfterRestart() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("restart_table"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("restart_table"))
.build();
try (MySQLConnection db = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName());) {

View File

@ -58,7 +58,7 @@ public void shouldCorrectlyMigrateTable() throws SQLException, InterruptedExcept
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("monitored") + "," + DATABASE.qualifiedTableName("_monitored_new"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("monitored") + "," + DATABASE.qualifiedTableName("_monitored_new"))
.build();
final MySQLConnection connection = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName());
@ -99,7 +99,7 @@ public void shouldProcessAndWarnOnNonWhitelistedMigrateTable() throws SQLExcepti
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("monitored"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("monitored"))
.build();
final MySQLConnection connection = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName());
@ -150,7 +150,7 @@ public void shouldWarnOnInvalidMigrateTable() throws SQLException, InterruptedEx
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("monitored"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("monitored"))
.build();
final MySQLConnection connection = MySQLConnection.forTestDatabase(DATABASE.getDatabaseName());

View File

@ -90,7 +90,7 @@ protected Configuration.Builder simpleConfig() {
.with(MySqlConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(MySqlConnectorConfig.SERVER_ID, serverId)
.with(MySqlConnectorConfig.SERVER_NAME, serverName)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, databaseName)
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, databaseName)
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(FileDatabaseHistory.FILE_PATH, DB_HISTORY_PATH);
}

View File

@ -53,7 +53,7 @@ public void afterEach() {
public void shouldAlterEnumColumnCharacterSet() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.NEVER)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("t_user_black_list"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("t_user_black_list"))
.build();
start(MySqlConnector.class, config);

View File

@ -60,7 +60,7 @@ public void shouldHandleTinyIntAsNumber() throws SQLException, InterruptedExcept
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("DBZ1773"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("DBZ1773"))
.build();
// Start the connector ...
@ -84,7 +84,7 @@ public void shouldHandleTinyIntOneAsBoolean() throws SQLException, InterruptedEx
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("DBZ1773"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("DBZ1773"))
.with(MySqlConnectorConfig.CUSTOM_CONVERTERS, "boolean")
.with("boolean.type", TinyIntOneToBooleanConverter.class.getName())
.with("boolean.selector", ".*DBZ1773.b")
@ -111,7 +111,7 @@ public void shouldDefaultValueForTinyIntOneAsBoolean() throws SQLException, Inte
// Use the DB configuration to define the connector's configuration ...
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("DBZ2085"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("DBZ2085"))
.with(MySqlConnectorConfig.CUSTOM_CONVERTERS, "boolean")
.with("boolean.type", TinyIntOneToBooleanConverter.class.getName())
.with("boolean.selector", ".*DBZ2085.b")

View File

@ -60,7 +60,7 @@ public void afterEach() {
public void allZeroDateAndTimeTypeTest() throws InterruptedException {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("ALL_ZERO_DATE_AND_TIME_TABLE"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("ALL_ZERO_DATE_AND_TIME_TABLE"))
.build();
start(MySqlConnector.class, config);

View File

@ -572,7 +572,7 @@ public void numericAndDecimalToDecimalTest() throws InterruptedException {
public void dateAndTimeTest() throws InterruptedException {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("DATE_TIME_TABLE"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("DATE_TIME_TABLE"))
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.build();
start(MySqlConnector.class, config);
@ -640,7 +640,7 @@ public void dateAndTimeTest() throws InterruptedException {
public void timeTypeWithConnectMode() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("DATE_TIME_TABLE"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("DATE_TIME_TABLE"))
.with(MySqlConnectorConfig.TIME_PRECISION_MODE, TemporalPrecisionMode.CONNECT)
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.build();
@ -784,7 +784,7 @@ record = records.recordsForTopic(DATABASE.topicForTable("DBZ_771_CUSTOMERS")).ge
public void alterDateAndTimeTest() throws Exception {
config = DATABASE.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, MySqlConnectorConfig.SnapshotMode.INITIAL)
.with(MySqlConnectorConfig.TABLE_WHITELIST, DATABASE.qualifiedTableName("ALTER_DATE_TIME"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, DATABASE.qualifiedTableName("ALTER_DATE_TIME"))
.with(DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL, true)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, false)
.build();

View File

@ -13,6 +13,7 @@
import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@ -225,7 +226,7 @@ private void snapshotOfSingleDatabase(boolean useGlobalLock, boolean storeOnlyMo
@Test
public void shouldCreateSnapshotOfSingleDatabaseUsingReadEvents() throws Exception {
config = simpleConfig().with(MySqlConnectorConfig.DATABASE_WHITELIST, "connector_(.*)_" + DATABASE.getIdentifier()).build();
config = simpleConfig().with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, "connector_(.*)_" + DATABASE.getIdentifier()).build();
context = new MySqlTaskContext(config, new Filters.Builder(config).build());
context.start();
reader = new SnapshotReader("snapshot", context);
@ -501,6 +502,34 @@ public void shouldCreateSnapshotSchemaOnlyRecovery() throws Exception {
}
}
@Test
public void shouldSnapshotTablesInOrderSpecifiedInTableIncludeList() throws Exception {
config = simpleConfig()
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST,
"connector_test_ro_(.*).orders,connector_test_ro_(.*).Products,connector_test_ro_(.*).products_on_hand,connector_test_ro_(.*).dbz_342_timetest")
.build();
context = new MySqlTaskContext(config, new Filters.Builder(config).build());
context.start();
reader = new SnapshotReader("snapshot", context);
reader.uponCompletion(completed::countDown);
reader.generateInsertEvents();
// Start the snapshot ...
reader.start();
// Poll for records ...
List<SourceRecord> records;
LinkedHashSet<String> tablesInOrder = new LinkedHashSet<>();
LinkedHashSet<String> tablesInOrderExpected = getTableNamesInSpecifiedOrder("orders", "Products", "products_on_hand", "dbz_342_timetest");
while ((records = reader.poll()) != null) {
records.forEach(record -> {
VerifyRecord.isValid(record);
if (record.value() != null) {
tablesInOrder.add(getTableNameFromSourceRecord.apply(record));
}
});
}
assertArrayEquals(tablesInOrder.toArray(), tablesInOrderExpected.toArray());
}
@Test
public void shouldSnapshotTablesInOrderSpecifiedInTablesWhitelist() throws Exception {
config = simpleConfig()
@ -557,14 +586,10 @@ public void shouldSnapshotTablesInLexicographicalOrder() throws Exception {
assertArrayEquals(tablesInOrder.toArray(), tablesInOrderExpected.toArray());
}
private Function<SourceRecord, String> getTableNameFromSourceRecord = sourceRecord -> ((Struct) sourceRecord.value()).getStruct("source").getString("table");
private final Function<SourceRecord, String> getTableNameFromSourceRecord = sourceRecord -> ((Struct) sourceRecord.value()).getStruct("source").getString("table");
private LinkedHashSet<String> getTableNamesInSpecifiedOrder(String... tables) {
LinkedHashSet<String> tablesInOrderExpected = new LinkedHashSet<>();
for (String table : tables) {
tablesInOrderExpected.add(table);
}
return tablesInOrderExpected;
return new LinkedHashSet<>(Arrays.asList(tables));
}
@Test

View File

@ -156,7 +156,7 @@ public void shouldRecoverSourceInfoFromOffsetWithNonZeroBinlogCoordinatesAndNonZ
}
@Test
public void shouldRecoverSourceInfoFromOffsetWithFilterData() {
public void shouldRecoverSourceInfoFromOffsetWithFilterDataOld() {
final String databaseWhitelist = "a,b";
final String tableWhitelist = "c.foo,d.bar,d.baz";
Map<String, String> offset = offset(10, 10);
@ -165,15 +165,32 @@ public void shouldRecoverSourceInfoFromOffsetWithFilterData() {
sourceWith(offset);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseWhitelist, source.getDatabaseWhitelist());
assertEquals(tableWhitelist, source.getTableWhitelist());
assertEquals(databaseWhitelist, source.getDatabaseIncludeList());
assertEquals(tableWhitelist, source.getTableIncludeList());
// confirm other filter info is null
assertThat(source.getDatabaseBlacklist()).isNull();
assertThat(source.getTableBlacklist()).isNull();
assertThat(source.getDatabaseExcludeList()).isNull();
assertThat(source.getTableExcludeList()).isNull();
}
@Test
public void setOffsetFilterFromFilter() {
public void shouldRecoverSourceInfoFromOffsetWithFilterData() {
final String databaseWhitelist = "a,b";
final String tableWhitelist = "c.foo,d.bar,d.baz";
Map<String, String> offset = offset(10, 10);
offset.put(SourceInfo.DATABASE_INCLUDE_LIST_KEY, databaseWhitelist);
offset.put(SourceInfo.TABLE_INCLUDE_LIST_KEY, tableWhitelist);
sourceWith(offset);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseWhitelist, source.getDatabaseIncludeList());
assertEquals(tableWhitelist, source.getTableIncludeList());
// confirm other filter info is null
assertThat(source.getDatabaseExcludeList()).isNull();
assertThat(source.getTableExcludeList()).isNull();
}
@Test
public void setOffsetFilterFromFilterOld() {
final String databaseBlacklist = "a,b";
final String tableBlacklist = "c.foo, d.bar, d.baz";
Map<String, String> offset = offset(10, 10);
@ -188,8 +205,28 @@ public void setOffsetFilterFromFilter() {
source.setFilterDataFromConfig(configuration);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseBlacklist, source.getDatabaseBlacklist());
assertEquals(tableBlacklist, source.getTableBlacklist());
assertEquals(databaseBlacklist, source.getDatabaseExcludeList());
assertEquals(tableBlacklist, source.getTableExcludeList());
}
@Test
public void setOffsetFilterFromFilter() {
final String databaseBlacklist = "a,b";
final String tableBlacklist = "c.foo, d.bar, d.baz";
Map<String, String> offset = offset(10, 10);
sourceWith(offset);
assertThat(!source.hasFilterInfo());
final Configuration configuration = Configuration.create()
.with(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST, databaseBlacklist)
.with(MySqlConnectorConfig.TABLE_EXCLUDE_LIST, tableBlacklist)
.build();
source.setFilterDataFromConfig(configuration);
assertThat(source.hasFilterInfo()).isTrue();
assertEquals(databaseBlacklist, source.getDatabaseExcludeList());
assertEquals(tableBlacklist, source.getTableExcludeList());
}
@Test

View File

@ -183,7 +183,7 @@ public Configuration.Builder defaultConfig() {
.with(MySqlConnectorConfig.SERVER_ID, 18765)
.with(MySqlConnectorConfig.SERVER_NAME, getServerName())
.with(MySqlConnectorConfig.POLL_INTERVAL_MS, 10)
.with(MySqlConnectorConfig.DATABASE_WHITELIST, getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_INCLUDE_LIST, getDatabaseName())
.with(MySqlConnectorConfig.DATABASE_HISTORY, FileDatabaseHistory.class)
.with(MySqlConnectorConfig.BUFFER_SIZE_FOR_BINLOG_READER, 10_000);

View File

@ -96,7 +96,7 @@ public void shouldProcessPurgedGtidSet() throws SQLException, InterruptedExcepti
config = ro_database.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.TABLE_WHITELIST, ro_database.qualifiedTableName("customers"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, ro_database.qualifiedTableName("customers"))
.build();
// Start the connector ...
@ -161,7 +161,7 @@ public void shouldProcessPurgedLogsWhenDownAndSnapshotNeeded() throws SQLExcepti
config = database.defaultConfig()
.with(MySqlConnectorConfig.SNAPSHOT_MODE, SnapshotMode.WHEN_NEEDED)
.with(MySqlConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
.with(MySqlConnectorConfig.TABLE_WHITELIST, database.qualifiedTableName("customers"))
.with(MySqlConnectorConfig.TABLE_INCLUDE_LIST, database.qualifiedTableName("customers"))
.build();
// Start the connector ...

View File

@ -70,4 +70,6 @@ VALUES (default, '2016-01-16', 1001, 1, 102),
(default, '2016-01-17', 1002, 2, 105),
(default, '2016-02-18', 1004, 3, 109),
(default, '2016-02-19', 1002, 2, 106),
(default, '16-02-21', 1003, 1, 107);
(default, '16-02-21', 1003, 1, 107);
CREATE DATABASE IF NOT EXISTS emptydb;

View File

@ -89,3 +89,5 @@ CREATE TABLE dbz_342_timetest (
c5 TIME(6)
);
INSERT INTO dbz_342_timetest VALUES ('517:51:04.777', '-13:14:50', '-733:00:00.0011', '-1:59:59.0011', '-838:59:58.999999');
CREATE DATABASE IF NOT EXISTS emptydb;

View File

@ -66,7 +66,7 @@ This is where all the _magic_ happens, where we tie together all the pieces o
Remember, this version should match the version of the database parameters you cloned in the prior steps.
3. Select the **Template** of interest; I typically picked **Free tier** because it's free ;)
4. Provide your database with a **DB Instance Identifier**.
5. Under **Credentials Settings**, manually assign a **Master Password** and confirm it.
5. Under **Credentials Settings**, manually assign a **RDS Master Password** and confirm it.
6. Under **Connectivity**, make the instance _publicly accessible_.
7. You should be able to leave the **VPC security group** set to an existing one, typically **default** or whatever you named it.
In the event you have multiple security groups and you created a new one, select the one you want to assign.
@ -91,8 +91,8 @@ When you run your JVM process you either need to provide these VM options manual
```
database.dbname=postgres
database.hostname=<your amazon instance endpoint>
database.user=<name of master user, by default this is postgres>
database.password=<your master password>
database.user=<name of RDS master user, by default this is postgres>
database.password=<your RDS master password>
```
## Viewing logs

View File

@ -26,9 +26,9 @@
public class Filters {
protected static final List<String> SYSTEM_SCHEMAS = Arrays.asList("pg_catalog", "information_schema");
protected static final String SYSTEM_SCHEMA_BLACKLIST = String.join(",", SYSTEM_SCHEMAS);
protected static final String SYSTEM_SCHEMA_EXCLUDE_LIST = String.join(",", SYSTEM_SCHEMAS);
protected static final Predicate<String> IS_SYSTEM_SCHEMA = SYSTEM_SCHEMAS::contains;
protected static final String TEMP_TABLE_BLACKLIST = ".*\\.pg_temp.*";
protected static final String TEMP_TABLE_EXCLUDE_LIST = ".*\\.pg_temp.*";
private final TableFilter tableFilter;
private final ColumnNameFilter columnFilter;
@ -39,37 +39,37 @@ public class Filters {
public Filters(PostgresConnectorConfig config) {
// we always want to exclude PG system schemas as they are never part of logical decoding events
String schemaBlacklist = config.schemaBlacklist();
if (schemaBlacklist != null) {
schemaBlacklist = schemaBlacklist + "," + SYSTEM_SCHEMA_BLACKLIST;
String schemaExcludeList = config.schemaExcludeList();
if (schemaExcludeList != null) {
schemaExcludeList = schemaExcludeList + "," + SYSTEM_SCHEMA_EXCLUDE_LIST;
}
else {
schemaBlacklist = SYSTEM_SCHEMA_BLACKLIST;
schemaExcludeList = SYSTEM_SCHEMA_EXCLUDE_LIST;
}
String tableBlacklist = config.tableBlacklist();
if (tableBlacklist != null) {
tableBlacklist = tableBlacklist + "," + TEMP_TABLE_BLACKLIST;
String tableExcludeList = config.tableExcludeList();
if (tableExcludeList != null) {
tableExcludeList = tableExcludeList + "," + TEMP_TABLE_EXCLUDE_LIST;
}
else {
tableBlacklist = TEMP_TABLE_BLACKLIST;
tableExcludeList = TEMP_TABLE_EXCLUDE_LIST;
}
// Define the filter using the whitelists and blacklists for table names ...
// Define the filter using the include/exclude lists for table names ...
this.tableFilter = TableFilter.fromPredicate(Selectors.tableSelector()
.includeTables(config.tableWhitelist())
.excludeTables(tableBlacklist)
.includeSchemas(config.schemaWhitelist())
.excludeSchemas(schemaBlacklist)
.includeTables(config.tableIncludeList())
.excludeTables(tableExcludeList)
.includeSchemas(config.schemaIncludeList())
.excludeSchemas(schemaExcludeList)
.build());
String columnWhitelist = config.columnWhitelist();
if (columnWhitelist != null) {
this.columnFilter = ColumnNameFilterFactory.createWhitelistFilter(config.columnWhitelist());
String columnIncludeList = config.columnIncludeList();
if (columnIncludeList != null) {
this.columnFilter = ColumnNameFilterFactory.createIncludeListFilter(config.columnIncludeList());
}
else {
// Define the filter that excludes blacklisted columns, truncated columns, and masked columns ...
this.columnFilter = ColumnNameFilterFactory.createBlacklistFilter(config.columnBlacklist());
// Define the filter that excludes columns on the exclude list, truncated columns, and masked columns ...
this.columnFilter = ColumnNameFilterFactory.createExcludeListFilter(config.columnExcludeList());
}
}
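To make the list composition above concrete, here is a minimal, self-contained sketch of the effective exclude list the constructor builds; the user value `audit` is hypothetical:

```java
public class EffectiveExcludeListSketch {
    public static void main(String[] args) {
        // PG system schemas are always appended to whatever the user excluded,
        // since they never appear in logical decoding events.
        String userSchemaExcludeList = "audit"; // hypothetical user setting; may be null
        String systemSchemaExcludeList = "pg_catalog,information_schema";
        String effective = (userSchemaExcludeList == null)
                ? systemSchemaExcludeList
                : userSchemaExcludeList + "," + systemSchemaExcludeList;
        System.out.println(effective); // prints: audit,pg_catalog,information_schema
    }
}
```

The table exclude list is extended the same way with the temp-table pattern `.*\\.pg_temp.*`.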

View File

@ -668,7 +668,7 @@ public static AutoCreateMode parse(String value, String defaultValue) {
"Note this requires that the configured user has access. If the publication already exists, it will be used" +
". i.e CREATE PUBLICATION <publication_name> FOR ALL TABLES;" +
"FILTERED - If no publication exists, the connector will create a new publication for all those tables matching" +
"the current filter configuration (see table/database whitelist/blacklist properties). If the publication already" +
"the current filter configuration (see table/database include/exclude list properties). If the publication already" +
" exists, it will be used. i.e CREATE PUBLICATION <publication_name> FOR TABLE <tbl1, tbl2, etc>");
public static final Field STREAM_PARAMS = Field.create("slot.stream.params")
@ -920,8 +920,7 @@ public PostgresConnectorConfig(Configuration config) {
DEFAULT_SNAPSHOT_FETCH_SIZE);
String hstoreHandlingModeStr = config.getString(PostgresConnectorConfig.HSTORE_HANDLING_MODE);
HStoreHandlingMode hStoreHandlingMode = HStoreHandlingMode.parse(hstoreHandlingModeStr);
this.hStoreHandlingMode = hStoreHandlingMode;
this.hStoreHandlingMode = HStoreHandlingMode.parse(hstoreHandlingModeStr);
this.intervalHandlingMode = IntervalHandlingMode.parse(config.getString(PostgresConnectorConfig.INTERVAL_HANDLING_MODE));
this.snapshotMode = SnapshotMode.parse(config.getString(SNAPSHOT_MODE));
this.schemaRefreshMode = SchemaRefreshMode.parse(config.getString(SCHEMA_REFRESH_MODE));
@ -999,30 +998,6 @@ public Map<String, ConfigValue> validate() {
return getConfig().validate(ALL_FIELDS);
}
protected String schemaBlacklist() {
return getConfig().getString(SCHEMA_BLACKLIST);
}
protected String schemaWhitelist() {
return getConfig().getString(SCHEMA_WHITELIST);
}
protected String tableBlacklist() {
return getConfig().getString(TABLE_BLACKLIST);
}
protected String tableWhitelist() {
return getConfig().getString(TABLE_WHITELIST);
}
protected String columnBlacklist() {
return getConfig().getString(COLUMN_BLACKLIST);
}
protected String columnWhitelist() {
return getConfig().getString(COLUMN_WHITELIST);
}
protected Snapshotter getSnapshotter() {
return this.snapshotMode.getSnapshotter(getConfig());
}

View File

@ -35,7 +35,7 @@
/**
* Component that records the schema information for the {@link PostgresConnector}. The schema information contains
* the {@link Tables table definitions} and the Kafka Connect {@link #schemaFor(TableId) Schema}s for each table, where the
* {@link Schema} excludes any columns that have been {@link PostgresConnectorConfig#COLUMN_BLACKLIST specified} in the
* {@link Schema} excludes any columns that have been {@link PostgresConnectorConfig#COLUMN_EXCLUDE_LIST specified} in the
* configuration.
*
* @author Horia Chiorean

View File

@ -232,7 +232,7 @@ private void skipMessage(final Long lsn) throws SQLException, InterruptedExcepti
* acknowledged with the replication slot, causing an ever-growing WAL backlog.
* <p>
* This situation typically occurs if there are changes on the database server,
* (e.g. in a blacklisted database), but none of them is in a whitelisted table.
* (e.g. in an excluded database), but none of them is in table.include.list.
* To prevent this, heartbeats can be used, as they will allow us to commit
* offsets also when not propagating any "real" change event.
* <p>

View File

@ -534,7 +534,7 @@ private static Configuration.Builder getConfigurationBuilder(SnapshotMode snapsh
return TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, snapshotMode.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "outboxsmtit")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "outboxsmtit\\.outbox");
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "outboxsmtit")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "outboxsmtit\\.outbox");
}
}

View File

@ -75,6 +75,7 @@
import io.debezium.junit.SkipWhenKafkaVersion.KafkaVersion;
import io.debezium.junit.logging.LogInterceptor;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import io.debezium.schema.DatabaseSchema;
import io.debezium.util.Strings;
import io.debezium.util.Testing;
@ -177,11 +178,17 @@ public void shouldValidateConfiguration() throws Exception {
validateField(validatedConfig, PostgresConnectorConfig.SSL_CLIENT_KEY_PASSWORD, null);
validateField(validatedConfig, PostgresConnectorConfig.SSL_ROOT_CERT, null);
validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_WHITELIST, null);
validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_BLACKLIST, null);
validateField(validatedConfig, PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.TABLE_WHITELIST, null);
validateField(validatedConfig, PostgresConnectorConfig.TABLE_INCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.TABLE_BLACKLIST, null);
validateField(validatedConfig, PostgresConnectorConfig.TABLE_EXCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.COLUMN_BLACKLIST, null);
validateField(validatedConfig, PostgresConnectorConfig.COLUMN_EXCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.COLUMN_WHITELIST, null);
validateField(validatedConfig, PostgresConnectorConfig.COLUMN_INCLUDE_LIST, null);
validateField(validatedConfig, PostgresConnectorConfig.MSG_KEY_COLUMNS, null);
validateField(validatedConfig, PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL);
validateField(validatedConfig, RelationalDatabaseConnectorConfig.SNAPSHOT_LOCK_TIMEOUT_MS,
@ -324,6 +331,29 @@ public void shouldConsumeMessagesFromSnapshot() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
final int recordCount = 100;
for (int i = 0; i < recordCount - 1; i++) {
TestHelper.execute(INSERT_STMT);
}
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.MAX_QUEUE_SIZE, recordCount / 2)
.with(PostgresConnectorConfig.MAX_BATCH_SIZE, 10)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1");
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
SourceRecords records = consumeRecordsByTopic(recordCount);
Assertions.assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(recordCount);
stopConnector();
}
@Test
public void shouldConsumeMessagesFromSnapshotOld() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
final int recordCount = 100;
for (int i = 0; i < recordCount - 1; i++) {
TestHelper.execute(INSERT_STMT);
}
@ -351,7 +381,7 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
try {
final PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, Boolean.FALSE)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "changepk")
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "changepk")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.with(PostgresConnectorConfig.SLOT_NAME, slotName)
.build());
@ -733,6 +763,46 @@ public void shouldRecoverFromRetriableException() throws Exception {
TestHelper.dropDefaultReplicationSlot();
}
@Test
public void shouldTakeExcludeListFiltersIntoAccount() throws Exception {
String setupStmt = SETUP_TABLES_STMT +
"CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));" +
"ALTER TABLE s1.a ADD COLUMN bb integer;" +
"INSERT INTO s1.a (aa, bb) VALUES (2, 2);" +
"INSERT INTO s1.a (aa, bb) VALUES (3, 3);" +
"INSERT INTO s1.b (aa, bb) VALUES (4, 4);" +
"INSERT INTO s2.a (aa) VALUES (5);";
TestHelper.execute(setupStmt);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "s2")
.with(PostgresConnectorConfig.TABLE_EXCLUDE_LIST, ".+b")
.with(PostgresConnectorConfig.COLUMN_EXCLUDE_LIST, ".+bb");
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
// check the records from the snapshot take the filters into account
SourceRecords actualRecords = consumeRecordsByTopic(4); // 3 records in s1.a; s1.b and s2.a are excluded
assertThat(actualRecords.recordsForTopic(topicName("s2.a"))).isNullOrEmpty();
assertThat(actualRecords.recordsForTopic(topicName("s1.b"))).isNullOrEmpty();
List<SourceRecord> recordsForS1a = actualRecords.recordsForTopic(topicName("s1.a"));
assertThat(recordsForS1a.size()).isEqualTo(3);
AtomicInteger pkValue = new AtomicInteger(1);
recordsForS1a.forEach(record -> {
VerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
assertFieldAbsent(record, "bb");
});
// insert some more records and verify the filtering behavior
String insertStmt = "INSERT INTO s1.b (aa, bb) VALUES (6, 6);" +
"INSERT INTO s2.a (aa) VALUES (7);";
TestHelper.execute(insertStmt);
assertNoRecordsToConsume();
}
@Test
public void shouldTakeBlacklistFiltersIntoAccount() throws Exception {
String setupStmt = SETUP_TABLES_STMT +
@ -785,7 +855,7 @@ public void shouldTakeColumnWhitelistFilterIntoAccount() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with("column.mask.with.5.chars", ".+cc")
.with(PostgresConnectorConfig.COLUMN_WHITELIST, ".+aa,.+cc");
.with(PostgresConnectorConfig.COLUMN_INCLUDE_LIST, ".+aa,.+cc");
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
@ -809,6 +879,36 @@ public void shouldRemoveWhiteSpaceChars() throws Exception {
String tableWhitelistWithWhitespace = "s1.a, s1.b";
TestHelper.execute(setupStmt);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, tableWhitelistWithWhitespace);
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
List<SourceRecord> records = actualRecords.recordsForTopic(topicName("s1.b"));
assertThat(records.size()).isEqualTo(1);
SourceRecord record = records.get(0);
VerifyRecord.isValidRead(record, PK_FIELD, 1);
String sourceTable = ((Struct) record.value()).getStruct("source").getString("table");
assertThat(sourceTable).isEqualTo("b");
}
@Test
public void shouldRemoveWhiteSpaceCharsOld() throws Exception {
String setupStmt = SETUP_TABLES_STMT +
"CREATE TABLE s1.b (pk SERIAL, aa integer, PRIMARY KEY(pk));" +
"INSERT INTO s1.b (aa) VALUES (123);";
String tableWhitelistWithWhitespace = "s1.a, s1.b";
TestHelper.execute(setupStmt);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
@ -840,8 +940,8 @@ public void shouldCloseTxAfterTypeQuery() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "s1")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1.b")
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.b")
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true);
start(PostgresConnector.class, configBuilder.build());
@ -875,8 +975,8 @@ public void shouldReplaceInvalidTopicNameCharacters() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "s1")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1\\.dbz_878_some\\|test@data");
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.dbz_878_some\\|test@data");
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
@ -902,7 +1002,7 @@ public void shouldNotSendEmptyOffset() throws InterruptedException, SQLException
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1.a")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.with(Heartbeat.HEARTBEAT_INTERVAL, 10)
.build();
start(PostgresConnector.class, config);
@ -923,7 +1023,7 @@ public void shouldRegularlyFlushLsn() throws InterruptedException, SQLException
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1.a")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.build();
start(PostgresConnector.class, config);
assertConnectorIsRunning();
@ -966,7 +1066,7 @@ public void shouldFlushLsnOnEmptyMessage() throws InterruptedException, SQLExcep
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1.a")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.with(Heartbeat.HEARTBEAT_INTERVAL, 1_000)
.build();
start(PostgresConnector.class, config);
@ -1372,13 +1472,13 @@ public void testEmptySchemaWarningAfterApplyingFilters() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.TABLE_WHITELIST, "my_products");
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "my_products");
start(PostgresConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(10 * (TestHelper.waitTimeForRecords() * 5), TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isTrue());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
@Test
@ -1397,7 +1497,7 @@ public void testNoEmptySchemaWarningAfterApplyingFilters() throws Exception {
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isFalse());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse());
}
@Test
@ -1428,7 +1528,7 @@ public void shouldRewriteIdentityKey() throws InterruptedException {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "s1,s2")
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1,s2")
// rewrite key from table 'a': from {pk} to {pk, aa}
.with(PostgresConnectorConfig.MSG_KEY_COLUMNS, "(.*)1.a:pk,aa");
@ -1458,19 +1558,19 @@ public void shouldNotIssueWarningForNoMonitoredTablesAfterApplyingFilters() thro
TestHelper.execute(SETUP_TABLES_STMT);
TestHelper.execute(INSERT_STMT);
Configuration config = TestHelper.defaultConfig().with(PostgresConnectorConfig.SCHEMA_WHITELIST, "s2").build();
Configuration config = TestHelper.defaultConfig().with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s2").build();
// Start connector, verify that it does not log no monitored tables warning
start(PostgresConnector.class, config);
waitForSnapshotToBeCompleted();
SourceRecords records = consumeRecordsByTopic(1);
assertThat(logInterceptor.containsMessage(NO_MONITORED_TABLES_WARNING)).isFalse();
assertThat(logInterceptor.containsMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse();
stopConnector();
// Restart connector, verify it does not log no monitored tables warning
start(PostgresConnector.class, config);
waitForStreamingRunning();
assertThat(logInterceptor.containsMessage(NO_MONITORED_TABLES_WARNING)).isFalse();
assertThat(logInterceptor.containsMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse();
stopConnector();
}
@ -1810,7 +1910,7 @@ public void shouldConfigureSubscriptionsFromTableFilters() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.PUBLICATION_NAME, "cdc")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.numeric_table,public.text_table,s1.a,s2.a")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.numeric_table,public.text_table,s1.a,s2.a")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
start(PostgresConnector.class, configBuilder.build());
@ -1867,7 +1967,7 @@ public void shouldProduceMessagesOnlyForConfiguredTables() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.PUBLICATION_NAME, "cdc")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s2.a")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
start(PostgresConnector.class, configBuilder.build());

View File

@ -8,6 +8,7 @@
import static io.debezium.junit.EqualityCheck.LESS_THAN;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.SCHEMA_BLACKLIST;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.SCHEMA_EXCLUDE_LIST;
import static org.fest.assertions.Assertions.assertThat;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
@ -199,7 +200,7 @@ public void shouldApplyFilters() throws Exception {
"CREATE TABLE s2.A (pk SERIAL, aa integer, PRIMARY KEY(pk));" +
"CREATE TABLE s2.B (pk SERIAL, ba integer, PRIMARY KEY(pk));";
TestHelper.execute(statements);
PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(SCHEMA_BLACKLIST, "s1").build());
PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(SCHEMA_EXCLUDE_LIST, "s1").build());
final TypeRegistry typeRegistry = TestHelper.getTypeRegistry();
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
@ -208,6 +209,21 @@ public void shouldApplyFilters() throws Exception {
assertTablesExcluded("s1.a", "s1.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(SCHEMA_BLACKLIST, "s1").build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
schema.refresh(connection, false);
assertTablesIncluded("s2.a", "s2.b");
assertTablesExcluded("s1.a", "s1.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(SCHEMA_EXCLUDE_LIST, "s.*").build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.create()) {
schema.refresh(connection, false);
assertTablesExcluded("s1.a", "s2.a", "s1.b", "s2.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(SCHEMA_BLACKLIST, "s.*").build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.create()) {
@ -215,6 +231,14 @@ public void shouldApplyFilters() throws Exception {
assertTablesExcluded("s1.a", "s2.a", "s1.b", "s2.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_EXCLUDE_LIST, "s1.A,s2.A").build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
schema.refresh(connection, false);
assertTablesIncluded("s1.b", "s2.b");
assertTablesExcluded("s1.a", "s2.a");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_BLACKLIST, "s1.A,s2.A").build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
@ -223,6 +247,17 @@ public void shouldApplyFilters() throws Exception {
assertTablesExcluded("s1.a", "s2.a");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig()
.with(SCHEMA_EXCLUDE_LIST, "s2")
.with(PostgresConnectorConfig.TABLE_EXCLUDE_LIST, "s1.A")
.build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
schema.refresh(connection, false);
assertTablesIncluded("s1.b");
assertTablesExcluded("s1.a", "s2.a", "s2.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig()
.with(SCHEMA_BLACKLIST, "s2")
.with(PostgresConnectorConfig.TABLE_BLACKLIST, "s1.A")
@ -234,6 +269,14 @@ public void shouldApplyFilters() throws Exception {
assertTablesExcluded("s1.a", "s2.a", "s2.b");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.COLUMN_EXCLUDE_LIST, ".*aa")
.build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
schema.refresh(connection, false);
assertColumnsExcluded("s1.a.aa", "s2.a.aa");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.COLUMN_BLACKLIST, ".*aa")
.build());
schema = TestHelper.getSchema(config, typeRegistry);
@ -242,6 +285,14 @@ public void shouldApplyFilters() throws Exception {
assertColumnsExcluded("s1.a.aa", "s2.a.aa");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.COLUMN_INCLUDE_LIST, ".*bb")
.build());
schema = TestHelper.getSchema(config, typeRegistry);
try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
schema.refresh(connection, false);
assertColumnsExcluded("s1.a.aa", "s2.a.aa");
}
config = new PostgresConnectorConfig(TestHelper.defaultConfig().with(PostgresConnectorConfig.COLUMN_WHITELIST, ".*bb")
.build());
schema = TestHelper.getSchema(config, typeRegistry);

View File

@ -578,7 +578,7 @@ public void shouldGenerateSnapshotForNegativeMoney() throws Exception {
// insert money
TestHelper.execute(INSERT_NEGATIVE_CASH_TYPES_STMT);
buildNoStreamProducer(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_WHITELIST, "public.cash_table"));
buildNoStreamProducer(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.cash_table"));
TestConsumer consumer = testConsumer(1, "public");
consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
@ -596,7 +596,7 @@ public void shouldGenerateSnapshotForNullMoney() throws Exception {
// insert money
TestHelper.execute(INSERT_NULL_CASH_TYPES_STMT);
buildNoStreamProducer(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_WHITELIST, "public.cash_table"));
buildNoStreamProducer(TestHelper.defaultConfig().with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.cash_table"));
TestConsumer consumer = testConsumer(1, "public");
consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
@ -821,7 +821,7 @@ public void shouldSnapshotDomainTypesLikeBaseTypes() throws Exception {
buildNoStreamProducer(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table"));
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"));
final TestConsumer consumer = testConsumer(1, "public");
consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
@ -861,7 +861,7 @@ public void shouldSnapshotEnumAsKnownType() throws Exception {
// type, length, and scale values are resolved correctly when paired with Enum types.
buildNoStreamProducer(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.enum_table")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table")
.with("column.propagate.source.type", "public.enum_table.value"));
final TestConsumer consumer = testConsumer(1, "public");
@ -888,7 +888,7 @@ public void shouldSnapshotEnumArrayAsKnownType() throws Exception {
// type, length, and scale values are resolved correctly when paired with Enum types.
buildNoStreamProducer(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.enum_array_table")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_array_table")
.with("column.propagate.source.type", "public.enum_array_table.value"));
final TestConsumer consumer = testConsumer(1, "public");
@ -921,7 +921,7 @@ public void shouldSnapshotTimeArrayTypesAsKnownTypes() throws Exception {
buildNoStreamProducer(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.time_array_table"));
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.time_array_table"));
final TestConsumer consumer = testConsumer(1, "public");
consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);

View File

@ -126,7 +126,7 @@ public void before() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis");
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis");
// todo DBZ-766 are these really needed?
if (TestHelper.decoderPlugin() == PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) {
@ -142,7 +142,7 @@ private void startConnector(Function<Configuration.Builder, Configuration.Builde
throws InterruptedException {
start(PostgresConnector.class, new PostgresConnectorConfig(customConfig.apply(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis")
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
.with(PostgresConnectorConfig.SNAPSHOT_MODE, waitForSnapshot ? SnapshotMode.INITIAL : SnapshotMode.NEVER))
.build()).getConfig(), isStopRecord);
assertConnectorIsRunning();
@ -238,7 +238,7 @@ public void shouldReceiveChangesAfterConnectionRestart() throws Exception {
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis"));
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis"));
TestHelper.execute("CREATE TABLE t0 (pk SERIAL, d INTEGER, PRIMARY KEY(pk));");
@ -260,7 +260,7 @@ public void shouldReceiveChangesAfterConnectionRestart() throws Exception {
// This appears to be a potential race condition problem
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis"),
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis"),
false);
consumer = testConsumer(1);
waitForStreamingToStart();
@ -278,7 +278,7 @@ public void shouldReceiveUpdateSchemaAfterConnectionRestart() throws Exception {
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis")
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
.with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST));
@ -306,7 +306,7 @@ public void shouldReceiveUpdateSchemaAfterConnectionRestart() throws Exception {
// This appears to be a potential race condition problem
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis")
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
.with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST),
false);
@ -416,7 +416,7 @@ private Struct testProcessNotNullColumns(TemporalPrecisionMode temporalMode) thr
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SCHEMA_BLACKLIST, "postgis")
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
.with(PostgresConnectorConfig.TIME_PRECISION_MODE, temporalMode));
consumer.expects(1);
@ -1217,7 +1217,7 @@ public void shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() throws E
startConnector(config -> config
.with(Heartbeat.HEARTBEAT_INTERVAL, "100")
.with(PostgresConnectorConfig.POLL_INTERVAL_MS, "50")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1\\.b")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.b")
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER),
false);
waitForStreamingToStart();
@ -1265,7 +1265,7 @@ public void shouldWarnOnMissingHeartbeatForFilteredEvents() throws Exception {
final LogInterceptor logInterceptor = new LogInterceptor();
startConnector(config -> config
.with(PostgresConnectorConfig.POLL_INTERVAL_MS, "50")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "s1\\.b")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.b")
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER),
false);
waitForStreamingToStart();
@ -1482,7 +1482,7 @@ public void shouldReceiveChangesForTableWithoutPrimaryKey() throws Exception {
@Test()
@FixFor("DBZ-1130")
@SkipWhenDecoderPluginNameIsNot(value = WAL2JSON, reason = "WAL2JSON specific: Pass 'add-tables' stream parameter and verify it acts as a whitelist")
@SkipWhenDecoderPluginNameIsNot(value = WAL2JSON, reason = "WAL2JSON specific: Pass 'add-tables' stream parameter and verify it acts as an include list")
public void testPassingStreamParams() throws Exception {
// Verify that passing stream parameters works by using the WAL2JSON add-tables parameter which acts as a
// whitelist.
@ -1856,7 +1856,7 @@ public void shouldStreamChangesForDataTypeAlias() throws Exception {
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table"),
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
false);
waitForStreamingToStart();
@ -1885,7 +1885,7 @@ public void shouldStreamChangesForDomainAliasAlterTable() throws Exception {
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table")
.with("column.propagate.source.type", "public.alias_table.salary3"),
false);
@ -1926,7 +1926,7 @@ public void shouldStreamDomainAliasWithProperModifiers() throws Exception {
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table"),
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
false);
waitForStreamingToStart();
@ -1958,7 +1958,7 @@ public void shouldStreamValuesForDomainTypeOfDomainType() throws Exception {
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table")
.with("column.propagate.source.type", "public.alias_table.value"), false);
waitForStreamingToStart();
@ -1990,7 +1990,7 @@ public void shouldStreamValuesForAliasLikeBaseTypes() throws Exception {
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.alias_table"),
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
false);
waitForStreamingToStart();
@ -2149,7 +2149,7 @@ public void shouldStreamEnumAsKnownType() throws Exception {
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with("column.propagate.source.type", "public.enum_table.value")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.enum_table"), false);
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table"), false);
waitForStreamingToStart();
@ -2184,7 +2184,7 @@ public void shouldStreamEnumArrayAsKnownType() throws Exception {
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with("column.propagate.source.type", "public.enum_array_table.value")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.enum_array_table"), false);
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_array_table"), false);
waitForStreamingToStart();
@ -2244,7 +2244,7 @@ public void shouldStreamTimeArrayTypesAsKnownTypes() throws Exception {
startConnector(config -> config
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.time_array_table"), false);
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.time_array_table"), false);
waitForStreamingToStart();
@ -2310,7 +2310,7 @@ public void shouldStreamEnumsWhenIncludeUnknownDataTypesDisabled() throws Except
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with("column.propagate.source.type", "public.enum_table.value")
.with(PostgresConnectorConfig.TABLE_WHITELIST, "public.enum_table"), false);
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table"), false);
waitForStreamingToStart();

View File

@ -42,6 +42,28 @@ public void before() throws SQLException {
public void shouldProcessFromSnapshot() throws Exception {
TestHelper.execute(STATEMENTS);
start(PostgresConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
assertConnectorIsRunning();
final int expectedRecordsCount = 1 + 1 + 1;
TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
}
@Test
public void shouldProcessFromSnapshotOld() throws Exception {
TestHelper.execute(STATEMENTS);
start(PostgresConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "nopk")
@ -62,6 +84,29 @@ public void shouldProcessFromSnapshot() throws Exception {
@Test
public void shouldProcessFromStreaming() throws Exception {
start(PostgresConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
assertConnectorIsRunning();
waitForStreamingToStart();
TestHelper.execute(STATEMENTS);
final int expectedRecordsCount = 1 + 1 + 1;
TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
}
@Test
public void shouldProcessFromStreamingOld() throws Exception {
start(PostgresConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
.with(PostgresConnectorConfig.SCHEMA_WHITELIST, "nopk")

View File

@ -333,7 +333,9 @@ public static SnapshotIsolationMode parse(String value, String defaultValue) {
SOURCE_TIMESTAMP_MODE)
.excluding(
SCHEMA_WHITELIST,
SCHEMA_BLACKLIST)
SCHEMA_INCLUDE_LIST,
SCHEMA_BLACKLIST,
SCHEMA_EXCLUDE_LIST)
.create();
/**
@ -358,7 +360,8 @@ public SqlServerConnectorConfig(Configuration config) {
this.databaseName = config.getString(DATABASE_NAME);
this.snapshotMode = SnapshotMode.parse(config.getString(SNAPSHOT_MODE), SNAPSHOT_MODE.defaultValueAsString());
this.columnFilter = getColumnNameFilter(config.getString(RelationalDatabaseConnectorConfig.COLUMN_BLACKLIST));
this.columnFilter = getColumnNameFilter(
config.getFallbackStringProperty(SqlServerConnectorConfig.COLUMN_EXCLUDE_LIST, SqlServerConnectorConfig.COLUMN_BLACKLIST));
this.readOnlyDatabaseConnection = READ_ONLY_INTENT.equals(config.getString(APPLICATION_INTENT_KEY));
if (readOnlyDatabaseConnection) {
this.snapshotIsolationMode = SnapshotIsolationMode.SNAPSHOT;

View File

@ -316,7 +316,7 @@ private SqlServerChangeTable[] getCdcTablesToQuery() throws SQLException, Interr
LOGGER.warn("No table has enabled CDC or security constraints prevent getting the list of change tables");
}
final Map<TableId, List<SqlServerChangeTable>> whitelistedCdcEnabledTables = cdcEnabledTables.stream()
final Map<TableId, List<SqlServerChangeTable>> includeListCdcEnabledTables = cdcEnabledTables.stream()
.filter(changeTable -> {
if (connectorConfig.getTableFilters().dataCollectionFilter().isIncluded(changeTable.getSourceTableId())) {
return true;
@ -328,13 +328,13 @@ private SqlServerChangeTable[] getCdcTablesToQuery() throws SQLException, Interr
})
.collect(Collectors.groupingBy(x -> x.getSourceTableId()));
if (whitelistedCdcEnabledTables.isEmpty()) {
if (includeListCdcEnabledTables.isEmpty()) {
LOGGER.warn(
"No included table has CDC enabled, the include list does not contain any table with CDC enabled, or no table matches the include/exclude list filter(s)");
}
final List<SqlServerChangeTable> tables = new ArrayList<>();
for (List<SqlServerChangeTable> captures : whitelistedCdcEnabledTables.values()) {
for (List<SqlServerChangeTable> captures : includeListCdcEnabledTables.values()) {
SqlServerChangeTable currentTable = captures.get(0);
if (captures.size() > 1) {
SqlServerChangeTable futureTable;

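The renamed collection above is built with a standard filter-then-group stream pipeline. For illustration, a self-contained sketch of the same idiom, using a hypothetical ChangeTable stand-in rather than the real SqlServerChangeTable and table-filter classes:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class IncludeListGroupingSketch {

    // Hypothetical stand-in for SqlServerChangeTable: one capture instance of a source table.
    static final class ChangeTable {
        final String sourceTableId;
        final int captureInstance;

        ChangeTable(String sourceTableId, int captureInstance) {
            this.sourceTableId = sourceTableId;
            this.captureInstance = captureInstance;
        }
    }

    public static void main(String[] args) {
        List<ChangeTable> cdcEnabledTables = Arrays.asList(
                new ChangeTable("dbo.customers", 1),
                new ChangeTable("dbo.customers", 2), // second capture instance after a schema change
                new ChangeTable("dbo.audit", 1));

        // Stand-in for connectorConfig.getTableFilters().dataCollectionFilter().
        Predicate<String> includeList = tableId -> !tableId.equals("dbo.audit");

        // Keep only included tables, then group their capture instances by source table id,
        // mirroring how includeListCdcEnabledTables is assembled in the streaming source.
        Map<String, List<ChangeTable>> includeListCdcEnabledTables = cdcEnabledTables.stream()
                .filter(changeTable -> includeList.test(changeTable.sourceTableId))
                .collect(Collectors.groupingBy(changeTable -> changeTable.sourceTableId));

        if (includeListCdcEnabledTables.isEmpty()) {
            System.out.println("No captured table has CDC enabled");
        }
        System.out.println(includeListCdcEnabledTables.keySet()); // prints [dbo.customers]
    }
}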
View File

@ -79,7 +79,7 @@ public void after() throws SQLException {
public void decimalModeConfigString() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenuma")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.tablenuma")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.STRING).build();
start(SqlServerConnector.class, config);
@ -111,7 +111,7 @@ public void decimalModeConfigString() throws Exception {
public void decimalModeConfigDouble() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenumb")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.tablenumb")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE).build();
start(SqlServerConnector.class, config);
@ -142,7 +142,7 @@ public void decimalModeConfigDouble() throws Exception {
public void decimalModeConfigPrecise() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.tablenumc")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.tablenumc")
.with(SqlServerConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.PRECISE).build();
start(SqlServerConnector.class, config);

View File

@ -6,6 +6,7 @@
package io.debezium.connector.sqlserver;
import static io.debezium.connector.sqlserver.SqlServerConnectorConfig.SNAPSHOT_ISOLATION_MODE;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST;
import static org.fest.assertions.Assertions.assertThat;
import static org.junit.Assert.assertNull;
@ -264,7 +265,7 @@ public void takeSnapshotFromTableWithReservedName() throws Exception {
Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
final Configuration config = TestHelper.defaultConfig()
.with("table.whitelist", "dbo.User")
.with(TABLE_INCLUDE_LIST, "dbo.User")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
@ -311,7 +312,7 @@ public void takeSchemaOnlySnapshotAndSendHeartbeat() throws Exception {
@Test
@FixFor("DBZ-1067")
public void blacklistColumn() throws Exception {
public void testBlacklistColumn() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))");
@ -368,6 +369,65 @@ public void blacklistColumn() throws Exception {
stopConnector();
}
@Test
@FixFor("DBZ-1067")
public void testColumnExcludeList() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))");
connection.execute("INSERT INTO blacklist_column_table_a VALUES(10, 'some_name', 120)");
connection.execute("INSERT INTO blacklist_column_table_b VALUES(11, 'some_name', 447)");
TestHelper.enableTableCdc(connection, "blacklist_column_table_a");
TestHelper.enableTableCdc(connection, "blacklist_column_table_b");
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.COLUMN_EXCLUDE_LIST, "dbo.blacklist_column_table_a.amount")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.blacklist_column_table_a,dbo.blacklist_column_table_b")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
final SourceRecords records = consumeRecordsByTopic(2);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.blacklist_column_table_a");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.blacklist_column_table_b");
Schema expectedSchemaA = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_a.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.build();
Struct expectedValueA = new Struct(expectedSchemaA)
.put("id", 10)
.put("name", "some_name");
Schema expectedSchemaB = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_b.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.field("amount", Schema.OPTIONAL_INT32_SCHEMA)
.build();
Struct expectedValueB = new Struct(expectedSchemaB)
.put("id", 11)
.put("name", "some_name")
.put("amount", 447);
Assertions.assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
stopConnector();
}
@Test
public void reoderCapturedTables() throws Exception {
connection.execute(
@ -380,7 +440,7 @@ public void reoderCapturedTables() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.table_b,dbo.table_a")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.table_b,dbo.table_a")
.build();
start(SqlServerConnector.class, config);
@ -415,7 +475,7 @@ public void reoderCapturedTablesWithOverlappingTableWhitelist() throws Exception
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.table_ab,dbo.table_(.*)")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.table_ab,dbo.table_(.*)")
.build();
start(SqlServerConnector.class, config);
@ -457,7 +517,7 @@ public void reoderCapturedTablesWithoutTableWhitelist() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_BLACKLIST, "dbo.table1")
.with(SqlServerConnectorConfig.TABLE_EXCLUDE_LIST, "dbo.table1")
.build();

View File

@ -54,7 +54,7 @@ public void after() throws SQLException {
public void shouldParseWhitespaceChars() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo\\.UAT WAG CZ\\$Fixed Asset.*, dbo\\.UAT WAG CZ\\$Fixed Prop.*")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo\\.UAT WAG CZ\\$Fixed Asset.*, dbo\\.UAT WAG CZ\\$Fixed Prop.*")
.build();
connection.execute(
@ -118,7 +118,7 @@ public void shouldParseWhitespaceChars() throws Exception {
public void shouldParseSpecialChars() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo\\.UAT WAG CZ\\$Fixed Asset.*")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo\\.UAT WAG CZ\\$Fixed Asset.*")
.with(SqlServerConnectorConfig.SANITIZE_FIELD_NAMES, true)
.build();

View File

@ -58,6 +58,7 @@
import io.debezium.relational.history.FileDatabaseHistory;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.TableChanges;
import io.debezium.schema.DatabaseSchema;
import io.debezium.util.Testing;
/**
@ -846,7 +847,7 @@ public void verifyOffsets() throws Exception {
}
@Test
public void whitelistTable() throws Exception {
public void testWhitelistTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
@ -881,7 +882,42 @@ public void whitelistTable() throws Exception {
}
@Test
public void blacklistTable() throws Exception {
public void testTableIncludeList() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.tableb")
.build();
connection.execute(
"INSERT INTO tableb VALUES(1, 'b')");
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
// Wait for snapshot completion
consumeRecordsByTopic(1);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')");
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')");
}
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA == null || tableA.isEmpty()).isTrue();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector();
}
@Test
public void testBlacklistTable() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
@ -915,6 +951,41 @@ public void blacklistTable() throws Exception {
stopConnector();
}
@Test
public void testTableExcludeList() throws Exception {
final int RECORDS_PER_TABLE = 5;
final int TABLES = 1;
final int ID_START = 10;
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_EXCLUDE_LIST, "dbo.tablea")
.build();
connection.execute(
"INSERT INTO tableb VALUES(1, 'b')");
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
// Wait for snapshot completion
consumeRecordsByTopic(1);
for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i;
connection.execute(
"INSERT INTO tablea VALUES(" + id + ", 'a')");
connection.execute(
"INSERT INTO tableb VALUES(" + id + ", 'b')");
}
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.tableb");
Assertions.assertThat(tableA == null || tableA.isEmpty()).isTrue();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector();
}
@Test
@FixFor("DBZ-1617")
public void blacklistColumnWhenCdcColumnsDoNotMatchWithOriginalSnapshot() throws Exception {
@ -925,7 +996,7 @@ public void blacklistColumnWhenCdcColumnsDoNotMatchWithOriginalSnapshot() throws
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(SqlServerConnectorConfig.COLUMN_BLACKLIST, "dbo.table_a.blacklisted_column")
.with(SqlServerConnectorConfig.COLUMN_EXCLUDE_LIST, "dbo.table_a.blacklisted_column")
.build();
start(SqlServerConnector.class, config);
@ -961,7 +1032,7 @@ public void blacklistColumnWhenCdcColumnsDoNotMatchWithOriginalSnapshot() throws
@Test
@FixFor("DBZ-1067")
public void blacklistColumn() throws Exception {
public void testBlacklistColumn() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))");
@ -1021,6 +1092,68 @@ public void blacklistColumn() throws Exception {
stopConnector();
}
@Test
@FixFor("DBZ-1067")
public void testColumnExcludeList() throws Exception {
connection.execute(
"CREATE TABLE blacklist_column_table_a (id int, name varchar(30), amount integer primary key(id))",
"CREATE TABLE blacklist_column_table_b (id int, name varchar(30), amount integer primary key(id))");
TestHelper.enableTableCdc(connection, "blacklist_column_table_a");
TestHelper.enableTableCdc(connection, "blacklist_column_table_b");
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(SqlServerConnectorConfig.COLUMN_EXCLUDE_LIST, "dbo.blacklist_column_table_a.amount")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
// Wait for snapshot completion
consumeRecordsByTopic(1);
connection.execute("INSERT INTO blacklist_column_table_a VALUES(10, 'some_name', 120)");
connection.execute("INSERT INTO blacklist_column_table_b VALUES(11, 'some_name', 447)");
final SourceRecords records = consumeRecordsByTopic(2);
final List<SourceRecord> tableA = records.recordsForTopic("server1.dbo.blacklist_column_table_a");
final List<SourceRecord> tableB = records.recordsForTopic("server1.dbo.blacklist_column_table_b");
Schema expectedSchemaA = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_a.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.build();
Struct expectedValueA = new Struct(expectedSchemaA)
.put("id", 10)
.put("name", "some_name");
Schema expectedSchemaB = SchemaBuilder.struct()
.optional()
.name("server1.dbo.blacklist_column_table_b.Value")
.field("id", Schema.INT32_SCHEMA)
.field("name", Schema.OPTIONAL_STRING_SCHEMA)
.field("amount", Schema.OPTIONAL_INT32_SCHEMA)
.build();
Struct expectedValueB = new Struct(expectedSchemaB)
.put("id", 11)
.put("name", "some_name")
.put("amount", 447);
Assertions.assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
stopConnector();
}
@Test
@FixFor("DBZ-1692")
public void shouldConsumeEventsWithMaskedHashedColumns() throws Exception {
@ -1306,14 +1439,14 @@ public void testEmptySchemaWarningAfterApplyingFilters() throws Exception {
Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "my_products")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "my_products")
.build();
start(SqlServerConnector.class, config);
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isTrue());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
@Test
@ -1330,7 +1463,7 @@ public void testNoEmptySchemaWarningAfterApplyingFilters() throws Exception {
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(NO_MONITORED_TABLES_WARNING)).isFalse());
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse());
}
@Test
@ -1343,7 +1476,7 @@ public void keylessTable() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.keyless")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.keyless")
.build();
start(SqlServerConnector.class, config);
@ -1401,7 +1534,7 @@ public void shouldRewriteIdentityKey() throws InterruptedException, SQLException
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.keyless")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.keyless")
// rewrite key from table 'products': from {null} to {id}
.with(SqlServerConnectorConfig.MSG_KEY_COLUMNS, "(.*).keyless:id")
.build();
@ -1529,7 +1662,7 @@ public void shouldHonorSourceTimestampMode() throws InterruptedException, SQLExc
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.source_timestamp_mode")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.source_timestamp_mode")
.with(SqlServerConnectorConfig.SOURCE_TIMESTAMP_MODE, "processing")
.build();
@ -1751,7 +1884,7 @@ public void shouldPropagateSourceTypeByDatatype() throws Exception {
final Configuration config = TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.dt_table")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.dt_table")
.with("datatype.propagate.source.type", ".+\\.NUMERIC,.+\\.VARCHAR,.+\\.REAL,.+\\.DECIMAL")
.build();

View File

@ -63,7 +63,7 @@ public void shouldProcessFromSnapshot() throws Exception {
start(SqlServerConnector.class, TestHelper.defaultConfig()
.with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(SqlServerConnectorConfig.TABLE_WHITELIST, "dbo.t[123]")
.with(SqlServerConnectorConfig.TABLE_INCLUDE_LIST, "dbo.t[123]")
.build());
assertConnectorIsRunning();

View File

@ -48,7 +48,7 @@ public enum Version implements EnumeratedValue {
private final String value;
private Version(String value) {
Version(String value) {
this.value = value;
}
@ -95,7 +95,7 @@ public static Version parse(String value, String defaultValue) {
/**
* The set of predefined modes for dealing with failures during event processing.
*/
public static enum EventProcessingFailureHandlingMode implements EnumeratedValue {
public enum EventProcessingFailureHandlingMode implements EnumeratedValue {
/**
* Problematic events will be skipped.
@ -121,7 +121,7 @@ public static enum EventProcessingFailureHandlingMode implements EnumeratedValue
private final String value;
private EventProcessingFailureHandlingMode(String value) {
EventProcessingFailureHandlingMode(String value) {
this.value = value;
}
@ -166,17 +166,17 @@ public enum BinaryHandlingMode implements EnumeratedValue {
/**
* Represent binary values as byte array
*/
BYTES("bytes", () -> SchemaBuilder.bytes()),
BYTES("bytes", SchemaBuilder::bytes),
/**
* Represent binary values as base64-encoded string
*/
BASE64("base64", () -> SchemaBuilder.string()),
BASE64("base64", SchemaBuilder::string),
/**
* Represents binary values as hex-encoded (base16) string
*/
HEX("hex", () -> SchemaBuilder.string());
HEX("hex", SchemaBuilder::string);
private final String value;
private final Supplier<SchemaBuilder> schema;
@ -230,8 +230,8 @@ public static BinaryHandlingMode parse(String value, String defaultValue) {
}
}
private static String CONFLUENT_AVRO_CONVERTER = "io.confluent.connect.avro.AvroConverter";
private static String APICURIO_AVRO_CONVERTER = "io.apicurio.registry.utils.converter.AvroConverter";
private static final String CONFLUENT_AVRO_CONVERTER = "io.confluent.connect.avro.AvroConverter";
private static final String APICURIO_AVRO_CONVERTER = "io.apicurio.registry.utils.converter.AvroConverter";
public static final int DEFAULT_MAX_QUEUE_SIZE = 8192;
public static final int DEFAULT_MAX_BATCH_SIZE = 2048;
@ -428,7 +428,9 @@ protected CommonConnectorConfig(Configuration config, String logicalName, int de
/**
* Provides access to the "raw" config instance. In most cases, access via typed getters for individual properties
* on the connector config class should be preferred.
* TODO this should be made protected in the future to force access via proper facade methods / encapsulation
*/
@Deprecated
public Configuration getConfig() {
return config;
}

View File

@ -60,7 +60,9 @@
@Immutable
public interface Configuration {
public static final Pattern PASSWORD_PATTERN = Pattern.compile(".*password$|.*sasl\\.jaas\\.config$", Pattern.CASE_INSENSITIVE);
Logger CONFIGURATION_LOGGER = LoggerFactory.getLogger(Configuration.class);
Pattern PASSWORD_PATTERN = Pattern.compile(".*password$|.*sasl\\.jaas\\.config$", Pattern.CASE_INSENSITIVE);
/**
* The basic interface for configuration builders.
@ -1429,7 +1431,7 @@ default <T> T getInstance(String key, Class<T> type, Supplier<ClassLoader> class
*
* @param key the key for the configuration property
* @param clazz the Class of which the resulting object is expected to be an instance of; may not be null
* @param the {@link Configuration} object that is passed as a parameter to the constructor
* @param configuration the {@link Configuration} object that is passed as a parameter to the constructor
* @return the new instance, or null if there is no such key-value pair in the configuration or if there is a key-value
* configuration but the value could not be converted to an existing class with a zero-argument constructor
*/
@ -1469,7 +1471,7 @@ default <T> T getInstance(Field field, Class<T> type, Supplier<ClassLoader> clas
*
* @param field the field for the configuration property
* @param clazz the Class of which the resulting object is expected to be an instance of; may not be null
* @param the {@link Configuration} object that is passed as a parameter to the constructor
* @param configuration the {@link Configuration} object that is passed as a parameter to the constructor
* @return the new instance, or null if there is no such key-value pair in the configuration or if there is a key-value
* configuration but the value could not be converted to an existing class with a zero-argument constructor
*/
@ -2097,4 +2099,44 @@ default <T> void forEachMatchingFieldName(Pattern regex, int groupNumber, BiFunc
default <T> void forEach(BiConsumer<String, String> function) {
this.asMap().forEach(function);
}
/**
* Returns the string config value from the newProperty config field, if it is set, or its default value when it is
* not set/null. If both are null, it returns the value of the oldProperty config field, or its default value when
* that is null.
* This fallback only works for newProperty fields that have a null / not-set default value!
*
* @param newProperty the new property config field
* @param oldProperty the old / fallback property config field
* @return the evaluated value
*/
default String getFallbackStringProperty(Field newProperty, Field oldProperty) {
return Configuration.getFallbackStringProperty(this, newProperty, oldProperty);
}
/**
* Returns the string config value of the provided Configuration from the newProperty config field, if it is set,
* or its default value when it is not set/null. If both are null, it returns the value of the oldProperty config
* field, or its default value when that is null.
* This fallback only works for newProperty fields that have a null / not-set default value!
*
* @param config the configuration to read the property values from
* @param newProperty the new property config field
* @param oldProperty the old / fallback property config field
* @return the evaluated value
*/
static String getFallbackStringProperty(Configuration config, Field newProperty, Field oldProperty) {
if (null != config.getString(oldProperty.name()) && null != config.getString(newProperty.name())) { // both are set
CONFIGURATION_LOGGER.warn("Provided configuration has deprecated property \"" + oldProperty.name()
+ "\" and new property \"" + newProperty.name() + "\" set. Using value from \"" + newProperty.name() + "\"!");
}
return config.getString(
newProperty,
() -> {
CONFIGURATION_LOGGER.warn("Using configuration property \"" + oldProperty.name()
+ "\" is deprecated and will be removed in future versions. Please use \"" + newProperty.name()
+ "\" instead.");
return config.getString(oldProperty);
});
}
}
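As a usage sketch of the new fallback accessor: the property pair below is the table include/whitelist pair this commit migrates, while Field.create and Configuration.create are the existing builder entry points. With only the deprecated key set, its value is returned (plus a deprecation warning); with both keys set, the new one wins.

import io.debezium.config.Configuration;
import io.debezium.config.Field;

public class FallbackPropertySketch {

    // Both fields are created without default values, as the fallback requires.
    private static final Field TABLE_INCLUDE_LIST = Field.create("table.include.list");
    private static final Field TABLE_WHITELIST = Field.create("table.whitelist");

    public static void main(String[] args) {
        // Only the deprecated property is set: its value is used, a deprecation warning is logged.
        Configuration oldStyle = Configuration.create()
                .with("table.whitelist", "inventory.customers")
                .build();
        System.out.println(oldStyle.getFallbackStringProperty(TABLE_INCLUDE_LIST, TABLE_WHITELIST));
        // prints inventory.customers

        // Both properties are set: the new property wins, a "both set" warning is logged.
        Configuration bothSet = Configuration.create()
                .with("table.whitelist", "inventory.customers")
                .with("table.include.list", "inventory.orders")
                .build();
        System.out.println(bothSet.getFallbackStringProperty(TABLE_INCLUDE_LIST, TABLE_WHITELIST));
        // prints inventory.orders
    }
}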

View File

@ -1067,13 +1067,13 @@ public void readSchema(Tables tables, String databaseCatalog, String schemaNameP
String tableName = columnMetadata.getString(3);
TableId tableId = new TableId(catalogName, schemaName, tableName);
// exclude views and non-whitelisted tables
// exclude views and non-captured tables
if (viewIds.contains(tableId) ||
(tableFilter != null && !tableFilter.isIncluded(tableId))) {
continue;
}
// add all whitelisted columns
// add all included columns
readTableColumn(columnMetadata, tableId, columnFilter).ifPresent(column -> {
columnsByTable.computeIfAbsent(tableId, t -> new ArrayList<>())
.add(column.create());
@ -1102,7 +1102,7 @@ public void readSchema(Tables tables, String databaseCatalog, String schemaNameP
/**
* Returns a {@link ColumnEditor} representing the current record of the given result set of column metadata, if
* included in the column whitelist.
* included in column.include.list.
*/
protected Optional<ColumnEditor> readTableColumn(ResultSet columnMetadata, TableId tableId, ColumnNameFilter columnFilter) throws SQLException {
final String columnName = columnMetadata.getString(4);

View File

@ -47,7 +47,7 @@
/**
* Central dispatcher for data change and schema change events. The former will be routed to the change event queue, the
* latter will be routed to the {@link DatabaseSchema}. But based on the applying whitelist/blacklist configuration,
* latter will be routed to the {@link DatabaseSchema}. But based on the applied include/exclude list configuration,
* events may not be dispatched at all.
* <p>
* This router is also in charge of emitting heartbeat messages, exposing metrics via JMX, etc.

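A minimal sketch of that routing decision, using hypothetical types rather than the real EventDispatcher API: events whose collection falls outside the configured include/exclude lists are reported as filtered instead of being queued.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.Predicate;

public class DispatchSketch {

    // Stand-ins for the change event queue and the resolved include/exclude list filter.
    private final Queue<String> changeEventQueue = new ArrayDeque<>();
    private final Predicate<String> collectionFilter;

    DispatchSketch(Predicate<String> collectionFilter) {
        this.collectionFilter = collectionFilter;
    }

    void dispatchDataChangeEvent(String collectionId, String event) {
        if (collectionFilter.test(collectionId)) {
            changeEventQueue.add(event); // routed to the change event queue
        }
        else {
            // mirrors DataChangeEventListener.onFilteredEvent(...): dropped, not queued
            System.out.println("Filtered event for non-captured collection " + collectionId);
        }
    }

    public static void main(String[] args) {
        DispatchSketch dispatcher = new DispatchSketch(id -> id.startsWith("s1."));
        dispatcher.dispatchDataChangeEvent("s1.a", "insert #1"); // queued
        dispatcher.dispatchDataChangeEvent("s2.a", "insert #2"); // filtered
        System.out.println(dispatcher.changeEventQueue); // prints [insert #1]
    }
}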
View File

@ -26,7 +26,7 @@ public interface DataChangeEventListener {
void onEvent(DataCollectionId source, OffsetContext offset, Object key, Struct value) throws InterruptedException;
/**
* Invoked for events pertaining to non-whitelisted tables.
* Invoked for events pertaining to non-captured tables.
*/
void onFilteredEvent(String event);

View File

@ -30,22 +30,24 @@ public final class ColumnId implements Comparable<ColumnId> {
* <p>
* Qualified column names are comma-separated strings that are each {@link #parse(String) parsed} into {@link ColumnId} objects.
*
* @param columnBlacklist the comma-separated string listing the qualified names of the columns to be explicitly disallowed;
* @param columnExcludeList the comma-separated string listing the qualified names of the columns to be explicitly disallowed;
* may be null
* @return the predicate function; never null
*/
public static Map<TableId, Predicate<Column>> filter(String columnBlacklist) {
Set<ColumnId> columnExclusions = columnBlacklist == null ? null : Strings.setOf(columnBlacklist, ColumnId::parse);
public static Map<TableId, Predicate<Column>> filter(String columnExcludeList) {
Set<ColumnId> columnExclusions = columnExcludeList == null ? null : Strings.setOf(columnExcludeList, ColumnId::parse);
Map<TableId, Set<String>> excludedColumnNamesByTable = new HashMap<>();
columnExclusions.forEach(columnId -> {
excludedColumnNamesByTable.compute(columnId.tableId(), (tableId, columns) -> {
if (columns == null) {
columns = new HashSet<String>();
}
columns.add(columnId.columnName().toLowerCase());
return columns;
if (null != columnExclusions) {
columnExclusions.forEach(columnId -> {
excludedColumnNamesByTable.compute(columnId.tableId(), (tableId, columns) -> {
if (columns == null) {
columns = new HashSet<>();
}
columns.add(columnId.columnName().toLowerCase());
return columns;
});
});
});
}
Map<TableId, Predicate<Column>> exclusionFilterByTable = new HashMap<>();
excludedColumnNamesByTable.forEach((tableId, excludedColumnNames) -> {
exclusionFilterByTable.put(tableId, (col) -> !excludedColumnNames.contains(col.name().toLowerCase()));

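Besides the rename, the added null guard means a missing exclude list now yields an empty filter map instead of a NullPointerException. A usage sketch (column and table names invented for illustration; Column.editor() is the existing editor entry point):

import java.util.Map;
import java.util.function.Predicate;

import io.debezium.relational.Column;
import io.debezium.relational.ColumnId;
import io.debezium.relational.TableId;

public class ColumnExcludeListSketch {
    public static void main(String[] args) {
        // Exclude the ssn column of inventory.customers; all other columns pass.
        Map<TableId, Predicate<Column>> filterByTable = ColumnId.filter("inventory.customers.ssn");

        Column ssn = Column.editor().name("ssn").create();
        Column email = Column.editor().name("email").create();

        filterByTable.forEach((tableId, filter) -> {
            System.out.println(tableId + " keeps ssn?   " + filter.test(ssn));   // false - excluded
            System.out.println(tableId + " keeps email? " + filter.test(email)); // true - kept
        });

        // With the new guard, a null exclude list produces an empty map rather than an NPE.
        System.out.println(ColumnId.filter(null).isEmpty()); // prints true
    }
}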
View File

@ -37,10 +37,17 @@
*/
public abstract class RelationalDatabaseConnectorConfig extends CommonConnectorConfig {
protected static final String SCHEMA_INCLUDE_LIST_NAME = "schema.include.list";
protected static final String SCHEMA_EXCLUDE_LIST_NAME = "schema.exclude.list";
protected static final String TABLE_BLACKLIST_NAME = "table.blacklist";
protected static final String TABLE_EXCLUDE_LIST_NAME = "table.exclude.list";
protected static final String TABLE_WHITELIST_NAME = "table.whitelist";
protected static final String TABLE_INCLUDE_LIST_NAME = "table.include.list";
private static final Pattern MSG_KEY_COLUMNS_PATTERN = Pattern.compile("^(([^:]+):([^:;\\s]+))+[^;]$");
public static final long DEFAULT_SNAPSHOT_LOCK_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(10);
public static final String TABLE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"table.include.list\" or \"table.whitelist\" is already specified";
public static final String COLUMN_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"column.include.list\" or \"column.whitelist\" is already specified";
public static final String SCHEMA_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG = "\"schema.include.list\" or \"schema.whitelist\" is already specified";
/**
* The set of predefined DecimalHandlingMode options or aliases.
@ -66,7 +73,7 @@ public enum DecimalHandlingMode implements EnumeratedValue {
private final String value;
private DecimalHandlingMode(String value) {
DecimalHandlingMode(String value) {
this.value = value;
}
@ -136,29 +143,58 @@ public static DecimalHandlingMode parse(String value, String defaultValue) {
/**
* A comma-separated list of regular expressions that match the fully-qualified names of tables to be monitored.
* Fully-qualified names for tables are of the form {@code <databaseName>.<tableName>} or
* {@code <databaseName>.<schemaName>.<tableName>}. May not be used with {@link #TABLE_BLACKLIST}, and superseded by database
* {@code <databaseName>.<schemaName>.<tableName>}. Must not be used with {@link #TABLE_EXCLUDE_LIST}, and is superseded by database
* inclusions/exclusions.
*/
public static final Field TABLE_WHITELIST = Field.create(TABLE_WHITELIST_NAME)
.withDisplayName("Included tables")
public static final Field TABLE_INCLUDE_LIST = Field.create(TABLE_INCLUDE_LIST_NAME)
.withDisplayName("Include Tables")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withValidation(Field::isListOfRegex)
.withDescription("The tables for which changes are to be captured");
/**
* Old, backwards-compatible "whitelist" property.
*/
@Deprecated
public static final Field TABLE_WHITELIST = Field.create(TABLE_WHITELIST_NAME)
.withDisplayName("Deprecated: Include Tables")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(Field::isListOfRegex)
.withInvisibleRecommender()
.withDescription("The tables for which changes are to be captured (deprecated, use \"" + TABLE_INCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match the fully-qualified names of tables to be excluded from
* monitoring. Fully-qualified names for tables are of the form {@code <databaseName>.<tableName>} or
* {@code <databaseName>.<schemaName>.<tableName>}. May not be used with {@link #TABLE_WHITELIST}.
* {@code <databaseName>.<schemaName>.<tableName>}. Must not be used with {@link #TABLE_INCLUDE_LIST}.
*/
public static final Field TABLE_BLACKLIST = Field.create(TABLE_BLACKLIST_NAME)
.withDisplayName("Excluded tables")
public static final Field TABLE_EXCLUDE_LIST = Field.create(TABLE_EXCLUDE_LIST_NAME)
.withDisplayName("Exclude Tables")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withValidation(Field::isListOfRegex, RelationalDatabaseConnectorConfig::validateTableBlacklist)
.withInvisibleRecommender();
.withValidation(Field::isListOfRegex, RelationalDatabaseConnectorConfig::validateTableExcludeList)
.withInvisibleRecommender()
.withDescription("A comma-separated list of regular expressions that match the fully-qualified names of tables to be excluded from monitoring");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field TABLE_BLACKLIST = Field.create(TABLE_BLACKLIST_NAME)
.withDisplayName("Deprecated: Exclude Tables")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(Field::isListOfRegex, RelationalDatabaseConnectorConfig::validateTableExcludeList)
.withInvisibleRecommender()
.withDescription(
"A comma-separated list of regular expressions that match the fully-qualified names of tables to be excluded from monitoring (deprecated, use \""
+ TABLE_EXCLUDE_LIST.name() + "\" instead)");
public static final Field TABLE_IGNORE_BUILTIN = Field.create("table.ignore.builtin")
.withDisplayName("Ignore system databases")
@ -175,27 +211,52 @@ public static DecimalHandlingMode parse(String value, String defaultValue) {
* For instance, they could be of the form {@code <databaseName>.<tableName>.<columnName>} or
* {@code <schemaName>.<tableName>.<columnName>} or {@code <databaseName>.<schemaName>.<tableName>.<columnName>}.
*/
public static final Field COLUMN_BLACKLIST = Field.create("column.blacklist")
public static final Field COLUMN_EXCLUDE_LIST = Field.create("column.exclude.list")
.withDisplayName("Exclude Columns")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withValidation(RelationalDatabaseConnectorConfig::validateColumnBlacklist)
.withValidation(RelationalDatabaseConnectorConfig::validateColumnExcludeList)
.withDescription("Regular expressions matching columns to exclude from change events");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field COLUMN_BLACKLIST = Field.create("column.blacklist")
.withDisplayName("Deprecated: Exclude Columns")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(RelationalDatabaseConnectorConfig::validateColumnExcludeList)
.withInvisibleRecommender()
.withDescription("Regular expressions matching columns to exclude from change events (deprecated, use \"" + COLUMN_EXCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match fully-qualified names of columns to be included in monitoring
* and change messages. The exact form of fully qualified names for columns might vary between connector types.
* For instance, they could be of the form {@code <databaseName>.<tableName>.<columnName>} or
* {@code <schemaName>.<tableName>.<columnName>} or {@code <databaseName>.<schemaName>.<tableName>.<columnName>}.
*/
public static final Field COLUMN_WHITELIST = Field.create("column.whitelist")
public static final Field COLUMN_INCLUDE_LIST = Field.create("column.include.list")
.withDisplayName("Include Columns")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withDescription("Regular expressions matching columns to include in change events");
/**
* Old, backwards-compatible "whitelist" property.
*/
@Deprecated
public static final Field COLUMN_WHITELIST = Field.create("column.whitelist")
.withDisplayName("Deprecated: Include Columns")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withInvisibleRecommender()
.withDescription("Regular expressions matching columns to include in change events (deprecated, use \"" + COLUMN_INCLUDE_LIST.name() + "\" instead)");
public static final Field MSG_KEY_COLUMNS = Field.create("message.key.columns")
.withDisplayName("Columns PK mapping")
.withType(Type.STRING)
@ -235,29 +296,55 @@ public static DecimalHandlingMode parse(String value, String defaultValue) {
/**
* A comma-separated list of regular expressions that match schema names to be monitored.
* May not be used with {@link #SCHEMA_BLACKLIST}.
* Must not be used with {@link #SCHEMA_EXCLUDE_LIST}.
*/
public static final Field SCHEMA_WHITELIST = Field.create("schema.whitelist")
.withDisplayName("Schemas")
public static final Field SCHEMA_INCLUDE_LIST = Field.create(SCHEMA_INCLUDE_LIST_NAME)
.withDisplayName("Include Schemas")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.HIGH)
.withDependents(TABLE_WHITELIST_NAME)
.withDependents(TABLE_INCLUDE_LIST_NAME, TABLE_WHITELIST_NAME)
.withDescription("The schemas for which events should be captured");
/**
* A comma-separated list of regular expressions that match schema names to be excluded from monitoring.
* May not be used with {@link #SCHEMA_WHITELIST}.
* Old, backwards-compatible "whitelist" property.
*/
public static final Field SCHEMA_BLACKLIST = Field.create("schema.blacklist")
@Deprecated
public static final Field SCHEMA_WHITELIST = Field.create("schema.whitelist")
.withDisplayName("Deprecated: Include Schemas")
.withType(Type.LIST)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withDependents(TABLE_INCLUDE_LIST_NAME, TABLE_WHITELIST_NAME)
.withInvisibleRecommender()
.withDescription("The schemas for which events should be captured (deprecated, use \"" + SCHEMA_INCLUDE_LIST.name() + "\" instead)");
/**
* A comma-separated list of regular expressions that match schema names to be excluded from monitoring.
* Must not be used with {@link #SCHEMA_INCLUDE_LIST}.
*/
public static final Field SCHEMA_EXCLUDE_LIST = Field.create(SCHEMA_EXCLUDE_LIST_NAME)
.withDisplayName("Exclude Schemas")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withValidation(RelationalDatabaseConnectorConfig::validateSchemaBlacklist)
.withValidation(RelationalDatabaseConnectorConfig::validateSchemaExcludeList)
.withInvisibleRecommender()
.withDescription("The schemas for which events must not be captured");
/**
* Old, backwards-compatible "blacklist" property.
*/
@Deprecated
public static final Field SCHEMA_BLACKLIST = Field.create("schema.blacklist")
.withDisplayName("Deprecated: Exclude Schemas")
.withType(Type.STRING)
.withWidth(Width.LONG)
.withImportance(Importance.LOW)
.withValidation(RelationalDatabaseConnectorConfig::validateSchemaExcludeList)
.withInvisibleRecommender()
.withDescription("The schemas for which events must not be captured (deprecated, use \"" + SCHEMA_EXCLUDE_LIST.name() + "\" instead)");
public static final Field TIME_PRECISION_MODE = Field.create("time.precision.mode")
.withDisplayName("Time Precision")
.withEnum(TemporalPrecisionMode.class, TemporalPrecisionMode.ADAPTIVE)
@ -317,12 +404,18 @@ public static DecimalHandlingMode parse(String value, String defaultValue) {
SNAPSHOT_LOCK_TIMEOUT_MS)
.events(
COLUMN_WHITELIST,
COLUMN_INCLUDE_LIST,
COLUMN_BLACKLIST,
COLUMN_EXCLUDE_LIST,
TABLE_WHITELIST,
TABLE_INCLUDE_LIST,
TABLE_BLACKLIST,
TABLE_EXCLUDE_LIST,
TABLE_IGNORE_BUILTIN,
SCHEMA_WHITELIST,
SCHEMA_INCLUDE_LIST,
SCHEMA_BLACKLIST,
SCHEMA_EXCLUDE_LIST,
MSG_KEY_COLUMNS,
SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE,
MASK_COLUMN_WITH_HASH,
@ -383,11 +476,36 @@ public Duration snapshotLockTimeout() {
return Duration.ofMillis(getConfig().getLong(SNAPSHOT_LOCK_TIMEOUT_MS));
}
private static int validateColumnBlacklist(Configuration config, Field field, Field.ValidationOutput problems) {
String whitelist = config.getString(COLUMN_WHITELIST);
String blacklist = config.getString(COLUMN_BLACKLIST);
if (whitelist != null && blacklist != null) {
problems.accept(COLUMN_BLACKLIST, blacklist, "Column whitelist is already specified");
public String schemaExcludeList() {
return getConfig().getFallbackStringProperty(SCHEMA_EXCLUDE_LIST, SCHEMA_BLACKLIST);
}
public String schemaIncludeList() {
return getConfig().getFallbackStringProperty(SCHEMA_INCLUDE_LIST, SCHEMA_WHITELIST);
}
public String tableExcludeList() {
return getConfig().getFallbackStringProperty(TABLE_EXCLUDE_LIST, TABLE_BLACKLIST);
}
public String tableIncludeList() {
return getConfig().getFallbackStringProperty(TABLE_INCLUDE_LIST, TABLE_WHITELIST);
}
public String columnExcludeList() {
return getConfig().getFallbackStringProperty(COLUMN_EXCLUDE_LIST, COLUMN_BLACKLIST);
}
public String columnIncludeList() {
return getConfig().getFallbackStringProperty(COLUMN_INCLUDE_LIST, COLUMN_WHITELIST);
}
private static int validateColumnExcludeList(Configuration config, Field field, Field.ValidationOutput problems) {
String includeList = Configuration.getFallbackStringProperty(config, COLUMN_INCLUDE_LIST, COLUMN_WHITELIST);
String excludeList = Configuration.getFallbackStringProperty(config, COLUMN_EXCLUDE_LIST, COLUMN_BLACKLIST);
if (includeList != null && excludeList != null) {
problems.accept(COLUMN_EXCLUDE_LIST, excludeList, COLUMN_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
return 0;
@ -401,12 +519,12 @@ public TableIdToStringMapper getTableIdMapper() {
return tableIdMapper;
}
private static int validateTableBlacklist(Configuration config, Field field, ValidationOutput problems) {
String whitelist = config.getString(TABLE_WHITELIST);
String blacklist = config.getString(TABLE_BLACKLIST);
private static int validateTableExcludeList(Configuration config, Field field, ValidationOutput problems) {
String includeList = Configuration.getFallbackStringProperty(config, TABLE_INCLUDE_LIST, TABLE_WHITELIST);
String excludeList = Configuration.getFallbackStringProperty(config, TABLE_EXCLUDE_LIST, TABLE_BLACKLIST);
if (whitelist != null && blacklist != null) {
problems.accept(TABLE_BLACKLIST, blacklist, "Table whitelist is already specified");
if (includeList != null && excludeList != null) {
problems.accept(TABLE_EXCLUDE_LIST, excludeList, TABLE_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
@ -434,11 +552,12 @@ public Map<TableId, String> getSnapshotSelectOverridesByTable() {
return Collections.unmodifiableMap(snapshotSelectOverridesByTable);
}
private static int validateSchemaBlacklist(Configuration config, Field field, Field.ValidationOutput problems) {
String whitelist = config.getString(SCHEMA_WHITELIST);
String blacklist = config.getString(SCHEMA_BLACKLIST);
if (whitelist != null && blacklist != null) {
problems.accept(SCHEMA_BLACKLIST, blacklist, "Schema whitelist is already specified");
private static int validateSchemaExcludeList(Configuration config, Field field, Field.ValidationOutput problems) {
String includeList = Configuration.getFallbackStringProperty(config, SCHEMA_INCLUDE_LIST, SCHEMA_WHITELIST);
String excludeList = Configuration.getFallbackStringProperty(config, SCHEMA_EXCLUDE_LIST, SCHEMA_BLACKLIST);
if (includeList != null && excludeList != null) {
problems.accept(SCHEMA_EXCLUDE_LIST, excludeList, SCHEMA_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
return 1;
}
return 0;

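A minimal sketch of the fallback resolution that the new accessors and validators above rely on, using the `Configuration` builder API seen in the tests later in this commit; the regex value is invented:

```java
import io.debezium.config.Configuration;
import io.debezium.relational.RelationalDatabaseConnectorConfig;

public class FallbackPropertySketch {
    public static void main(String[] args) {
        // A configuration that still uses only the deprecated property name.
        Configuration config = Configuration.create()
                .with("table.whitelist", "inventory\\.customers")
                .build();

        // Resolves "table.include.list" first and falls back to "table.whitelist",
        // so existing configurations keep working unchanged.
        String tables = config.getFallbackStringProperty(
                RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST,
                RelationalDatabaseConnectorConfig.TABLE_WHITELIST);

        System.out.println(tables); // inventory\.customers
    }
}
```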
View File

@ -82,7 +82,7 @@ public Set<TableId> tableIds() {
@Override
public void assureNonEmptySchema() {
if (tableIds().isEmpty()) {
LOG.warn("After applying blacklist/whitelist filters there are no tables to monitor, please check your configuration");
LOG.warn(NO_CAPTURED_DATA_COLLECTIONS_WARNING);
}
}

View File

@ -167,9 +167,9 @@ private Stream<TableId> toTableIds(Set<TableId> tableIds, Pattern pattern) {
}
private Set<TableId> sort(Set<TableId> capturedTables) throws Exception {
String value = connectorConfig.getConfig().getString(RelationalDatabaseConnectorConfig.TABLE_WHITELIST);
if (value != null) {
return Strings.listOfRegex(value, Pattern.CASE_INSENSITIVE)
String tableIncludeList = connectorConfig.tableIncludeList();
if (tableIncludeList != null) {
return Strings.listOfRegex(tableIncludeList, Pattern.CASE_INSENSITIVE)
.stream()
.flatMap(pattern -> toTableIds(capturedTables, pattern))
.collect(Collectors.toCollection(LinkedHashSet::new));

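The rewritten `sort(...)` above preserves the order of the patterns in the include list, so that order doubles as a capture priority. A small sketch of that ordering, assuming `io.debezium.util.Strings.listOfRegex(String, int)` returns the compiled patterns in declaration order; the table names are invented:

```java
import java.util.List;
import java.util.regex.Pattern;

import io.debezium.util.Strings;

public class IncludeListOrderSketch {
    public static void main(String[] args) {
        // A specific table first, then a catch-all pattern.
        List<Pattern> patterns =
                Strings.listOfRegex("inventory\\.orders,inventory\\..*", Pattern.CASE_INSENSITIVE);

        // Prints the patterns in list order, which is the order in which
        // matching captured tables are emitted by sort(...).
        patterns.forEach(p -> System.out.println(p.pattern()));
    }
}
```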
View File

@ -17,21 +17,33 @@ public class RelationalTableFilters implements DataCollectionFilters {
private final TableFilter tableFilter;
public RelationalTableFilters(Configuration config, TableFilter systemTablesFilter, TableIdToStringMapper tableIdMapper) {
// Define the filter using the whitelists and blacklists for tables and database names ...
// Define the filter using the include and exclude lists for tables and database names ...
Predicate<TableId> predicate = Selectors.tableSelector()
// .includeDatabases(config.getString(RelationalDatabaseConnectorConfig.DATABASE_WHITELIST))
// .excludeDatabases(config.getString(RelationalDatabaseConnectorConfig.DATABASE_BLACKLIST))
.includeSchemas(config.getString(RelationalDatabaseConnectorConfig.SCHEMA_WHITELIST))
.excludeSchemas(config.getString(RelationalDatabaseConnectorConfig.SCHEMA_BLACKLIST))
.includeTables(config.getString(RelationalDatabaseConnectorConfig.TABLE_WHITELIST), tableIdMapper)
.excludeTables(config.getString(RelationalDatabaseConnectorConfig.TABLE_BLACKLIST), tableIdMapper)
.includeSchemas(
config.getFallbackStringProperty(
RelationalDatabaseConnectorConfig.SCHEMA_INCLUDE_LIST,
RelationalDatabaseConnectorConfig.SCHEMA_WHITELIST))
.excludeSchemas(
config.getFallbackStringProperty(
RelationalDatabaseConnectorConfig.SCHEMA_EXCLUDE_LIST,
RelationalDatabaseConnectorConfig.SCHEMA_BLACKLIST))
.includeTables(
config.getFallbackStringProperty(
RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST,
RelationalDatabaseConnectorConfig.TABLE_WHITELIST),
tableIdMapper)
.excludeTables(
config.getFallbackStringProperty(
RelationalDatabaseConnectorConfig.TABLE_EXCLUDE_LIST,
RelationalDatabaseConnectorConfig.TABLE_BLACKLIST),
tableIdMapper)
.build();
Predicate<TableId> finalPredicate = config.getBoolean(RelationalDatabaseConnectorConfig.TABLE_IGNORE_BUILTIN)
? predicate.and(systemTablesFilter::isIncluded)
: predicate;
this.tableFilter = t -> finalPredicate.test(t);
this.tableFilter = finalPredicate::test;
}
@Override

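For illustration, a sketch of the selector builder used in this constructor, limited to the single-argument calls visible in the hunk; the schema and table names are hypothetical:

```java
import java.util.function.Predicate;

import io.debezium.relational.Selectors;
import io.debezium.relational.TableId;

public class TableFilterSketch {
    public static void main(String[] args) {
        // Capture only tables that live in the "inventory" schema.
        Predicate<TableId> filter = Selectors.tableSelector()
                .includeSchemas("inventory")
                .build();

        System.out.println(filter.test(new TableId(null, "inventory", "customers"))); // true
        System.out.println(filter.test(new TableId(null, "audit", "log_entries")));   // false
    }
}
```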
View File

@ -72,7 +72,7 @@ public static class ColumnNameFilterFactory {
* @param fullyQualifiedColumnNames the comma-separated list of fully-qualified column names to exclude; may be null or empty
* @return a column name filter; never null
*/
public static ColumnNameFilter createBlacklistFilter(String fullyQualifiedColumnNames) {
public static ColumnNameFilter createExcludeListFilter(String fullyQualifiedColumnNames) {
Predicate<ColumnId> delegate = Predicates.excludes(fullyQualifiedColumnNames, ColumnId::toString);
return (catalogName, schemaName, tableName, columnName) -> delegate.test(new ColumnId(new TableId(catalogName, schemaName, tableName), columnName));
}
@ -87,7 +87,7 @@ public static ColumnNameFilter createBlacklistFilter(String fullyQualifiedColumn
* @param fullyQualifiedColumnNames the comma-separated list of fully-qualified column names to include; may be null or empty
* @return a column name filter; never null
*/
public static ColumnNameFilter createWhitelistFilter(String fullyQualifiedColumnNames) {
public static ColumnNameFilter createIncludeListFilter(String fullyQualifiedColumnNames) {
Predicate<ColumnId> delegate = Predicates.includes(fullyQualifiedColumnNames, ColumnId::toString);
return (catalogName, schemaName, tableName, columnName) -> delegate.test(new ColumnId(new TableId(catalogName, schemaName, tableName), columnName));
}

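A hedged usage sketch of the renamed factory methods, assuming the `ColumnNameFilter` functional interface exposes the four-argument `matches(...)` contract implied by the lambda above; the column names are invented:

```java
import io.debezium.relational.Tables.ColumnNameFilter;
import io.debezium.relational.Tables.ColumnNameFilterFactory;

public class ColumnNameFilterSketch {
    public static void main(String[] args) {
        // Exclude a single fully-qualified column; matching delegates to
        // Predicates.excludes() over ColumnId.toString().
        ColumnNameFilter filter =
                ColumnNameFilterFactory.createExcludeListFilter("inventory.customers.ssn");

        System.out.println(filter.matches("inventory", null, "customers", "ssn"));   // false -> excluded
        System.out.println(filter.matches("inventory", null, "customers", "email")); // true  -> kept
    }
}
```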
View File

@ -183,9 +183,9 @@ public static interface DatabaseStatementStringConsumer {
/**
* @return true if any event stored is one of
* <ul>
* <li>database-wide events and affects whitelisted database</li>
* <li>table related events and the table is whitelisted</li>
* <li>events that set a variable and either affects whitelisted database or is a system-wide variable</li>
* <li>database-wide events that affect an included database</li>
* <li>table-related events where the table is included</li>
* <li>events that set a variable that either affects an included database or is a system-wide variable</li>
* </ul>
*/
public boolean anyMatch(Predicate<String> databaseFilter, Predicate<TableId> tableFilter) {

View File

@ -46,7 +46,7 @@ public interface DatabaseHistory {
.withDefault(false);
public static final Field STORE_ONLY_MONITORED_TABLES_DDL = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "store.only.monitored.tables.ddl")
.withDisplayName("Store only DDL that modifies whitelisted/not-blacklisted tables")
.withDisplayName("Store only DDL that modifies tables that are captured based on include/exclude lists")
.withType(Type.BOOLEAN)
.withWidth(Width.SHORT)
.withImportance(Importance.LOW)

View File

@ -15,6 +15,8 @@
*/
public interface DatabaseSchema<I extends DataCollectionId> {
String NO_CAPTURED_DATA_COLLECTIONS_WARNING = "After applying the include/exclude list filters, no changes will be captured. Please check your configuration!";
void close();
DataCollectionSchema schemaFor(I id);

View File

@ -6,6 +6,9 @@
package io.debezium.config;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.COLUMN_BLACKLIST;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.COLUMN_EXCLUDE_LIST;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.COLUMN_INCLUDE_LIST;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.COLUMN_WHITELIST;
import static io.debezium.relational.RelationalDatabaseConnectorConfig.MSG_KEY_COLUMNS;
import static org.fest.assertions.Assertions.assertThat;
@ -20,6 +23,7 @@
import io.debezium.doc.FixFor;
import io.debezium.function.Predicates;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import io.debezium.relational.history.DatabaseHistory;
/**
@ -60,15 +64,63 @@ public void shouldCreateInternalFields() {
@Test
@FixFor("DBZ-1962")
public void shouldThrowValidationOnDuplicateColumnFilterConfiguration() {
public void shouldThrowValidationOnDuplicateOldColumnFilterConfiguration() {
config = Configuration.create()
.with("column.whitelist", ".+aa")
.with("column.blacklist", ".+bb")
.with(COLUMN_WHITELIST, ".+aa")
.with(COLUMN_BLACKLIST, ".+bb")
.build();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_BLACKLIST)).get(COLUMN_BLACKLIST.name()).errorMessages();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_EXCLUDE_LIST)).get(COLUMN_EXCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isNotEmpty();
assertThat(errorMessages.get(0)).isEqualTo("Column whitelist is already specified");
assertThat(errorMessages.get(0)).isEqualTo(RelationalDatabaseConnectorConfig.COLUMN_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
}
@Test
@FixFor("DBZ-1962")
public void shouldThrowValidationOnDuplicateNewColumnFilterConfiguration() {
config = Configuration.create()
.with(COLUMN_INCLUDE_LIST, ".+aa")
.with(COLUMN_EXCLUDE_LIST, ".+bb")
.build();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_EXCLUDE_LIST)).get(COLUMN_EXCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isNotEmpty();
assertThat(errorMessages.get(0)).isEqualTo(RelationalDatabaseConnectorConfig.COLUMN_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
}
@Test
@FixFor("DBZ-1962")
public void shouldThrowValidationOnDuplicateColumnFilterConfiguration() {
config = Configuration.create()
.with("column.include.list", ".+aa")
.with("column.exclude.list", ".+bb")
.build();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_EXCLUDE_LIST)).get(COLUMN_EXCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isNotEmpty();
assertThat(errorMessages.get(0)).isEqualTo(RelationalDatabaseConnectorConfig.COLUMN_INCLUDE_LIST_ALREADY_SPECIFIED_ERROR_MSG);
}
@Test
public void shouldAllowNewColumnFilterIncludeListConfiguration() {
config = Configuration.create()
.with("column.include.list", ".+aa")
.build();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_EXCLUDE_LIST)).get(COLUMN_EXCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isEmpty();
errorMessages = config.validate(Field.setOf(COLUMN_INCLUDE_LIST)).get(COLUMN_INCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isEmpty();
}
@Test
public void shouldAllowNewColumnFilterExcludeListConfiguration() {
config = Configuration.create()
.with("column.exclude.list", ".+bb")
.build();
List<String> errorMessages = config.validate(Field.setOf(COLUMN_EXCLUDE_LIST)).get(COLUMN_EXCLUDE_LIST.name()).errorMessages();
assertThat(errorMessages).isEmpty();
}
@Test

View File

@ -59,8 +59,8 @@ If you have an existing up and running Debezium environment, you can do the benc
- SQL create table for MySQL
``` CREATE TABLE TPC.TEST ( USERNAME VARCHAR(32) NOT NULL, NAME VARCHAR(64), BLOOD_GROUP CHAR(3), RESIDENCE VARCHAR(200), COMPANY VARCHAR(128), ADDRESS VARCHAR(200), BIRTHDATE DATE, SEX CHAR(1), JOB VARCHAR(128), SSN CHAR(11), MAIL VARCHAR(128), ID INTEGER NOT NULL AUTO_INCREMENT, T0 TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) ```
- Whitelist the TPC.TEST table in your Debezium connector config JSON
``` "table.whitelist" : "TPC.TEST" ```
- Include the TPC.TEST table in your Debezium connector config JSON
``` "table.include.list" : "TPC.TEST" ```
- Enable the table for CDC on the database
- SQL for db2

View File

@ -94,7 +94,7 @@ The next few lines define the fields that are specific to the connector, which i
Here, we set the name of the host machine and port number where the MySQL database server is running, and we define the username and password that will be used to connect to the MySQL database. Note that for MySQL the username and password should correspond to a MySQL database user that has been granted the [`REPLICATION SLAVE` privilege](http://dev.mysql.com/doc/refman/5.7/en/replication-howto-repuser.html), allowing the database to read the server's binlog that is normally used for MySQL replication.
The configuration also includes a numeric identifier for the `server.id`. Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be [unique within all processes that make up the MySQL server group](http://dev.mysql.com/doc/refman/5.7/en/replication-howto-masterbaseconfig.html) and is any integer between 1 and (2^32) - 1. In our code we set it to a fairly large but somewhat random value we'll use only for our application.
The configuration also includes a numeric identifier for the `server.id`. Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be [unique within all processes that make up the MySQL server group](http://dev.mysql.com/doc/refman/{mysql-version}/en/replication-howto-masterbaseconfig.html) and is any integer between 1 and (2^32) - 1. In our code we set it to a fairly large but somewhat random value we'll use only for our application.
The configuration also specifies a logical name for the MySQL server. The connector includes this logical name within the topic field of every source record it produces, enabling your application to discern the origin of those records. Our example uses a server name of "products", presumably because the database contains product information. Of course, you can name this anything meaningful to your application.

View File

@ -88,7 +88,6 @@ public abstract class AbstractConnectorTest implements Testing {
public TestRule skipTestRule = new SkipTestRule();
protected static final Path OFFSET_STORE_PATH = Testing.Files.createTestingPath("file-connector-offsets.txt").toAbsolutePath();
protected static final String NO_MONITORED_TABLES_WARNING = "After applying blacklist/whitelist filters there are no tables to monitor, please check your configuration";
private ExecutorService executor;
protected EmbeddedEngine engine;
@ -491,7 +490,6 @@ protected SourceRecords consumeRecordsByTopic(int numRecords) throws Interrupted
* Try to consume and capture exactly the specified number of records from the connector.
*
* @param numRecords the number of records that should be consumed
* @param true if the record serialization should be tested
* @return the collector into which the records were captured; never null
* @throws InterruptedException if the thread was interrupted while waiting for a record to be returned
*/

View File

@ -37,8 +37,8 @@ public TestConfigSource() {
integrationTest.put("debezium.source.database.password", TestDatabase.POSTGRES_PASSWORD);
integrationTest.put("debezium.source.database.dbname", TestDatabase.POSTGRES_DBNAME);
integrationTest.put("debezium.source.database.server.name", "testc");
integrationTest.put("debezium.source.schema.whitelist", "inventory");
integrationTest.put("debezium.source.table.whitelist", "inventory.customers");
integrationTest.put("debezium.source.schema.include.list", "inventory");
integrationTest.put("debezium.source.table.include.list", "inventory.customers");
unitTest.put("debezium.sink.type", "test");
unitTest.put("debezium.source.connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector");

View File

@ -9,4 +9,4 @@ debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=postgres
debezium.source.database.server.name=tutorial
debezium.source.schema.whitelist=inventory
debezium.source.schema.include.list=inventory

View File

@ -40,8 +40,8 @@ public EventHubsTestConfigSource() {
eventHubsTest.put("debezium.source.database.password", TestDatabase.POSTGRES_PASSWORD);
eventHubsTest.put("debezium.source.database.dbname", TestDatabase.POSTGRES_DBNAME);
eventHubsTest.put("debezium.source.database.server.name", "testc");
eventHubsTest.put("debezium.source.schema.whitelist", "inventory");
eventHubsTest.put("debezium.source.table.whitelist", "inventory.customers");
eventHubsTest.put("debezium.source.schema.include.list", "inventory");
eventHubsTest.put("debezium.source.table.include.list", "inventory.customers");
config = eventHubsTest;
}

View File

@ -31,8 +31,8 @@ public KinesisTestConfigSource() {
kinesisTest.put("debezium.source.database.password", TestDatabase.POSTGRES_PASSWORD);
kinesisTest.put("debezium.source.database.dbname", TestDatabase.POSTGRES_DBNAME);
kinesisTest.put("debezium.source.database.server.name", "testc");
kinesisTest.put("debezium.source.schema.whitelist", "inventory");
kinesisTest.put("debezium.source.table.whitelist", "inventory.customers");
kinesisTest.put("debezium.source.schema.include.list", "inventory");
kinesisTest.put("debezium.source.table.include.list", "inventory.customers");
config = kinesisTest;
}

View File

@ -29,8 +29,8 @@ public PubSubTestConfigSource() {
pubsubTest.put("debezium.source.database.password", TestDatabase.POSTGRES_PASSWORD);
pubsubTest.put("debezium.source.database.dbname", TestDatabase.POSTGRES_DBNAME);
pubsubTest.put("debezium.source.database.server.name", "testc");
pubsubTest.put("debezium.source.schema.whitelist", "inventory");
pubsubTest.put("debezium.source.table.whitelist", "inventory.customers");
pubsubTest.put("debezium.source.schema.include.list", "inventory");
pubsubTest.put("debezium.source.table.include.list", "inventory.customers");
config = pubsubTest;
}

View File

@ -30,8 +30,8 @@ public PulsarTestConfigSource() {
pulsarTest.put("debezium.source.database.password", TestDatabase.POSTGRES_PASSWORD);
pulsarTest.put("debezium.source.database.dbname", TestDatabase.POSTGRES_DBNAME);
pulsarTest.put("debezium.source.database.server.name", "testc");
pulsarTest.put("debezium.source.schema.whitelist", "inventory");
pulsarTest.put("debezium.source.table.whitelist", "inventory.customers");
pulsarTest.put("debezium.source.schema.include.list", "inventory");
pulsarTest.put("debezium.source.table.include.list", "inventory.customers");
config = pulsarTest;
}

View File

@ -29,7 +29,7 @@ public ConnectorConfigBuilder mysql() {
.put("database.user", ConfigProperties.DATABASE_MYSQL_DBZ_USERNAME)
.put("database.password", ConfigProperties.DATABASE_MYSQL_DBZ_PASSWORD)
.put("database.server.name", "mysqldb") // this should be overwritten with unique name
.put("database.whitelist", "inventory") // might want to change
.put("database.include.list", "inventory") // might want to change
.put("database.history.kafka.bootstrap.servers", "debezium-kafka-cluster-kafka-bootstrap." + ConfigProperties.OCP_PROJECT_DBZ + ".svc.cluster.local:9092")
.put("database.history.kafka.topic", "schema-changes.inventory");
}
@ -46,7 +46,7 @@ public ConnectorConfigBuilder postgresql() {
.put("database.password", ConfigProperties.DATABASE_POSTGRESQL_DBZ_PASSWORD)
.put("database.dbname", ConfigProperties.DATABASE_POSTGRESQL_DBZ_DBNAME)
.put("database.server.name", "postgresqldb") // this should be overwritten with unique name
.put("schema.whitelist", "inventory") // might want to change
.put("schema.include.list", "inventory") // might want to change
.put("slot.name", "debezium")
.put("plugin.name", "pgoutput");
}
@ -77,6 +77,6 @@ public ConnectorConfigBuilder mongo() {
.put("mongodb.user", ConfigProperties.DATABASE_MONGO_DBZ_USERNAME)
.put("mongodb.password", ConfigProperties.DATABASE_MONGO_DBZ_PASSWORD)
.put("mongodb.name", "mongodb") // this should be overwritten with unique name
.put("database.whitelist", ConfigProperties.DATABASE_MONGO_DBZ_DBNAME); // might want to change
.put("database.include.list", ConfigProperties.DATABASE_MONGO_DBZ_DBNAME); // might want to change
}
}

View File

@ -259,7 +259,7 @@ spec:
database.password: dbz
database.server.id: 184054
database.server.name: dbserver1
database.whitelist: inventory
database.include.list: inventory
database.history.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092
database.history.kafka.topic: schema-changes.inventory
key.converter: io.apicurio.registry.utils.converter.AvroConverter
@ -303,7 +303,7 @@ Taking the snapshot involves a number of steps:
2020-02-21 17:57:30,822 INFO Step 0: disabling autocommit, enabling repeatable read transactions, and setting lock wait timeout to 10 (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
2020-02-21 17:57:30,836 INFO Step 1: flush and obtain global read lock to prevent writes to database (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
2020-02-21 17:57:30,839 INFO Step 2: start transaction with consistent snapshot (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
2020-02-21 17:57:30,840 INFO Step 3: read binlog position of MySQL master (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
2020-02-21 17:57:30,840 INFO Step 3: read binlog position of MySQL primary server (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
2020-02-21 17:57:30,843 INFO using binlog 'mysql-bin.000003' at position '154' and gtid '' (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]
...
2020-02-21 17:57:34,423 INFO Step 9: committing transaction (io.debezium.connector.mysql.SnapshotReader) [debezium-mysqlconnector-dbserver1-snapshot]

View File

@ -22,10 +22,10 @@ SQL based polling model of tables put into "capture mode". This uses the ASN lib
In order to use ASN and hence this connector, you need to have a license for the IBM InfoSphere Data Replication (IIDR) product.
However, IIDR itself does not need to be installed.
The first time it connects to a Db2 database, it reads a consistent snapshot of all of the tables that are whitelisted (or not blacklisted depending on the mode of operation).
The first time it connects to a Db2 database, it reads a consistent snapshot of all of the tables that are included (or not excluded depending on the mode of operation).
Note that by default all tables in the database are snapshotted, NOT just those that are in capture mode.
When that snapshot is complete, the connector continuously streams the changes that were committed to the Db2 database for all whitelisted tables in capture mode. This generates corresponding insert, update and delete events.
When that snapshot is complete, the connector continuously streams the changes that were committed to the Db2 database for all included tables in capture mode. This generates corresponding insert, update and delete events.
All of the events for each table are recorded in a separate Kafka topic, where they can be easily consumed by applications and services.
[[db2-overview]]
@ -50,7 +50,7 @@ The client applications read the Kafka topics that correspond to the database ta
The database administrator normally enables _CDC_ in the middle of the life of a table.
This means that the connector does not have the complete history of all changes that have been made to the table.
Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each of the whitelisted tables.
Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each of the included tables.
After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made for tables in capture mode.
This way, we start with a consistent view of all of the data, yet continue reading without having lost any of the changes made while the snapshot was taking place.
@ -240,7 +240,7 @@ By default (snapshotting mode *initial*) the connector will upon the first start
Each snapshot consists of the following steps:
1. Determine the tables to be snapshoted from whitelist/blacklist
1. Determine the tables to be snapshotted from the include/exclude lists
2. Obtain a lock on each of the monitored tables to ensure that no structural changes can occur to any of the tables.
The level of the lock is determined by `snapshot.isolation.mode` configuration option.
3. Read the maximum LSN ("log sequence number") position in the server's transaction log.
@ -264,7 +264,7 @@ This assures that the changes are replayed by {prodname} in the same order as we
After a restart, the connector will resume from the offset (commit and change LSNs) where it left off before.
The connector is able to detect whether the CDC is enabled or disabled for whitelisted source table during the runtime and modify its behavior.
The connector is able to detect whether CDC is enabled or disabled for an included source table at runtime and modifies its behavior accordingly.
[[db2-topic-names]]
=== Topic names

View File

@ -663,7 +663,7 @@ Typically, you configure the {prodname} MongoDB connector in a `.json` file usin
"connector.class": "io.debezium.connector.mongodb.MongoDbConnector", // <2>
"mongodb.hosts": "rs0/192.168.99.100:27017", // <3>
"mongodb.name": "fullfillment", // <4>
"collection.whitelist": "inventory[.]*", // <5>
"collection.include.list": "inventory[.]*", // <5>
}
}
----
@ -691,7 +691,7 @@ apiVersion: kafka.strimzi.io/v1beta1
config:
mongodb.hosts: rs0/192.168.99.100:27017 // <3>
mongodb.name: fulfillment // <4>
collection.whitelist: inventory[.]* // <5>
collection.include.list: inventory[.]* // <5>
----
<1> The name of our connector when we register it with a Kafka Connect service.
<2> The name of the MongoDB connector class.
@ -803,28 +803,31 @@ Only alphanumeric characters and underscores should be used.
|`false`
|When SSL is enabled this setting controls whether strict hostname checking is disabled during connection phase. If `true` the connection will not prevent man-in-the-middle attacks.
|[[mongodb-property-database-whitelist]]<<mongodb-property-database-whitelist, `database.whitelist`>>
|[[mongodb-property-database-whitelist]][[mongodb-property-database-include-list]]<<mongodb-property-database-include-list, `database.include.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in the whitelist is excluded from monitoring. By default all databases is monitored.
May not be used with `database.blacklist`.
|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in `database.include.list` is excluded from monitoring. By default all databases are monitored.
Must not be used with `database.exclude.list`.
|[[mongodb-property-database-blacklist]]<<mongodb-property-database-blacklist, `database.blacklist`>>
|[[mongodb-property-database-blacklist]][[mongodb-property-database-exclude-list]]<<mongodb-property-database-exclude-list, `database.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in the blacklist is monitored. May not be used with `database.whitelist`.
|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in `database.exclude.list` is monitored.
Must not be used with `database.include.list`.
|[[mongodb-property-collection-whitelist]]<<mongodb-property-collection-whitelist, `collection.whitelist`>>
|[[mongodb-property-collection-whitelist]][[mongodb-property-collection-include-list]]<<mongodb-property-collection-include-list, `collection.include.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be monitored; any collection not included in the whitelist is excluded from monitoring. Each identifier is of the form _databaseName_._collectionName_. By default the connector will monitor all collections except those in the `local` and `admin` databases. May not be used with `collection.blacklist`.
|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be monitored; any collection not included in `collection.include.list` is excluded from monitoring. Each identifier is of the form _databaseName_._collectionName_. By default the connector will monitor all collections except those in the `local` and `admin` databases.
Must not be used with `collection.exclude.list`.
|[[mongodb-property-collection-blacklist]]<<mongodb-property-collection-blacklist, `collection.blacklist`>>
|[[mongodb-property-collection-blacklist]][[mongodb-property-collection-exclude-list]]<<mongodb-property-collection-exclude-list, `collection.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be excluded from monitoring; any collection not included in the blacklist is monitored. Each identifier is of the form _databaseName_._collectionName_. May not be used with `collection.whitelist`.
|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be excluded from monitoring; any collection not included in `collection.exclude.list` is monitored. Each identifier is of the form _databaseName_._collectionName_.
Must not be used with `collection.include.list`.
|[[mongodb-property-snapshot-mode]]<<mongodb-property-snapshot-mode, `snapshot.mode`>>
|`initial`
|Specifies the criteria for running a snapshot upon startup of the connector. The default is *initial*, and specifies the connector reads a snapshot when either no offset is found or if the oplog no longer contains the previous offset. The *never* option specifies that the connector should never use snapshots, instead the connector should proceed to tail the log.
|[[mongodb-property-field-blacklist]]<<mongodb-property-field-blacklist, `field.blacklist`>>
|[[mongodb-property-field-blacklist]][[mongodb-property-field-exclude-list]]<<mongodb-property-field-exclude-list, `field.exclude.list`>>
|_empty string_
|An optional comma-separated list of the fully-qualified names of fields that should be excluded from change event message values. Fully-qualified names for fields are of the form _databaseName_._collectionName_._fieldName_._nestedFieldName_, where _databaseName_ and _collectionName_ may contain the wildcard (*) which matches any characters.

View File

@ -502,7 +502,7 @@ This key describes output from the connector named `PostgreSQL_server`, for the
[NOTE]
====
Although the `column.blacklist` and `column.whitelist` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.
Although the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.
====
[WARNING]
@ -1507,11 +1507,11 @@ ifdef::community[]
It is possible to capture changes in a PostgreSQL database that is running in link:https://aws.amazon.com/rds/[Amazon RDS]. To do this:
* Set the instance parameter `rds.logical_replication` to `1`.
* Verify that the `wal_level` parameter is set to `logical` by running the query `SHOW wal_level` as the database master user. This might not be the case in multi-zone replication setups.
* Verify that the `wal_level` parameter is set to `logical` by running the query `SHOW wal_level` as the RDS master user. This might not be the case in multi-zone replication setups.
You cannot set this option manually. It is link:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html[automatically changed] when the `rds.logical_replication` parameter is set to `1`.
If the `wal_level` is not `logical` after the change above, it is probably because the instance has to be restarted due to the parameter group change. This happens according to your maintenance window or you can do it manually.
* Set the {prodname} `plugin.name` parameter to `wal2json`. You can skip this on PostgreSQL 10+ if you plan to use `pgoutput` logical replication stream support.
* Use the database master account for replication as RDS currently does not support setting of `REPLICATION` privilege for another account.
* Use the RDS master account for replication as RDS currently does not support setting of `REPLICATION` privilege for another account.
[IMPORTANT]
====
@ -1796,7 +1796,7 @@ You can choose to produce events for a subset of the schemas and tables. Optiona
"database.password": "postgres", // <6>
"database.dbname" : "postgres", // <7>
"database.server.name": "fullfillment", // <8>
"table.whitelist": "public.inventory" // <9>
"table.include.list": "public.inventory" // <9>
}
}
@ -1836,7 +1836,7 @@ apiVersion: kafka.strimzi.io/v1beta1
database.password: dbz
database.dbname: postgres
database.server.name: fullfillment // <5>
database.whitelist: public.inventory // <6>
database.include.list: public.inventory // <6>
----
<1> The name of the connector.
<2> Only one task should operate at any one time.
@ -2072,7 +2072,7 @@ Set to `true` in only testing or development environments. Dropping the slot all
|The name of the PostgreSQL publication created for streaming changes when using `pgoutput`.
This publication is created at start-up if it does not already exist and it includes _all tables_.
{prodname} then applies its own whitelist/blacklist filtering, if configured, to limit the publication to change events for the specific tables of interest.
{prodname} then applies its own include/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest.
The connector user must have superuser permissions to create this publication,
so it is usually preferable to create the publication before starting the connector for the first time.
@ -2102,29 +2102,29 @@ If the publication already exists, either for all tables or configured with a su
|
|Logical name that identifies and provides a namespace for the particular PostgreSQL database server or cluster in which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. The logical name should be unique across all other connectors, since it is used as a prefix for all Kafka topic names that receive records from this connector.
|[[postgresql-property-schema-whitelist]]<<postgresql-property-schema-whitelist, `schema.whitelist`>>
|[[postgresql-property-schema-whitelist]][[postgresql-property-schema-include-list]]<<postgresql-property-schema-include-list, `schema.include.list`>>
|
|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes. Any schema name not included in the whitelist is excluded from having its changes captured. By default, all non-system schemas have their changes captured. Do not also set the `schema.blacklist` property.
|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes. Any schema name not included in `schema.include.list` is excluded from having its changes captured. By default, all non-system schemas have their changes captured. Do not also set the `schema.exclude.list` property.
|[[postgresql-property-schema-blacklist]]<<postgresql-property-schema-blacklist, `schema.blacklist`>>
|[[postgresql-property-schema-blacklist]][[postgresql-property-schema-exclude-list]]<<postgresql-property-schema-exclude-list, `schema.exclude.list`>>
|
|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes. Any schema whose name is not included in the blacklist has its changes captured, with the exception of system schemas. Do not also set the `schema.whitelist` property.
|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes. Any schema whose name is not included in `schema.exclude.list` has its changes captured, with the exception of system schemas. Do not also set the `schema.include.list` property.
|[[postgresql-property-table-whitelist]]<<postgresql-property-table-whitelist, `table.whitelist`>>
|[[postgresql-property-table-whitelist]][[postgresql-property-table-include-list]]<<postgresql-property-table-include-list, `table.include.list`>>
|
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. Any table not included in the whitelist does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.blacklist` property.
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. Any table not included in `table.include.list` does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.exclude.list` property.
|[[postgresql-property-table-blacklist]]<<postgresql-property-table-blacklist, `table.blacklist`>>
|[[postgresql-property-table-blacklist]][[postgresql-property-table-exclude-list]]<<postgresql-property-table-exclude-list, `table.exclude.list`>>
|
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in the blacklist has it changes captured. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.whitelist` property.
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in `table.exclude.list` has its changes captured. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.
|[[postgresql-property-column-whitelist]]<<postgresql-property-column-whitelist, `column.whitelist`>>
|[[postgresql-property-column-whitelist]][[postgresql-property-column-include-list]]<<postgresql-property-column-include-list, `column.include.list`>>
|
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.blacklist` property.
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.exclude.list` property.
|[[postgresql-property-column-blacklist]]<<postgresql-property-column-blacklist, `column.blacklist`>>
|[[postgresql-property-column-blacklist]][[postgresql-property-column-exclude-list]]<<postgresql-property-column-exclude-list, `column.exclude.list`>>
|
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.whitelist` property.
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.include.list` property.
|[[postgresql-property-time-precision-mode]]<<postgresql-property-time-precision-mode, `time.precision.mode`>>
|`adaptive`
@ -2268,7 +2268,7 @@ If `table_a` has a an `id` column, and `regex_1` is `^i` (matches any column tha
`disabled` - The connector does not attempt to create a publication. A database administrator or the user configured to perform replications must have created the publication before running the connector. If the connector cannot find the publication, the connector throws an exception and stops.
`filtered` - If a publication exists, the connector uses it. If no publication exists, the connector creates a new publication for tables that match the current filter configuration as specified by the `database.blacklist`, `database.whitelist`, `table.blacklist`, and `table.whitelist` connector configuration properties. For example: `CREATE PUBLICATION <publication_name> FOR TABLE <tbl1, tbl2, etc>`.
`filtered` - If a publication exists, the connector uses it. If no publication exists, the connector creates a new publication for tables that match the current filter configuration as specified by the `database.exclude.list`, `database.include.list`, `table.exclude.list`, and `table.include.list` connector configuration properties. For example: `CREATE PUBLICATION <publication_name> FOR TABLE <tbl1, tbl2, etc>`.
|[[postgresql-property-binary-handling-mode]]<<postgresql-property-binary-handling-mode, `binary.handling.mode`>>
|bytes

View File

@ -48,7 +48,7 @@ This includes snapshots: if the snapshot was not completed when the connector is
== Setting up SQL Server
Before using the SQL Server connector to monitor the changes committed on SQL Server, first enable _CDC_ on a monitored database.
Please bear in mind that _CDC_ cannot be enabled for the `master` database.
Please bear in mind that _CDC_ cannot be enabled for the `master` database, as `master` is the fixed name of this SQL Server system database.
[source,sql]
----
@ -105,7 +105,7 @@ ifdef::community[]
The SQL Server plug-in can capture changes from an Always On read-only replica.
A few prerequisites must be fulfilled:
* Change data capture is configured and enabled on the master node.
* Change data capture is configured and enabled on the primary node.
SQL Server does not support CDC directly on replicas.
* The configuration option `database.applicationIntent` must be set to `ReadOnly`.
This is required by SQL Server.
@ -153,7 +153,7 @@ This ensures that the changes are replayed by {prodname} in the same order as we
After a restart, the connector will resume from the offset (commit and change LSNs) where it left off before.
The connector is able to detect whether CDC is enabled or disabled for whitelisted source tables and adjust its behavior.
The connector is able to detect whether CDC is enabled or disabled for included source tables and adjusts its behavior.
[[sqlserver-topic-names]]
=== Topic names
@ -385,7 +385,7 @@ Therefore, you can interpret this key as describing the row in the `dbo.customer
ifdef::community[]
[NOTE]
====
Although the `column.blacklist` configuration property allows you to remove columns from the event values, all columns in a primary or unique key are always included in the event's key.
Although the `column.exclude.list` configuration property allows you to remove columns from the event values, all columns in a primary or unique key are always included in the event's key.
====
[WARNING]
@ -1244,7 +1244,7 @@ Typically, you configure the {prodname} SQL Server connector in a `.json` file u
"database.password": "Password!", // <6>
"database.dbname": "testDB", // <7>
"database.server.name": "fullfillment", // <8>
"table.whitelist": "dbo.customers", // <9>
"table.include.list": "dbo.customers", // <9>
"database.history.kafka.bootstrap.servers": "kafka:9092", // <10>
"database.history.kafka.topic": "dbhistory.fullfillment" // <11>
}
@ -1284,7 +1284,7 @@ apiVersion: kafka.strimzi.io/v1beta1
database.password: dbz //<6>
database.dbname: testDB //<7>
database.server.name: fullfillment //<8>
database.whitelist: dbo.customers //<9>
table.include.list: dbo.customers //<9>
database.history.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 //<10>
database.history.kafka.topic: dbhistory.fullfillment //<11>
@ -1396,20 +1396,21 @@ Only alphanumeric characters and underscores should be used.
|A list of host/port pairs that the connector will use for establishing an initial connection to the Kafka cluster.
This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. This should point to the same Kafka cluster used by the Kafka Connect process.
|[[sqlserver-property-table-whitelist]]<<sqlserver-property-table-whitelist, `table.whitelist`>>
|[[sqlserver-property-table-whitelist]][[sqlserver-property-table-include-list]]<<sqlserver-property-table-include-list, `table.include.list`>>
|
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in the whitelist is excluded from monitoring. Each identifier is of the form _schemaName_._tableName_. By default the connector will monitor every non-system table in each monitored schema. May not be used with `table.blacklist`.
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in `table.include.list` is excluded from monitoring. Each identifier is of the form _schemaName_._tableName_. By default the connector will monitor every non-system table in each monitored schema.
Must not be used with `table.exclude.list`.
|[[sqlserver-property-table-blacklist]]<<sqlserver-property-table-blacklist, `table.blacklist`>>
|[[sqlserver-property-table-blacklist]][[sqlserver-property-table-exclude-list]]<<sqlserver-property-table-exclude-list, `table.exclude.list`>>
|
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in the blacklist is monitored.
Each identifier is of the form _schemaName_._tableName_. May not be used with `table.whitelist`.
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in `table.exclude.list` is monitored.
Each identifier is of the form _schemaName_._tableName_. Must not be used with `table.include.list`.
|[[sqlserver-property-column-blacklist]]<<sqlserver-property-column-blacklist, `column.blacklist`>>
|[[sqlserver-property-column-blacklist]][[sqlserver-property-column-exclude-list]]<<sqlserver-property-column-exclude-list, `column.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values.
Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.
Note that primary key columns are always included in the event's key, also if blacklisted from the value.
Note that primary key columns are always included in the event's key, even if they are excluded from the value.
|[[sqlserver-property-column-mask-hash]]<<sqlserver-property-column-mask-hash, `column.mask.hash._hashAlgorithm_.with.salt._salt_`>>
|_n/a_

View File

@ -173,7 +173,7 @@ The first three privileges are required when reading a consistent snapshot of th
The last two privileges allow the database to read the server's binlog that is normally used for MySQL replication.
The configuration also includes a numeric identifier for the `server.id`.
Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be https://dev.mysql.com/doc/refman/5.7/en/replication-howto-masterbaseconfig.html[unique within all processes that make up the MySQL server group] and is any integer between 1 and 2^32^-1.
Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be https://dev.mysql.com/doc/refman/{mysql-version}/en/replication-howto-masterbaseconfig.html[unique within all processes that make up the MySQL server group] and is any integer between 1 and 2^32^-1.
In our code we set it to a fairly large but somewhat random value we'll use only for our application.
The configuration also specifies a logical name for the MySQL server.
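As an illustrative sketch, the two settings appear together in the connector configuration; the values below are placeholders rather than recommendations:

[source,json]
----
{
  "database.server.id": "184054",
  "database.server.name": "products"
}
----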

View File

@ -22,7 +22,7 @@ The actual change data capture feature of {prodname} is amended with a range of
* *Snapshots:* optionally, an initial snapshot of a database's current state can be taken when a connector starts up and not all logs still exist (typically the case when the database has been running for some time and has discarded transaction logs no longer needed for transaction recovery or replication);
different snapshotting modes exist; refer to the docs of the specific connector you're using to learn more
* *Filters:* the set of captured schemas, tables and columns can be configured via whitelist/blacklist filters
* *Filters:* the set of captured schemas, tables and columns can be configured via include/exclude list filters
* *Masking:* the values from specific columns can be masked, e.g. for sensitive data
* *Monitoring:* most connectors can be monitored using JMX
* Different ready-to-use *message transformations*:

View File

@ -80,7 +80,7 @@ debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=postgres
debezium.source.database.server.name=tutorial
debezium.source.schema.whitelist=inventory
debezium.source.schema.include.list=inventory
----
When the server is started, it generates a sequence of log messages like this:
@ -168,7 +168,7 @@ __ ____ __ _____ ___ __ ____ ______
2020-05-15 11:33:12,839 INFO [io.deb.con.com.BaseSourceTask] (pool-3-thread-1) name = kinesis
2020-05-15 11:33:12,839 INFO [io.deb.con.com.BaseSourceTask] (pool-3-thread-1) database.server.name = tutorial
2020-05-15 11:33:12,839 INFO [io.deb.con.com.BaseSourceTask] (pool-3-thread-1) database.port = 5432
2020-05-15 11:33:12,839 INFO [io.deb.con.com.BaseSourceTask] (pool-3-thread-1) schema.whitelist = inventory
2020-05-15 11:33:12,839 INFO [io.deb.con.com.BaseSourceTask] (pool-3-thread-1) schema.include.list = inventory
2020-05-15 11:33:12,908 INFO [io.quarkus] (main) debezium-server 1.2.0-SNAPSHOT (powered by Quarkus 1.4.1.Final) started in 1.198s. Listening on: http://0.0.0.0:8080
2020-05-15 11:33:12,911 INFO [io.quarkus] (main) Profile prod activated.
2020-05-15 11:33:12,911 INFO [io.quarkus] (main) Installed features: [cdi, smallrye-health]

View File

@ -154,7 +154,7 @@ Here, we set the name of the host machine and port number where the MySQL databa
The first three privileges are required when reading a consistent snapshot of the databases. The last two privileges allow the database to read the server's binlog that is normally used for MySQL replication.
The configuration also includes a numeric identifier for the `server.id`. Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be https://dev.mysql.com/doc/refman/5.7/en/replication-howto-masterbaseconfig.html[unique within all processes that make up the MySQL server group] and is any integer between 1 and 2^32^-1. In our code we set it to a fairly large but somewhat random value we'll use only for our application.
The configuration also includes a numeric identifier for the `server.id`. Since MySQL's binlog is part of the MySQL replication mechanism, in order to read the binlog the `MySqlConnector` instance must join the MySQL server group, and that means this server ID must be https://dev.mysql.com/doc/refman/{mysql-version}/en/replication-howto-masterbaseconfig.html[unique within all processes that make up the MySQL server group] and is any integer between 1 and 2^32^-1. In our code we set it to a fairly large but somewhat random value we'll use only for our application.
The configuration also specifies a logical name for the MySQL server. The connector includes this logical name within the topic field of every source record it produces, enabling your application to discern the origin of those records. Our example uses a server name of "products", presumably because the database contains product information. Of course, you can name this anything meaningful to your application.

View File

@ -142,7 +142,7 @@ oc exec -i -c kafka broker-kafka-0 -- curl -X POST \
"database.password": "dbz",
"database.server.id": "184054",
"database.server.name": "dbserver1",
"database.whitelist": "inventory",
"database.include.list": "inventory",
"database.history.kafka.bootstrap.servers": "broker-kafka-bootstrap:9092",
"database.history.kafka.topic": "schema-changes.inventory"
}

View File

@ -16,7 +16,7 @@
|`NumberOfEventsFiltered`
|`long`
| The number of events that have been filtered by whitelist or blacklist filtering rules configured on the connector.
| The number of events that have been filtered by include/exclude list filtering rules configured on the connector.
|`MonitoredTables`
|`string[]`

View File

@ -16,7 +16,7 @@
|`NumberOfEventsFiltered`
|`long`
|The number of events that have been filtered by whitelist or blacklist filtering rules configured on the connector.
|The number of events that have been filtered by include/exclude list filtering rules configured on the connector.
|`MonitoredTables`
|`string[]`

View File

@ -12,7 +12,7 @@ When the connector restarts after having crashed or been stopped gracefully, the
This database history topic is for connector use only. The connector can optionally generate schema change events on a different topic that is intended for consumer applications. This is described in {link-prefix}:{link-mysql-connector}#how-the-mysql-connector-handles-schema-change-topics_{context}[how the MySQL connector handles schema change topics].
When the MySQL connector captures changes in a table to which a schema change tool such as `gh-ost` or `pt-online-schema-change` is applied then helper tables created during the migration process need to be included among whitelisted tables.
When the MySQL connector captures changes in a table to which a schema change tool such as `gh-ost` or `pt-online-schema-change` is applied, the helper tables created during the migration process must also be added to the table include list.
If downstream systems do not need the messages generated for these helper tables, a simple message transform can be written and applied to filter them out, as shown in the sketch below.
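A minimal sketch of such a transform, assuming the {prodname} scripting-based `Filter` SMT and a Groovy engine are available on the Kafka Connect classpath; the transform name, topic pattern, and `gh-ost`-style helper-table suffixes are illustrative assumptions:

[source,json]
----
{
  "transforms": "skipHelperTables",
  "transforms.skipHelperTables.type": "io.debezium.transforms.Filter",
  "transforms.skipHelperTables.language": "jsr223.groovy",
  "transforms.skipHelperTables.topic.regex": "dbserver1\\.inventory\\._.*_(gho|ghc|del)",
  "transforms.skipHelperTables.condition": "false"
}
----

Records whose topic matches `topic.regex` are evaluated against `condition`; because the condition is always `false` here, every matching record is dropped, while records from all other topics pass through unchanged.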

View File

@ -31,7 +31,7 @@ TIP: For a complete list of configuration properties, see {link-prefix}:{link-my
"database.password": "debezium-user-pw", <6>
"database.server.id": "184054", <7>
"database.server.name": "fullfillment", <8>
"database.whitelist": "inventory", <9>
"database.include.list": "inventory", <9>
"database.history.kafka.bootstrap.servers": "kafka:9092", <10>
"database.history.kafka.topic": "dbhistory.fullfillment", <11>
"include.schema.changes": "true" <12>
@ -90,7 +90,7 @@ TIP: For a complete list of configuration properties, see {link-prefix}:{link-my
database.password: dbz
database.server.id: 184054 // <5>
database.server.name: dbserver1 // <5>
database.whitelist: inventory // <6>
database.include.list: inventory // <6>
database.history.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 // <7>
database.history.kafka.topic: schema-changes.inventory // <7>
----

View File

@ -4,7 +4,7 @@
[id="enable-mysql-gtids-for-cdc_{context}"]
= Enabling MySQL Global Transaction Identifiers for {prodname}
Global transaction identifiers (GTIDs) uniquely identify transactions that occur on a server within a cluster. Though not required for the {prodname} MySQL connector, using GTIDs simplifies replication and allows you to more easily confirm if master and slave servers are consistent.
Global transaction identifiers (GTIDs) uniquely identify transactions that occur on a server within a cluster. Though not required for the {prodname} MySQL connector, using GTIDs simplifies replication and allows you to more easily confirm if primary and replica servers are consistent.
NOTE: GTIDs are only available from MySQL 5.6.5 and later. See the link:https://dev.mysql.com/doc/refman/{mysql-version}/en/replication-options-gtids.html#option_mysqld_gtid-mode[MySQL documentation] for more details.
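A minimal sketch of the server options involved, as they might appear in `my.cnf`; note that enabling GTIDs on a live replication topology requires stepping `gtid_mode` through its transitional states, which is beyond this fragment:

[source,properties]
----
gtid_mode=ON
enforce_gtid_consistency=ON
----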

View File

@ -54,23 +54,27 @@ Only alphanumeric characters and underscores should be used.
|
|A list of host/port pairs that the connector will use for establishing an initial connection to the Kafka cluster. This connection will be used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. This should point to the same Kafka cluster used by the Kafka Connect process.
|[[mysql-property-database-whitelist]]<<mysql-property-database-whitelist, `database.whitelist`>>
|[[mysql-property-database-whitelist]][[mysql-property-database-include-list]]<<mysql-property-database-include-list, `database.include.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in the whitelist will be excluded from monitoring. By default all databases will be monitored. May not be used with `database.blacklist`.
|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in `database.include.list` will be excluded from monitoring. By default all databases will be monitored.
Must not be used with `database.exclude.list`.
|[[mysql-property-database-blacklist]]<<mysql-property-database-blacklist, `database.blacklist`>>
|[[mysql-property-database-blacklist]][[mysql-property-database-exclude-list]]<<mysql-property-database-exclude-list, `database.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in the blacklist will be monitored. May not be used with `database.whitelist`.
|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in `database.exclude.list` will be monitored.
Must not be used with `database.include.list`.
|[[mysql-property-table-whitelist]]<<mysql-property-table-whitelist, `table.whitelist`>>
|[[mysql-property-table-whitelist]][[mysql-property-table-include-list]]<<mysql-property-table-include-list, `table.include.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in the whitelist will be excluded from monitoring. Each identifier is of the form _databaseName_._tableName_. By default the connector will monitor every non-system table in each monitored database. May not be used with `table.blacklist`.
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in `table.include.list` will be excluded from monitoring. Each identifier is of the form _databaseName_._tableName_. By default the connector will monitor every non-system table in each monitored database.
Must not be used with `table.exclude.list`.
|[[mysql-property-table-blacklist]]<<mysql-property-table-blacklist, `table.blacklist`>>
|[[mysql-property-table-blacklist]][[mysql-property-table-exclude-list]]<<mysql-property-table-exclude-list, `table.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in the blacklist will be monitored. Each identifier is of the form _databaseName_._tableName_. May not be used with `table.whitelist`.
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in `table.exclude.list` will be monitored. Each identifier is of the form _databaseName_._tableName_.
Must not be used with `table.include.list`.
|[[mysql-property-column-blacklist]]<<mysql-property-column-blacklist, `column.blacklist`>>
|[[mysql-property-column-blacklist]][[mysql-property-column-exclude-list]]<<mysql-property-column-exclude-list, `column.exclude.list`>>
|_empty string_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.
@ -138,7 +142,7 @@ or `connect` always represents time and timestamp values using Kafka Connect's b
|`false`
|Boolean value that specifies whether the connector should include the original SQL query that generated the change event. +
Note: This option requires MySQL to be configured with the `binlog_rows_query_log_events` option set to `ON`. The query is not present for events generated by the snapshot process. +
WARNING: Enabling this option may expose tables or fields explicitly blacklisted or masked by including the original SQL statement in the change event. For this reason this option is defaulted to 'false'.
WARNING: Enabling this option may expose tables or fields that are explicitly excluded or masked, because the original SQL statement is included in the change event. For this reason, this option defaults to `false`.
|[[mysql-property-event-processing-failure-handling-mode]]<<mysql-property-event-processing-failure-handling-mode, `event.processing{zwsp}.failure.handling.mode`>>
|`fail`
@ -172,18 +176,20 @@ WARNING: Enabling this option may expose tables or fields explicitly blacklisted
|[[mysql-property-gtid-source-includes]]<<mysql-property-gtid-source-includes, `gtid.source.includes`>>
|
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching one of these include patterns will be used. May not be used with `gtid.source.excludes`.
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching one of these include patterns will be used.
Must not be used with `gtid.source.excludes`.
|[[mysql-property-gtid-source-excludes]]<<mysql-property-gtid-source-excludes, `gtid.source.excludes`>>
|
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching none of these exclude patterns will be used. May not be used with `gtid.source.includes`.
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching none of these exclude patterns will be used.
Must not be used with `gtid.source.includes`.
ifdef::community[]
// Do not include deprecated content in downstream doc
|[[mysql-property-gtid-new-channel-position]]<<mysql-property-gtid-new-channel-position, `gtid.new.channel.position`>> +
_deprecated and scheduled for removal_
|`earliest`
| When set to `latest`, when the connector sees a new GTID channel, it will start consuming from the last executed transaction in that GTID channel. If set to `earliest` (default), the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have a active-passive MySQL setup where {prodname} is connected to master, in this case during failover the slave with new UUID (and GTID channel) starts receiving writes before {prodname} is connected. These writes would be lost when using `latest`.
| When set to `latest`, when the connector sees a new GTID channel, it starts consuming from the last executed transaction in that GTID channel. When set to `earliest` (the default), the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have an active-passive MySQL setup where {prodname} is connected to the primary server; in this case, during failover, the replica with the new UUID (and GTID channel) starts receiving writes before {prodname} is connected to it. These writes would be lost when using `latest`.
endif::community[]
|[[mysql-property-tombstones-on-delete]]<<mysql-property-tombstones-on-delete, `tombstones.on.delete`>>
@ -219,7 +225,7 @@ The following table describes {link-prefix}:{link-mysql-connector}#advanced-mysq
|[[mysql-property-table-ignore-builtin]]<<mysql-property-table-ignore-builtin, `table.ignore.builtin`>>
|`true`
|Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table whitelist or blacklists. By default system tables are excluded from monitoring, and no events are generated when changes are made to any of the system tables.
|Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table include or exclude lists. By default system tables are excluded from monitoring, and no events are generated when changes are made to any of the system tables.
|[[mysql-property-database-history-kafka-recovery-poll-interval-ms]]<<mysql-property-database-history-kafka-recovery-poll-interval-ms, `database.history.kafka.recovery.poll.interval.ms`>>
|`100`

View File

@ -9,18 +9,18 @@ The {prodname} MySQL connector supports the following MySQL topologies:
Standalone::
When a single MySQL server is used, the server must have the binlog enabled (_and optionally GTIDs enabled_) so the {prodname} MySQL connector can monitor the server. This is often acceptable, since the binary log can also be used as an incremental link:https://dev.mysql.com/doc/refman/{mysql-version}/en/backup-methods.html[backup]. In this case, the MySQL connector always connects to and follows this standalone MySQL server instance.
Master and slave::
The {prodname} MySQL connector can follow one of the masters or one of the slaves (_if that slave has its binlog enabled_), but the connector only sees changes in the cluster that are visible to that server. Generally, this is not a problem except for the multi-master topologies.
Primary and replica::
The {prodname} MySQL connector can follow one of the primary servers or one of the replicas (_if that replica has its binlog enabled_), but the connector only sees changes in the cluster that are visible to that server. Generally, this is not a problem except for the multi-primary topologies.
+
The connector records its position in the server's binlog, which is different on each server in the cluster. Therefore, the connector needs to follow just one MySQL server instance. If that server fails, it must be restarted or recovered before the connector can continue.
Highly available clusters::
A variety of link:https://dev.mysql.com/doc/mysql-ha-scalability/en/[high availability] solutions exist for MySQL, and they make it far easier to tolerate and almost immediately recover from problems and failures. Most HA MySQL clusters use GTIDs so that slaves are able to keep track of all changes on any of the master.
A variety of link:https://dev.mysql.com/doc/mysql-ha-scalability/en/[high availability] solutions exist for MySQL, and they make it far easier to tolerate and almost immediately recover from problems and failures. Most HA MySQL clusters use GTIDs so that replicas are able to keep track of all changes on any of the primary servers.
Multi-master::
A link:https://dev.mysql.com/doc/refman/{mysql-version}/en/mysql-cluster-replication-multi-master.html[multi-master MySQL topology] uses one or more MySQL slaves that each replicate from multiple masters. This is a powerful way to aggregate the replication of multiple MySQL clusters, and requires using GTIDs.
Multi-primary::
A multi-primary MySQL topology, for example link:https://dev.mysql.com/doc/refman/{mysql-version}/en/mysql-cluster-replication-multi-source.html[NDB Cluster Replication], uses one or more MySQL replica nodes that each replicate from multiple primary servers. This is a powerful way to aggregate the replication of multiple MySQL clusters, and requires using GTIDs.
+
The {prodname} MySQL connector can use these multi-master MySQL slaves as sources, and can fail over to different multi-master MySQL slaves as long as thew new slave is caught up to the old slave (_e.g., the new slave has all of the transactions that were last seen on the first slave_). This works even if the connector is only using a subset of databases and/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-master MySQL slave and find the correct position in the binlog.
The {prodname} MySQL connector can use these multi-primary MySQL replicas as sources, and can fail over to different multi-primary MySQL replicas as long as the new replica is caught up to the old replica (_e.g., the new replica has all the transactions that were last seen on the first replica_). This works even if the connector is only using a subset of databases and/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-primary MySQL replica and find the correct position in the binlog.
Hosted::
The {prodname} MySQL connector can use hosted options such as Amazon RDS and Amazon Aurora.

View File

@ -42,7 +42,7 @@ you will register the following connector:
"database.password": "dbz",
"database.server.id": "184054", // <5>
"database.server.name": "dbserver1", // <5>
"database.whitelist": "inventory", // <6>
"database.include.list": "inventory", // <6>
"database.history.kafka.bootstrap.servers": "kafka:9092", // <7>
"database.history.kafka.topic": "schema-changes.inventory" // <7>
}
@ -80,7 +80,7 @@ replace `localhost` with the IP address of your Docker host.
[source,shell,options="nowrap"]
----
$ curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" localhost:8083/connectors/ -d '{ "name": "inventory-connector", "config": { "connector.class": "io.debezium.connector.mysql.MySqlConnector", "tasks.max": "1", "database.hostname": "mysql", "database.port": "3306", "database.user": "debezium", "database.password": "dbz", "database.server.id": "184054", "database.server.name": "dbserver1", "database.whitelist": "inventory", "database.history.kafka.bootstrap.servers": "kafka:9092", "database.history.kafka.topic": "dbhistory.inventory" } }'
$ curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" localhost:8083/connectors/ -d '{ "name": "inventory-connector", "config": { "connector.class": "io.debezium.connector.mysql.MySqlConnector", "tasks.max": "1", "database.hostname": "mysql", "database.port": "3306", "database.user": "debezium", "database.password": "dbz", "database.server.id": "184054", "database.server.name": "dbserver1", "database.include.list": "inventory", "database.history.kafka.bootstrap.servers": "kafka:9092", "database.history.kafka.topic": "dbhistory.inventory" } }'
----
ifdef::windows[]
@ -91,7 +91,7 @@ For example:
[source,shell,options="nowrap"]
----
$ curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" localhost:8083/connectors/ -d '{ \"name\": \"inventory-connector\", \"config\": { \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\", \"tasks.max\": \"1\", \"database.hostname\": \"mysql\", \"database.port\": \"3306\", \"database.user\": \"debezium\", \"database.password\": \"dbz\", \"database.server.id\": \"184054\", \"database.server.name\": \"dbserver1\", \"database.whitelist\": \"inventory\", \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \"database.history.kafka.topic\": \"dbhistory.inventory\" } }'
$ curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" localhost:8083/connectors/ -d '{ \"name\": \"inventory-connector\", \"config\": { \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\", \"tasks.max\": \"1\", \"database.hostname\": \"mysql\", \"database.port\": \"3306\", \"database.user\": \"debezium\", \"database.password\": \"dbz\", \"database.server.id\": \"184054\", \"database.server.name\": \"dbserver1\", \"database.include.list\": \"inventory\", \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \"database.history.kafka.topic\": \"dbhistory.inventory\" } }'
----
Otherwise, you might see an error like the following:

View File

@ -126,7 +126,7 @@ Next, the connector reports the nine steps that make up the snapshot operation:
2017-09-21 07:24:01,628 INFO MySQL|dbserver1|snapshot Step 0: disabling autocommit and enabling repeatable read transactions [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,631 INFO MySQL|dbserver1|snapshot Step 1: start transaction with consistent snapshot [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,634 INFO MySQL|dbserver1|snapshot Step 2: flush and obtain global read lock to prevent writes to database [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,636 INFO MySQL|dbserver1|snapshot Step 3: read binlog position of MySQL master [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,636 INFO MySQL|dbserver1|snapshot Step 3: read binlog position of MySQL primary server [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,638 INFO MySQL|dbserver1|snapshot using binlog 'mysql-bin.000003' at position '154' and gtid '' [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,638 INFO MySQL|dbserver1|snapshot Step 4: read list of available databases [io.debezium.connector.mysql.SnapshotReader]
2017-09-21 07:24:01,638 INFO MySQL|dbserver1|snapshot list of available databases is: [information_schema, inventory, mysql, performance_schema, sys] [io.debezium.connector.mysql.SnapshotReader]

View File

@ -432,7 +432,7 @@ node('Slave') {
"database.password": "dbz",
"database.server.id": "184054",
"database.server.name": "dbserver1",
"database.whitelist": "inventory",
"database.include.list": "inventory",
"database.history.kafka.bootstrap.servers": "kafka:9092",
"database.history.kafka.topic": "schema-changes.inventory"
}