DBZ-2432: Fixing misuse of assertj

- Some usages of assertj was not callig the actual assert of the DSL, fixing it
- Changing the Assertions to be staticlly imported
This commit is contained in:
Marcelo Avancini 2022-12-04 21:47:18 -03:00 committed by Jiri Pechanec
parent a7b561efad
commit 7754163c5d
62 changed files with 889 additions and 866 deletions

View File

@ -5,10 +5,11 @@
*/ */
package io.debezium.spi; package io.debezium.spi;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Properties; import java.util.Properties;
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.assertj.core.api.Assertions;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -92,15 +93,15 @@ public void before() {
public void matchingField() { public void matchingField() {
testConverter.configure(new Properties()); testConverter.configure(new Properties());
testConverter.converterFor(new BasicField("myfield", "db1.table1", "VARCHAR2(30)"), testRegistration); testConverter.converterFor(new BasicField("myfield", "db1.table1", "VARCHAR2(30)"), testRegistration);
Assertions.assertThat(testRegistration.fieldSchema.name()).isEqualTo("CUSTOM_STRING"); assertThat(testRegistration.fieldSchema.name()).isEqualTo("CUSTOM_STRING");
Assertions.assertThat(testRegistration.converter.convert(34)).isEqualTo("34"); assertThat(testRegistration.converter.convert(34)).isEqualTo("34");
} }
@Test @Test
public void nonMatchingField() { public void nonMatchingField() {
testConverter.configure(new Properties()); testConverter.configure(new Properties());
testConverter.converterFor(new BasicField("wrongfield", "db1.table1", "VARCHAR2(30)"), testRegistration); testConverter.converterFor(new BasicField("wrongfield", "db1.table1", "VARCHAR2(30)"), testRegistration);
Assertions.assertThat(testRegistration.fieldSchema).isNull(); assertThat(testRegistration.fieldSchema).isNull();
} }
@Test @Test
@ -109,10 +110,10 @@ public void configuredField() {
props.setProperty("field", "otherfield"); props.setProperty("field", "otherfield");
testConverter.configure(props); testConverter.configure(props);
testConverter.converterFor(new BasicField("myfield", "db1.table1", "VARCHAR2(30)"), testRegistration); testConverter.converterFor(new BasicField("myfield", "db1.table1", "VARCHAR2(30)"), testRegistration);
Assertions.assertThat(testRegistration.fieldSchema).isNull(); assertThat(testRegistration.fieldSchema).isNull();
testConverter.converterFor(new BasicField("otherfield", "db1.table1", "VARCHAR2(30)"), testRegistration); testConverter.converterFor(new BasicField("otherfield", "db1.table1", "VARCHAR2(30)"), testRegistration);
Assertions.assertThat(testRegistration.fieldSchema.name()).isEqualTo("CUSTOM_STRING"); assertThat(testRegistration.fieldSchema.name()).isEqualTo("CUSTOM_STRING");
Assertions.assertThat(testRegistration.converter.convert(34)).isEqualTo("34"); assertThat(testRegistration.converter.convert(34)).isEqualTo("34");
} }
} }

View File

@ -5,6 +5,9 @@
*/ */
package io.debezium.connector.mongodb; package io.debezium.connector.mongodb;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.entry;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -23,7 +26,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.bson.Document; import org.bson.Document;
import org.junit.After; import org.junit.After;
@ -279,7 +281,7 @@ protected <V> Map<Integer, V> consumeMixedWithIncrementalSnapshot(int recordCoun
final List<SourceRecord> dataRecords = records.recordsForTopic(topicName); final List<SourceRecord> dataRecords = records.recordsForTopic(topicName);
if (records.allRecordsInOrder().isEmpty()) { if (records.allRecordsInOrder().isEmpty()) {
noRecords++; noRecords++;
Assertions.assertThat(noRecords).describedAs(String.format("Too many no data record results, %d < %d", dbChanges.size(), recordCount)) assertThat(noRecords).describedAs(String.format("Too many no data record results, %d < %d", dbChanges.size(), recordCount))
.isLessThanOrEqualTo(MAXIMUM_NO_RECORDS_CONSUMES); .isLessThanOrEqualTo(MAXIMUM_NO_RECORDS_CONSUMES);
continue; continue;
} }
@ -302,7 +304,7 @@ protected <V> Map<Integer, V> consumeMixedWithIncrementalSnapshot(int recordCoun
} }
} }
Assertions.assertThat(dbChanges).hasSize(recordCount); assertThat(dbChanges).hasSize(recordCount);
return dbChanges; return dbChanges;
} }
@ -342,7 +344,7 @@ public void snapshotOnly() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -358,7 +360,7 @@ public void invalidTablesInTheList() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -392,7 +394,7 @@ public void snapshotOnlyWithRestart() throws Exception {
} }
}); });
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -410,7 +412,7 @@ public void inserts() throws Exception {
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -429,7 +431,7 @@ public void updates() throws Exception {
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount,
x -> x.getValue() >= 2000, null); x -> x.getValue() >= 2000, null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -465,7 +467,7 @@ public void updatesWithRestart() throws Exception {
} }
}); });
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -484,7 +486,7 @@ public void updatesLargeChunk() throws Exception {
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount,
x -> x.getValue() >= 2000, null); x -> x.getValue() >= 2000, null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -508,7 +510,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
// Consume any residual left-over events after stopping incremental snapshots such as open/close // Consume any residual left-over events after stopping incremental snapshots such as open/close
// and wait for the stop message in the connector logs // and wait for the stop message in the connector logs
Assertions.assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage( assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage(
interceptor, "Stopping incremental snapshot")).isTrue(); interceptor, "Stopping incremental snapshot")).isTrue();
// stop the connector // stop the connector
@ -517,7 +519,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
// restart the connector // restart the connector
// should start with no available records, should not have any incremental snapshot state // should start with no available records, should not have any incremental snapshot state
startConnector(); startConnector();
Assertions.assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue(); assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue();
sendAdHocSnapshotSignal(); sendAdHocSnapshotSignal();
@ -526,7 +528,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -550,7 +552,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
// Consume any residual left-over events after stopping incremental snapshots such as open/close // Consume any residual left-over events after stopping incremental snapshots such as open/close
// and wait for the stop message in the connector logs // and wait for the stop message in the connector logs
Assertions.assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage( assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage(
interceptor, "Removing '[" + fullDataCollectionName() + "]' collections from incremental snapshot")).isTrue(); interceptor, "Removing '[" + fullDataCollectionName() + "]' collections from incremental snapshot")).isTrue();
// stop the connector // stop the connector
@ -559,7 +561,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
// restart the connector // restart the connector
// should start with no available records, should not have any incremental snapshot state // should start with no available records, should not have any incremental snapshot state
startConnector(); startConnector();
Assertions.assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue(); assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue();
sendAdHocSnapshotSignal(); sendAdHocSnapshotSignal();
@ -568,7 +570,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -584,7 +586,7 @@ public void removeNotYetCapturedCollectionFromInProgressIncrementalSnapshot() th
startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250)); startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250));
final List<String> collectionIds = fullDataCollectionNames(); final List<String> collectionIds = fullDataCollectionNames();
Assertions.assertThat(collectionIds).hasSize(2); assertThat(collectionIds).hasSize(2);
final String collectionIdToRemove = collectionIds.get(1); final String collectionIdToRemove = collectionIds.get(1);
@ -601,7 +603,7 @@ public void removeNotYetCapturedCollectionFromInProgressIncrementalSnapshot() th
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicName()); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicName());
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -617,10 +619,10 @@ public void removeStartedCapturedCollectionFromInProgressIncrementalSnapshot() t
startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250)); startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250));
final List<String> collectionIds = fullDataCollectionNames(); final List<String> collectionIds = fullDataCollectionNames();
Assertions.assertThat(collectionIds).hasSize(2); assertThat(collectionIds).hasSize(2);
final List<String> topicNames = topicNames(); final List<String> topicNames = topicNames();
Assertions.assertThat(topicNames).hasSize(2); assertThat(topicNames).hasSize(2);
final String collectionIdToRemove = collectionIds.get(0); final String collectionIdToRemove = collectionIds.get(0);
@ -637,7 +639,7 @@ public void removeStartedCapturedCollectionFromInProgressIncrementalSnapshot() t
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicNames.get(1)); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicNames.get(1));
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -671,7 +673,7 @@ public void pauseDuringSnapshot() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount - beforeResume); Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount - beforeResume);
for (int i = beforeResume + 1; i < expectedRecordCount; i++) { for (int i = beforeResume + 1; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }

View File

@ -5,6 +5,7 @@
*/ */
package io.debezium.connector.mongodb; package io.debezium.connector.mongodb;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import java.util.Arrays; import java.util.Arrays;
@ -15,7 +16,6 @@
import java.util.function.Consumer; import java.util.function.Consumer;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.assertj.core.api.Assertions;
import org.bson.BsonDocument; import org.bson.BsonDocument;
import org.bson.BsonString; import org.bson.BsonString;
import org.bson.Document; import org.bson.Document;
@ -129,9 +129,9 @@ public static String captureMode() {
public static void assertChangeStreamUpdate(ObjectId oid, Struct value, String after, List<String> removedFields, public static void assertChangeStreamUpdate(ObjectId oid, Struct value, String after, List<String> removedFields,
String updatedFields) { String updatedFields) {
Assertions.assertThat(value.getString("after")).isEqualTo(after.replace("<OID>", oid.toHexString())); assertThat(value.getString("after")).isEqualTo(after.replace("<OID>", oid.toHexString()));
Assertions.assertThat(value.getStruct("updateDescription").getString("updatedFields")).isEqualTo(updatedFields); assertThat(value.getStruct("updateDescription").getString("updatedFields")).isEqualTo(updatedFields);
Assertions.assertThat(value.getStruct("updateDescription").getArray("removedFields")).isEqualTo(removedFields); assertThat(value.getStruct("updateDescription").getArray("removedFields")).isEqualTo(removedFields);
} }
public static void assertChangeStreamUpdateAsDocs(ObjectId oid, Struct value, String after, public static void assertChangeStreamUpdateAsDocs(ObjectId oid, Struct value, String after,
@ -139,27 +139,27 @@ public static void assertChangeStreamUpdateAsDocs(ObjectId oid, Struct value, St
Document expectedAfter = TestHelper.getDocumentWithoutLanguageVersion(after.replace("<OID>", oid.toHexString())); Document expectedAfter = TestHelper.getDocumentWithoutLanguageVersion(after.replace("<OID>", oid.toHexString()));
Document actualAfter = TestHelper Document actualAfter = TestHelper
.getDocumentWithoutLanguageVersion(value.getString("after")); .getDocumentWithoutLanguageVersion(value.getString("after"));
Assertions.assertThat(actualAfter).isEqualTo(expectedAfter); assertThat(actualAfter).isEqualTo(expectedAfter);
final String actualUpdatedFields = value.getStruct("updateDescription").getString("updatedFields"); final String actualUpdatedFields = value.getStruct("updateDescription").getString("updatedFields");
if (actualUpdatedFields != null) { if (actualUpdatedFields != null) {
Assertions.assertThat(updatedFields).isNotNull(); assertThat(updatedFields).isNotNull();
try { try {
Assertions.assertThat((Object) mapper.readTree(actualUpdatedFields)).isEqualTo(mapper.readTree(updatedFields)); assertThat((Object) mapper.readTree(actualUpdatedFields)).isEqualTo(mapper.readTree(updatedFields));
} }
catch (JsonProcessingException e) { catch (JsonProcessingException e) {
fail("Failed to parse JSON <" + actualUpdatedFields + "> or <" + updatedFields + ">"); fail("Failed to parse JSON <" + actualUpdatedFields + "> or <" + updatedFields + ">");
} }
} }
else { else {
Assertions.assertThat(updatedFields).isNull(); assertThat(updatedFields).isNull();
} }
final List<Object> actualRemovedFields = value.getStruct("updateDescription").getArray("removedFields"); final List<Object> actualRemovedFields = value.getStruct("updateDescription").getArray("removedFields");
if (actualRemovedFields != null) { if (actualRemovedFields != null) {
Assertions.assertThat(removedFields).isNotNull(); assertThat(removedFields).isNotNull();
Assertions.assertThat(actualRemovedFields.containsAll(removedFields) && removedFields.containsAll(actualRemovedFields)); assertThat(actualRemovedFields.containsAll(removedFields) && removedFields.containsAll(actualRemovedFields));
} }
else { else {
Assertions.assertThat(removedFields).isNull(); assertThat(removedFields).isNull();
} }
} }
} }

View File

@ -26,7 +26,6 @@
import org.apache.kafka.connect.errors.DataException; import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.header.Header; import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.bson.Document; import org.bson.Document;
import org.bson.RawBsonDocument; import org.bson.RawBsonDocument;
import org.bson.types.ObjectId; import org.bson.types.ObjectId;
@ -272,9 +271,9 @@ public void shouldSupportDbRef() throws InterruptedException, IOException {
validate(transformed); validate(transformed);
final Struct value = ((Struct) transformed.value()).getStruct("data"); final Struct value = ((Struct) transformed.value()).getStruct("data");
Assertions.assertThat(value.getString("_ref")).isEqualTo("a2"); assertThat(value.getString("_ref")).isEqualTo("a2");
Assertions.assertThat(value.getInt32("_id")).isEqualTo(4); assertThat(value.getInt32("_id")).isEqualTo(4);
Assertions.assertThat(value.getString("_db")).isEqualTo("b2"); assertThat(value.getString("_db")).isEqualTo("b2");
} }
@Test @Test
@ -306,8 +305,8 @@ public void shouldSupportSubSanitizeFieldName() throws InterruptedException, IOE
final SourceRecord transformed = transformation.apply(records.allRecordsInOrder().get(0)); final SourceRecord transformed = transformation.apply(records.allRecordsInOrder().get(0));
validate(transformed); validate(transformed);
final Struct metric = ((Struct) transformed.value()).getStruct("metrics").getStruct("metric__fct"); final Struct metric = ((Struct) transformed.value()).getStruct("metrics").getStruct("metric__fct");
Assertions.assertThat(metric.getInt32("min")).isEqualTo(0); assertThat(metric.getInt32("min")).isEqualTo(0);
Assertions.assertThat(metric.getInt32("max")).isEqualTo(1); assertThat(metric.getInt32("max")).isEqualTo(1);
} }
@Test @Test

View File

@ -6,6 +6,7 @@
package io.debezium.connector.mysql; package io.debezium.connector.mysql;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.entry; import static org.assertj.core.api.Assertions.entry;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
@ -24,7 +25,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -297,7 +297,7 @@ record -> {
LocalDate dt = dateTime.toLocalDate(); LocalDate dt = dateTime.toLocalDate();
LocalDate d = LocalDate.parse(String.format("%s-05-01", 2000 + i)); LocalDate d = LocalDate.parse(String.format("%s-05-01", 2000 + i));
LocalTime t = LocalTime.parse(String.format("0%s:00:00", i)); LocalTime t = LocalTime.parse(String.format("0%s:00:00", i));
Assertions.assertThat(dbChanges).contains(entry(i + 1, List.of(dt, d, t))); assertThat(dbChanges).contains(entry(i + 1, List.of(dt, d, t)));
} }
} }
@ -332,7 +332,7 @@ record -> {
}, },
DATABASE.topicForTable("a_date"), DATABASE.topicForTable("a_date"),
null); null);
Assertions.assertThat(dbChanges).contains(entry(1, Arrays.asList(0, null))); assertThat(dbChanges).contains(entry(1, Arrays.asList(0, null)));
assertFalse(logInterceptor.containsWarnMessage("Invalid length when read MySQL DATE value. BIN_LEN_DATE is 0.")); assertFalse(logInterceptor.containsWarnMessage("Invalid length when read MySQL DATE value. BIN_LEN_DATE is 0."));
} }
} }

View File

@ -28,7 +28,6 @@
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.assertj.core.api.Assertions;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -628,15 +627,15 @@ public void shouldHandleQuotes() {
assertThat(((MySqlAntlrDdlParser) parser).getParsingExceptionsFromWalker().size()).isEqualTo(0); assertThat(((MySqlAntlrDdlParser) parser).getParsingExceptionsFromWalker().size()).isEqualTo(0);
assertThat(tables.size()).isEqualTo(9); assertThat(tables.size()).isEqualTo(9);
Assertions.assertThat(tables.forTable(null, null, "mytable1")).isNotNull(); assertThat(tables.forTable(null, null, "mytable1")).isNotNull();
Assertions.assertThat(tables.forTable(null, null, "mytable2")).isNotNull(); assertThat(tables.forTable(null, null, "mytable2")).isNotNull();
Assertions.assertThat(tables.forTable("db", null, "mytable3")).isNotNull(); assertThat(tables.forTable("db", null, "mytable3")).isNotNull();
Assertions.assertThat(tables.forTable("db", null, "mytable4")).isNotNull(); assertThat(tables.forTable("db", null, "mytable4")).isNotNull();
Assertions.assertThat(tables.forTable("db", null, "mytable5")).isNotNull(); assertThat(tables.forTable("db", null, "mytable5")).isNotNull();
Assertions.assertThat(tables.forTable("db", null, "myta`ble6")).isNotNull(); assertThat(tables.forTable("db", null, "myta`ble6")).isNotNull();
Assertions.assertThat(tables.forTable("db", null, "mytable7`")).isNotNull(); assertThat(tables.forTable("db", null, "mytable7`")).isNotNull();
Assertions.assertThat(tables.forTable("`db", null, "mytable8")).isNotNull(); assertThat(tables.forTable("`db", null, "mytable8")).isNotNull();
Assertions.assertThat(tables.forTable("`db", null, "myta\"\"ble9")).isNotNull(); assertThat(tables.forTable("`db", null, "myta\"\"ble9")).isNotNull();
} }
@Test @Test
@ -665,17 +664,17 @@ public void shouldDropPrimaryKeyColumn() {
assertThat(tables.size()).isEqualTo(1); assertThat(tables.size()).isEqualTo(1);
Table table = tables.forTable(null, null, "mytable"); Table table = tables.forTable(null, null, "mytable");
Assertions.assertThat(table.primaryKeyColumnNames()).isEqualTo(Collections.singletonList("id")); assertThat(table.primaryKeyColumnNames()).isEqualTo(Collections.singletonList("id"));
parser.parse("ALTER TABLE mytable DROP COLUMN id", tables); parser.parse("ALTER TABLE mytable DROP COLUMN id", tables);
table = tables.forTable(null, null, "mytable"); table = tables.forTable(null, null, "mytable");
Assertions.assertThat(table.primaryKeyColumnNames()).isEmpty(); assertThat(table.primaryKeyColumnNames()).isEmpty();
Assertions.assertThat(table.primaryKeyColumns()).isEmpty(); assertThat(table.primaryKeyColumns()).isEmpty();
parser.parse("ALTER TABLE mytable ADD PRIMARY KEY(id2)", tables); parser.parse("ALTER TABLE mytable ADD PRIMARY KEY(id2)", tables);
table = tables.forTable(null, null, "mytable"); table = tables.forTable(null, null, "mytable");
Assertions.assertThat(table.primaryKeyColumnNames()).isEqualTo(Collections.singletonList("id2")); assertThat(table.primaryKeyColumnNames()).isEqualTo(Collections.singletonList("id2"));
Assertions.assertThat(table.primaryKeyColumns()).hasSize(1); assertThat(table.primaryKeyColumns()).hasSize(1);
} }
@Test @Test

View File

@ -30,7 +30,6 @@
import org.apache.kafka.connect.errors.DataException; import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.header.Header; import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -964,7 +963,7 @@ public void shouldIgnoreCreateIndexForNonCapturedTablesNotStoredInHistory() thro
} }
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record.topic()).isEqualTo(DATABASE.topicForTable("customers")); assertThat(record.topic()).isEqualTo(DATABASE.topicForTable("customers"));
} }
@Test @Test
@ -2175,8 +2174,8 @@ public void shouldRewriteIdentityKey() throws InterruptedException, SQLException
recordsForTopic.forEach(record -> { recordsForTopic.forEach(record -> {
Struct key = (Struct) record.key(); Struct key = (Struct) record.key();
Assertions.assertThat(key.get("id")).isNotNull(); assertThat(key.get("id")).isNotNull();
Assertions.assertThat(key.get("name")).isNotNull(); assertThat(key.get("name")).isNotNull();
}); });
} }
@ -2205,8 +2204,8 @@ public void shouldRewriteIdentityKeyWithWhitespace() throws InterruptedException
recordsForTopic.forEach(record -> { recordsForTopic.forEach(record -> {
Struct key = (Struct) record.key(); Struct key = (Struct) record.key();
Assertions.assertThat(key.get("id")).isNotNull(); assertThat(key.get("id")).isNotNull();
Assertions.assertThat(key.get("name")).isNotNull(); assertThat(key.get("name")).isNotNull();
}); });
} }
@ -2233,8 +2232,8 @@ public void shouldRewriteIdentityKeyWithMsgKeyColumnsFieldRegexValidation() thro
recordsForTopic.forEach(record -> { recordsForTopic.forEach(record -> {
Struct key = (Struct) record.key(); Struct key = (Struct) record.key();
Assertions.assertThat(key.get("id")).isNotNull(); assertThat(key.get("id")).isNotNull();
Assertions.assertThat(key.get("name")).isNotNull(); assertThat(key.get("name")).isNotNull();
}); });
} }

View File

@ -14,7 +14,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -117,19 +116,19 @@ private SourceRecord consumeInsert() throws InterruptedException {
} }
private void assertFloatChangeRecord(SourceRecord sourceRecord) { private void assertFloatChangeRecord(SourceRecord sourceRecord) {
Assertions.assertThat(sourceRecord).isNotNull(); assertThat(sourceRecord).isNotNull();
final Struct change = ((Struct) sourceRecord.value()).getStruct("after"); final Struct change = ((Struct) sourceRecord.value()).getStruct("after");
final float f2 = (float) 5.61; final float f2 = (float) 5.61;
final float f3 = (float) 30.12346; final float f3 = (float) 30.12346;
Assertions.assertThat(change.getFloat32("f1")).isEqualTo((float) 5.6); assertThat(change.getFloat32("f1")).isEqualTo((float) 5.6);
Assertions.assertThat(change.getFloat64("f2")).isEqualTo(Double.valueOf(((Number) f2).doubleValue())); assertThat(change.getFloat64("f2")).isEqualTo(Double.valueOf(((Number) f2).doubleValue()));
Assertions.assertThat(change.getFloat64("f3")).isEqualTo(Double.valueOf(((Number) f3).doubleValue())); assertThat(change.getFloat64("f3")).isEqualTo(Double.valueOf(((Number) f3).doubleValue()));
Assertions.assertThat(change.getFloat32("f4_23")).isEqualTo((float) 64.1); assertThat(change.getFloat32("f4_23")).isEqualTo((float) 64.1);
Assertions.assertThat(change.getFloat32("f4_24")).isEqualTo((float) 64.1); assertThat(change.getFloat32("f4_24")).isEqualTo((float) 64.1);
// Mysql will convert float(25) to double type // Mysql will convert float(25) to double type
Assertions.assertThat(change.getFloat64("f4_25")).isEqualTo(64.1); assertThat(change.getFloat64("f4_25")).isEqualTo(64.1);
// Mysql will treat "float unsigned" as float type // Mysql will treat "float unsigned" as float type
Assertions.assertThat(change.getFloat32("weight")).isEqualTo((float) 64.1234); assertThat(change.getFloat32("weight")).isEqualTo((float) 64.1234);
} }
} }

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.connector.mysql; package io.debezium.connector.mysql;
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.file.Path; import java.nio.file.Path;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.List; import java.util.List;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -99,7 +100,7 @@ public void shouldStoreSingleRename() throws SQLException, InterruptedException
} }
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName()); final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName());
Assertions.assertThat(getDdl(schemaChanges, 0)).startsWith("RENAME TABLE `t-1` TO `new-t-1`"); assertThat(getDdl(schemaChanges, 0)).startsWith("RENAME TABLE `t-1` TO `new-t-1`");
stopConnector(); stopConnector();
@ -126,8 +127,8 @@ public void shouldStoreMultipleRenames() throws SQLException, InterruptedExcepti
} }
records = consumeRecordsByTopic(2); records = consumeRecordsByTopic(2);
final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName()); final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName());
Assertions.assertThat(getDdl(schemaChanges, 0)).startsWith("RENAME TABLE `t-1` TO `new-t-1`"); assertThat(getDdl(schemaChanges, 0)).startsWith("RENAME TABLE `t-1` TO `new-t-1`");
Assertions.assertThat(getDdl(schemaChanges, 1)).startsWith("RENAME TABLE `t.2` TO `new.t.2`"); assertThat(getDdl(schemaChanges, 1)).startsWith("RENAME TABLE `t.2` TO `new.t.2`");
stopConnector(); stopConnector();
@ -154,7 +155,7 @@ public void shouldStoreAlterRename() throws SQLException, InterruptedException {
} }
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName()); final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName());
Assertions.assertThat(getDdl(schemaChanges, 0)).startsWith("ALTER TABLE `t-1` RENAME TO `new-t-1`"); assertThat(getDdl(schemaChanges, 0)).startsWith("ALTER TABLE `t-1` RENAME TO `new-t-1`");
stopConnector(); stopConnector();
@ -166,14 +167,14 @@ public void shouldStoreAlterRename() throws SQLException, InterruptedException {
private void assertDdls(SourceRecords records) { private void assertDdls(SourceRecords records) {
final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName()); final List<SourceRecord> schemaChanges = records.recordsForTopic(DATABASE.getServerName());
int index = 0; int index = 0;
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("SET"); assertThat(getDdl(schemaChanges, index++)).startsWith("SET");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("DROP TABLE IF EXISTS `" + DATABASE.getDatabaseName() + "`.`t-1`"); assertThat(getDdl(schemaChanges, index++)).startsWith("DROP TABLE IF EXISTS `" + DATABASE.getDatabaseName() + "`.`t-1`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("DROP TABLE IF EXISTS `" + DATABASE.getDatabaseName() + "`.`t.2`"); assertThat(getDdl(schemaChanges, index++)).startsWith("DROP TABLE IF EXISTS `" + DATABASE.getDatabaseName() + "`.`t.2`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("DROP DATABASE IF EXISTS `" + DATABASE.getDatabaseName() + "`"); assertThat(getDdl(schemaChanges, index++)).startsWith("DROP DATABASE IF EXISTS `" + DATABASE.getDatabaseName() + "`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE DATABASE `" + DATABASE.getDatabaseName() + "`"); assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE DATABASE `" + DATABASE.getDatabaseName() + "`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("USE `" + DATABASE.getDatabaseName() + "`"); assertThat(getDdl(schemaChanges, index++)).startsWith("USE `" + DATABASE.getDatabaseName() + "`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE TABLE `t-1`"); assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE TABLE `t-1`");
Assertions.assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE TABLE `t.2`"); assertThat(getDdl(schemaChanges, index++)).startsWith("CREATE TABLE `t.2`");
} }
private String getDdl(final List<SourceRecord> schemaChanges, int index) { private String getDdl(final List<SourceRecord> schemaChanges, int index) {

View File

@ -13,7 +13,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -75,7 +74,7 @@ private Struct consume(SchemaNameAdjustmentMode adjustmentMode) throws Interrupt
SourceRecords records = consumeRecordsByTopic(6 + 1); // 6 DDL changes, 1 INSERT SourceRecords records = consumeRecordsByTopic(6 + 1); // 6 DDL changes, 1 INSERT
final List<SourceRecord> results = records.recordsForTopic(DATABASE.topicForTable("name-adjustment")); final List<SourceRecord> results = records.recordsForTopic(DATABASE.topicForTable("name-adjustment"));
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
return (Struct) results.get(0).value(); return (Struct) results.get(0).value();
} }

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.connector.mysql; package io.debezium.connector.mysql;
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.file.Path; import java.nio.file.Path;
import java.sql.Connection; import java.sql.Connection;
import java.sql.SQLException; import java.sql.SQLException;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -202,42 +203,42 @@ private void consumeInitial() throws InterruptedException {
private void assertIntChangeRecord() throws InterruptedException { private void assertIntChangeRecord() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
Assertions.assertThat(change.getInt16("ti")).isEqualTo((short) 100); assertThat(change.getInt16("ti")).isEqualTo((short) 100);
Assertions.assertThat(change.getInt16("ti1")).isEqualTo((short) 5); assertThat(change.getInt16("ti1")).isEqualTo((short) 5);
Assertions.assertThat(change.getInt16("ti2")).isEqualTo((short) 50); assertThat(change.getInt16("ti2")).isEqualTo((short) 50);
Assertions.assertThat(change.getInt16("b")).isEqualTo((short) 1); assertThat(change.getInt16("b")).isEqualTo((short) 1);
} }
private void assertBooleanChangeRecord() throws InterruptedException { private void assertBooleanChangeRecord() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
Assertions.assertThat(change.getInt16("ti")).isEqualTo((short) 100); assertThat(change.getInt16("ti")).isEqualTo((short) 100);
Assertions.assertThat(change.getInt16("ti1")).isEqualTo((short) 5); assertThat(change.getInt16("ti1")).isEqualTo((short) 5);
Assertions.assertThat(change.getInt16("ti2")).isEqualTo((short) 50); assertThat(change.getInt16("ti2")).isEqualTo((short) 50);
Assertions.assertThat(change.getBoolean("b")).isEqualTo(true); assertThat(change.getBoolean("b")).isEqualTo(true);
} }
private void assertDefaultValueBooleanChangeRecord() throws InterruptedException { private void assertDefaultValueBooleanChangeRecord() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
Assertions.assertThat(change.getBoolean("b")).isEqualTo(true); assertThat(change.getBoolean("b")).isEqualTo(true);
Assertions.assertThat(change.schema().field("b").schema().defaultValue()).isEqualTo(false); assertThat(change.schema().field("b").schema().defaultValue()).isEqualTo(false);
} }
private void assertUnsignedBooleanChangeRecord() throws InterruptedException { private void assertUnsignedBooleanChangeRecord() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
Assertions.assertThat(change.getInt16("ti1")).isEqualTo((short) 1); assertThat(change.getInt16("ti1")).isEqualTo((short) 1);
Assertions.assertThat(change.getBoolean("ti2")).isEqualTo(true); assertThat(change.getBoolean("ti2")).isEqualTo(true);
Assertions.assertThat(change.getBoolean("ti3")).isEqualTo(false); assertThat(change.getBoolean("ti3")).isEqualTo(false);
} }
} }

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.connector.mysql; package io.debezium.connector.mysql;
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.file.Path; import java.nio.file.Path;
import java.sql.Connection; import java.sql.Connection;
import java.sql.SQLException; import java.sql.SQLException;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -156,79 +157,79 @@ public void shouldProcessTwoAndForDigitYearsInConnector() throws SQLException, I
private void assertChangeRecordByDatabase() throws InterruptedException { private void assertChangeRecordByDatabase() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
// YEAR does not differentiate between 0018 and 18 // YEAR does not differentiate between 0018 and 18
Assertions.assertThat(change.getInt32("y18")).isEqualTo(2018); assertThat(change.getInt32("y18")).isEqualTo(2018);
Assertions.assertThat(change.getInt32("y0018")).isEqualTo(2018); assertThat(change.getInt32("y0018")).isEqualTo(2018);
Assertions.assertThat(change.getInt32("y2018")).isEqualTo(2018); assertThat(change.getInt32("y2018")).isEqualTo(2018);
// days elapsed since epoch till 2018-04-01 // days elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt32("d18")).isEqualTo(17622); assertThat(change.getInt32("d18")).isEqualTo(17622);
// days counted backward from epoch to 0018-04-01 // days counted backward from epoch to 0018-04-01
Assertions.assertThat(change.getInt32("d0018")).isEqualTo(-712863); assertThat(change.getInt32("d0018")).isEqualTo(-712863);
// days elapsed since epoch till 2018-04-01 // days elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt32("d2018")).isEqualTo(17622); assertThat(change.getInt32("d2018")).isEqualTo(17622);
// nanos elapsed since epoch till 2018-04-01 // nanos elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt64("dt18")).isEqualTo(1_522_586_096_000L); assertThat(change.getInt64("dt18")).isEqualTo(1_522_586_096_000L);
// Assert for 0018 will not work as long is able to handle only 292 years of nanos so we are underflowing // Assert for 0018 will not work as long is able to handle only 292 years of nanos so we are underflowing
// nanos elapsed since epoch till 2018-04-01 // nanos elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt64("dt2018")).isEqualTo(1_522_586_096_000L); assertThat(change.getInt64("dt2018")).isEqualTo(1_522_586_096_000L);
// YEAR does not differentiate between 0078 and 78 // YEAR does not differentiate between 0078 and 78
Assertions.assertThat(change.getInt32("y78")).isEqualTo(1978); assertThat(change.getInt32("y78")).isEqualTo(1978);
Assertions.assertThat(change.getInt32("y0078")).isEqualTo(1978); assertThat(change.getInt32("y0078")).isEqualTo(1978);
Assertions.assertThat(change.getInt32("y1978")).isEqualTo(1978); assertThat(change.getInt32("y1978")).isEqualTo(1978);
// days elapsed since epoch till 1978-04-01 // days elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt32("d78")).isEqualTo(3012); assertThat(change.getInt32("d78")).isEqualTo(3012);
// days counted backward from epoch to 0078-04-01 // days counted backward from epoch to 0078-04-01
Assertions.assertThat(change.getInt32("d0078")).isEqualTo(-690948); assertThat(change.getInt32("d0078")).isEqualTo(-690948);
// days elapsed since epoch till 1978-04-01 // days elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt32("d1978")).isEqualTo(3012); assertThat(change.getInt32("d1978")).isEqualTo(3012);
// nanos elapsed since epoch till 1978-04-01 // nanos elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt64("dt78")).isEqualTo(260_282_096_000L); assertThat(change.getInt64("dt78")).isEqualTo(260_282_096_000L);
// Assert for 0018 will not work as long is able to handle only 292 years of nanos so we are underflowing // Assert for 0018 will not work as long is able to handle only 292 years of nanos so we are underflowing
// nanos elapsed since epoch till 1978-04-01 // nanos elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt64("dt1978")).isEqualTo(260_282_096_000L); assertThat(change.getInt64("dt1978")).isEqualTo(260_282_096_000L);
} }
private void assertChangeRecordByConnector() throws InterruptedException { private void assertChangeRecordByConnector() throws InterruptedException {
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
final Struct change = ((Struct) record.value()).getStruct("after"); final Struct change = ((Struct) record.value()).getStruct("after");
// YEAR does not differentiate between 0018 and 18 // YEAR does not differentiate between 0018 and 18
Assertions.assertThat(change.getInt32("y18")).isEqualTo(2018); assertThat(change.getInt32("y18")).isEqualTo(2018);
Assertions.assertThat(change.getInt32("y0018")).isEqualTo(2018); assertThat(change.getInt32("y0018")).isEqualTo(2018);
Assertions.assertThat(change.getInt32("y2018")).isEqualTo(2018); assertThat(change.getInt32("y2018")).isEqualTo(2018);
// days elapsed since epoch till 2018-04-01 // days elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt32("d18")).isEqualTo(17622); assertThat(change.getInt32("d18")).isEqualTo(17622);
Assertions.assertThat(change.getInt32("d0018")).isEqualTo(17622); assertThat(change.getInt32("d0018")).isEqualTo(17622);
Assertions.assertThat(change.getInt32("d2018")).isEqualTo(17622); assertThat(change.getInt32("d2018")).isEqualTo(17622);
// nanos elapsed since epoch till 2018-04-01 // nanos elapsed since epoch till 2018-04-01
Assertions.assertThat(change.getInt64("dt18")).isEqualTo(1_522_586_096_000L); assertThat(change.getInt64("dt18")).isEqualTo(1_522_586_096_000L);
Assertions.assertThat(change.getInt64("dt0018")).isEqualTo(1_522_586_096_000L); assertThat(change.getInt64("dt0018")).isEqualTo(1_522_586_096_000L);
Assertions.assertThat(change.getInt64("dt2018")).isEqualTo(1_522_586_096_000L); assertThat(change.getInt64("dt2018")).isEqualTo(1_522_586_096_000L);
// YEAR does not differentiate between 0078 and 78 // YEAR does not differentiate between 0078 and 78
Assertions.assertThat(change.getInt32("y78")).isEqualTo(1978); assertThat(change.getInt32("y78")).isEqualTo(1978);
Assertions.assertThat(change.getInt32("y0078")).isEqualTo(1978); assertThat(change.getInt32("y0078")).isEqualTo(1978);
Assertions.assertThat(change.getInt32("y1978")).isEqualTo(1978); assertThat(change.getInt32("y1978")).isEqualTo(1978);
// days elapsed since epoch till 1978-04-01 // days elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt32("d78")).isEqualTo(3012); assertThat(change.getInt32("d78")).isEqualTo(3012);
Assertions.assertThat(change.getInt32("d0078")).isEqualTo(3012); assertThat(change.getInt32("d0078")).isEqualTo(3012);
Assertions.assertThat(change.getInt32("d1978")).isEqualTo(3012); assertThat(change.getInt32("d1978")).isEqualTo(3012);
// nanos elapsed since epoch till 1978-04-01 // nanos elapsed since epoch till 1978-04-01
Assertions.assertThat(change.getInt64("dt78")).isEqualTo(260_282_096_000L); assertThat(change.getInt64("dt78")).isEqualTo(260_282_096_000L);
Assertions.assertThat(change.getInt64("dt0078")).isEqualTo(260_282_096_000L); assertThat(change.getInt64("dt0078")).isEqualTo(260_282_096_000L);
Assertions.assertThat(change.getInt64("dt1978")).isEqualTo(260_282_096_000L); assertThat(change.getInt64("dt1978")).isEqualTo(260_282_096_000L);
} }
} }

View File

@ -5,6 +5,7 @@
*/ */
package io.debezium.connector.mysql; package io.debezium.connector.mysql;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.entry; import static org.assertj.core.api.Assertions.entry;
import java.io.File; import java.io.File;
@ -24,7 +25,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before; import org.junit.Before;
@ -153,7 +153,7 @@ public void emptyHighWatermark() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -188,7 +188,7 @@ public void filteredEvents() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
finally { finally {
@ -214,7 +214,7 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
DATABASE.topicForTable("a4"), DATABASE.topicForTable("a4"),
null); null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -236,7 +236,7 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
DATABASE.topicForTable("a42"), DATABASE.topicForTable("a42"),
null); null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -321,7 +321,7 @@ public void testPauseDuringSnapshotKafkaSignal() throws Exception {
dbChanges = consumeMixedWithIncrementalSnapshot(ROW_COUNT - beforeResume); dbChanges = consumeMixedWithIncrementalSnapshot(ROW_COUNT - beforeResume);
for (int i = beforeResume + 1; i < ROW_COUNT; i++) { for (int i = beforeResume + 1; i < ROW_COUNT; i++) {
Assertions.assertThat(dbChanges).contains(entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }

View File

@ -16,7 +16,6 @@
import java.util.regex.Matcher; import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -127,7 +126,7 @@ public void shouldProcessPurgedGtidSet() throws SQLException, InterruptedExcepti
final Pattern p = Pattern.compile(".*:(.*)-.*"); final Pattern p = Pattern.compile(".*:(.*)-.*");
final Matcher m = p.matcher(gtids); final Matcher m = p.matcher(gtids);
m.matches(); m.matches();
Assertions.assertThat(m.group(1)).isNotEqualTo("1"); assertThat(m.group(1)).isNotEqualTo("1");
}); });
stopConnector(); stopConnector();

View File

@ -20,7 +20,6 @@
import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.config.ConfigValue; import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.serialization.StringSerializer;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before; import org.junit.Before;
@ -374,7 +373,7 @@ public void shouldValidateMandatoryValues() {
.build(); .build();
final Map<String, ConfigValue> issues = config.validate(KafkaSchemaHistory.ALL_FIELDS); final Map<String, ConfigValue> issues = config.validate(KafkaSchemaHistory.ALL_FIELDS);
Assertions.assertThat(issues.keySet()).isEqualTo(Collect.unmodifiableSet( assertThat(issues.keySet()).isEqualTo(Collect.unmodifiableSet(
"schema.history.internal.name", "schema.history.internal.name",
"schema.history.internal.connector.class", "schema.history.internal.connector.class",
"schema.history.internal.kafka.topic", "schema.history.internal.kafka.topic",

View File

@ -5,6 +5,7 @@
*/ */
package io.debezium.connector.oracle; package io.debezium.connector.oracle;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
@ -14,7 +15,7 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -101,7 +102,7 @@ private Struct consume(BinaryHandlingMode binaryMode) throws InterruptedExceptio
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.DEBEZIUM.BINARY_MODE_TEST"); final List<SourceRecord> results = records.recordsForTopic("server1.DEBEZIUM.BINARY_MODE_TEST");
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
return (Struct) ((Struct) results.get(0).value()).get("after"); return (Struct) ((Struct) results.get(0).value()).get("after");
} }

View File

@@ -5,6 +5,8 @@
 */
 package io.debezium.connector.oracle;
+import static org.assertj.core.api.Assertions.assertThat;
 import java.sql.SQLException;
 import java.util.List;
@@ -12,7 +14,6 @@
 import org.apache.kafka.connect.data.SchemaBuilder;
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -97,26 +98,26 @@ public void snapshotSchemaChanges() throws Exception {
 // DDL for 3 tables
 SourceRecords records = consumeRecordsByTopic(3);
 final List<SourceRecord> schemaRecords = records.allRecordsInOrder();
-Assertions.assertThat(schemaRecords).hasSize(3);
+assertThat(schemaRecords).hasSize(3);
 schemaRecords.forEach(record -> {
-Assertions.assertThat(record.topic()).isEqualTo("server1");
+assertThat(record.topic()).isEqualTo("server1");
-Assertions.assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo(TestHelper.getDatabaseName());
+assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo(TestHelper.getDatabaseName());
-Assertions.assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
+assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
 });
-Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
+assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
-Assertions.assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
+assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
-Assertions.assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
+assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
-Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("schema")).isEqualTo("DEBEZIUM");
+assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("schema")).isEqualTo("DEBEZIUM");
-Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getString("ddl")).contains("CREATE TABLE");
+assertThat(((Struct) schemaRecords.get(0).value()).getString("ddl")).contains("CREATE TABLE");
-Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getString("schemaName")).isEqualTo("DEBEZIUM");
+assertThat(((Struct) schemaRecords.get(0).value()).getString("schemaName")).isEqualTo("DEBEZIUM");
 final List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges");
-Assertions.assertThat(tableChanges).hasSize(1);
+assertThat(tableChanges).hasSize(1);
-Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");
+assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");
 records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
-Assertions.assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEA")).hasSize(RECORDS_PER_TABLE);
+assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEA")).hasSize(RECORDS_PER_TABLE);
-Assertions.assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEB")).hasSize(RECORDS_PER_TABLE);
+assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEB")).hasSize(RECORDS_PER_TABLE);
 records.recordsForTopic("server1.DEBEZIUM.TABLEB").forEach(record -> {
 assertSchemaMatchesStruct(
 (Struct) ((Struct) record.value()).get("after"),


@@ -13,7 +13,7 @@
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -126,12 +126,12 @@ public void signalSchemaChange() throws Exception {
 final SourceRecord pre = records.get(0);
 final SourceRecord post = records.get(7);
-Assertions.assertThat(((Struct) pre.key()).schema().fields()).hasSize(1);
+assertThat(((Struct) pre.key()).schema().fields()).hasSize(1);
 final Struct postKey = (Struct) post.key();
-Assertions.assertThat(postKey.schema().fields()).hasSize(2);
+assertThat(postKey.schema().fields()).hasSize(2);
-Assertions.assertThat(postKey.schema().field("ID")).isNotNull();
+assertThat(postKey.schema().field("ID")).isNotNull();
-Assertions.assertThat(postKey.schema().field("NAME")).isNotNull();
+assertThat(postKey.schema().field("NAME")).isNotNull();
 stopConnector();
@@ -146,8 +146,8 @@ public void signalSchemaChange() throws Exception {
 final SourceRecord post2 = records.get(0);
 final Struct postKey2 = (Struct) post2.key();
-Assertions.assertThat(postKey2.schema().fields()).hasSize(2);
+assertThat(postKey2.schema().fields()).hasSize(2);
-Assertions.assertThat(postKey2.schema().field("ID")).isNotNull();
+assertThat(postKey2.schema().field("ID")).isNotNull();
-Assertions.assertThat(postKey2.schema().field("NAME")).isNotNull();
+assertThat(postKey2.schema().field("NAME")).isNotNull();
 }
 }


@@ -26,7 +26,6 @@
 import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
 import org.apache.kafka.connect.storage.FileOffsetBackingStore;
 import org.apache.kafka.connect.util.Callback;
-import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -95,14 +94,14 @@ public void shouldSerializeToJson() throws Exception {
 .notifying((records, committer) -> {
 for (ChangeEvent<String, String> r : records) {
-Assertions.assertThat(r.key()).isNotNull();
+assertThat(r.key()).isNotNull();
-Assertions.assertThat(r.value()).isNotNull();
+assertThat(r.value()).isNotNull();
 try {
 final Document key = DocumentReader.defaultReader().read(r.key());
 final Document value = DocumentReader.defaultReader().read(r.value());
-Assertions.assertThat(key.getInteger("id")).isEqualTo(1);
+assertThat(key.getInteger("id")).isEqualTo(1);
-Assertions.assertThat(value.getDocument("after").getInteger("id")).isEqualTo(1);
+assertThat(value.getDocument("after").getInteger("id")).isEqualTo(1);
-Assertions.assertThat(value.getDocument("after").getString("val")).isEqualTo("value1");
+assertThat(value.getDocument("after").getString("val")).isEqualTo("value1");
 }
 catch (IOException e) {
 throw new IllegalStateException(e);
@@ -147,8 +146,8 @@ public void shouldSerializeToAvro() throws Exception {
 @Override
 public void handle(boolean success, String message, Throwable error) {
-Assertions.assertThat(success).isFalse();
+assertThat(success).isFalse();
-Assertions.assertThat(message).contains("Failed to serialize Avro data from topic test_server.engine.test");
+assertThat(message).contains("Failed to serialize Avro data from topic test_server.engine.test");
 allLatch.countDown();
 }
 })
@@ -184,13 +183,13 @@ public void shouldSerializeToCloudEvents() throws Exception {
 for (ChangeEvent<String, String> r : records) {
 try {
 final Document key = DocumentReader.defaultReader().read(r.key());
-Assertions.assertThat(key.getInteger("id")).isEqualTo(1);
+assertThat(key.getInteger("id")).isEqualTo(1);
-Assertions.assertThat(r.value()).isNotNull();
+assertThat(r.value()).isNotNull();
 final Document value = DocumentReader.defaultReader().read(r.value());
-Assertions.assertThat(value.getString("id")).contains("txId");
+assertThat(value.getString("id")).contains("txId");
-Assertions.assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getInteger("id")).isEqualTo(1);
+assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getInteger("id")).isEqualTo(1);
-Assertions.assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getString("val")).isEqualTo("value1");
+assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getString("val")).isEqualTo("value1");
 }
 catch (IOException e) {
 throw new IllegalStateException(e);
@@ -280,7 +279,7 @@ public void connectorStopped() {
 }
 engine.close();
-Assertions.assertThat(offsetStoreSetCalls.get()).isGreaterThanOrEqualTo(1);
+assertThat(offsetStoreSetCalls.get()).isGreaterThanOrEqualTo(1);
 offsetStoreSetCalls.set(0);
 for (int i = 0; i < 100; i++) {
@@ -292,7 +291,7 @@ public void connectorStopped() {
 }
 engine.close();
-Assertions.assertThat(offsetStoreSetCalls.get()).isGreaterThanOrEqualTo(1);
+assertThat(offsetStoreSetCalls.get()).isGreaterThanOrEqualTo(1);
-Assertions.assertThat(exception.get()).isNull();
+assertThat(exception.get()).isNull();
 }
 }


@@ -7,6 +7,8 @@
 package io.debezium.connector.postgresql;
 import static io.debezium.junit.EqualityCheck.LESS_THAN;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.entry;
 import java.sql.SQLException;
 import java.util.List;
@@ -14,7 +16,6 @@
 import java.util.Set;
 import org.apache.kafka.connect.data.Struct;
-import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -169,7 +170,7 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
 "test_server.s1.a4",
 null);
 for (int i = 0; i < expectedRecordCount; i++) {
-Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i));
+assertThat(dbChanges).contains(entry(i + 1, i));
 }
 }
@@ -191,7 +192,7 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
 "test_server.s1.a42",
 null);
 for (int i = 0; i < expectedRecordCount; i++) {
-Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i));
+assertThat(dbChanges).contains(entry(i + 1, i));
 }
 }
@@ -215,7 +216,7 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
 "test_server.s1.anumeric",
 null);
 for (int i = 0; i < expectedRecordCount; i++) {
-Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i));
+assertThat(dbChanges).contains(entry(i + 1, i));
 }
 }
@@ -269,11 +270,11 @@ record -> ((Struct) record.value()).getStruct("after").getInt32(valueFieldName()
 null);
 for (int i = 0; i < expectedRecordCount; i++) {
-Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i));
+assertThat(dbChanges).contains(entry(i + 1, i));
 }
 for (int i = 0; i < expectedPartRecordCount; i++) {
-Assertions.assertThat(dbChangesPart1).contains(Assertions.entry(i + 1, i));
+assertThat(dbChangesPart1).contains(entry(i + 1, i));
-Assertions.assertThat(dbChangesPart2).contains(Assertions.entry(i + 1 + expectedPartRecordCount, i + expectedPartRecordCount));
+assertThat(dbChangesPart2).contains(entry(i + 1 + expectedPartRecordCount, i + expectedPartRecordCount));
 }
 }
@@ -292,7 +293,7 @@ record -> ((Struct) record.value()).getStruct("source"),
 null,
 topicName());
 Set<Map.Entry<Integer, Struct>> entries = dbChanges.entrySet();
-Assertions.assertThat(ROW_COUNT == entries.size());
+assertThat(ROW_COUNT == entries.size());
 for (Map.Entry<Integer, Struct> e : entries) {
 Assert.assertTrue(e.getValue().getInt64("xmin") == null);
 Assert.assertTrue(e.getValue().getInt64("lsn") == null);


@@ -47,7 +47,6 @@
 import org.apache.kafka.connect.errors.ConnectException;
 import org.apache.kafka.connect.errors.DataException;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.awaitility.Awaitility;
 import org.awaitility.core.ConditionTimeoutException;
 import org.junit.After;
@@ -363,7 +362,7 @@ public void shouldConsumeMessagesFromSnapshot() throws Exception {
 waitForSnapshotToBeCompleted();
 SourceRecords records = consumeRecordsByTopic(recordCount);
-Assertions.assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(recordCount);
+assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(recordCount);
 }
 @Test
@@ -384,7 +383,7 @@ public void shouldConsumeMessagesFromSnapshotOld() throws Exception {
 waitForSnapshotToBeCompleted();
 SourceRecords records = consumeRecordsByTopic(recordCount);
-Assertions.assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(recordCount);
+assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(recordCount);
 }
 @Test
@@ -1271,7 +1270,7 @@ public void shouldCloseTxAfterTypeQuery() throws Exception {
 SourceRecord record = records.get(0);
 VerifyRecord.isValidInsert(record, PK_FIELD, 1);
 final String isbn = new String(((Struct) record.value()).getStruct("after").getBytes("aa"));
-Assertions.assertThat(isbn).isEqualTo("0-393-04002-X");
+assertThat(isbn).isEqualTo("0-393-04002-X");
 TestHelper.assertNoOpenTransactions();
 }
@@ -1365,7 +1364,7 @@ public void shouldRegularlyFlushLsn() throws InterruptedException, SQLException
 }
 // Theoretically the LSN should change for each record but in reality there can be
 // unfortunate timings so let's suppose the change will happen in 75 % of cases
-Assertions.assertThat(flushLsn.size()).isGreaterThanOrEqualTo((recordCount * 3) / 4);
+assertThat(flushLsn.size()).isGreaterThanOrEqualTo((recordCount * 3) / 4);
 }
 @Test
@@ -1390,7 +1389,7 @@ public void shouldRegularlyFlushLsnWithTxMonitoring() throws InterruptedExceptio
 final SourceRecords firstRecords = consumeDmlRecordsByTopic(1);
 assertThat(firstRecords.topics().size()).isEqualTo(2);
 assertThat(firstRecords.recordsForTopic(txTopic).size()).isGreaterThanOrEqualTo(2);
-Assertions.assertThat(firstRecords.recordsForTopic(txTopic).get(1).sourceOffset().containsKey("lsn_commit")).isTrue();
+assertThat(firstRecords.recordsForTopic(txTopic).get(1).sourceOffset().containsKey("lsn_commit")).isTrue();
 stopConnector();
 assertConnectorNotRunning();
@@ -1422,7 +1421,7 @@ public void shouldRegularlyFlushLsnWithTxMonitoring() throws InterruptedExceptio
 }
 // Theoretically the LSN should change for each record but in reality there can be
 // unfortunate timings so let's suppose the change will happen in 75 % of cases
-Assertions.assertThat(flushLsn.size()).isGreaterThanOrEqualTo((recordCount * 3) / 4);
+assertThat(flushLsn.size()).isGreaterThanOrEqualTo((recordCount * 3) / 4);
 }
 @Test
@@ -2133,14 +2132,14 @@ public void shouldRewriteIdentityKey() throws InterruptedException {
 SourceRecords records = consumeRecordsByTopic(2);
 records.recordsForTopic("test_server.s1.a").forEach(record -> {
 Struct key = (Struct) record.key();
-Assertions.assertThat(key.get(PK_FIELD)).isNotNull();
+assertThat(key.get(PK_FIELD)).isNotNull();
-Assertions.assertThat(key.get("aa")).isNotNull();
+assertThat(key.get("aa")).isNotNull();
 });
 records.recordsForTopic("test_server.s2.a").forEach(record -> {
 Struct key = (Struct) record.key();
-Assertions.assertThat(key.get(PK_FIELD)).isNotNull();
+assertThat(key.get(PK_FIELD)).isNotNull();
-Assertions.assertThat(key.get("pk")).isNotNull();
+assertThat(key.get("pk")).isNotNull();
-Assertions.assertThat(key.schema().field("aa")).isNull();
+assertThat(key.schema().field("aa")).isNull();
 });
 stopConnector();


@@ -14,7 +14,6 @@
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,7 +57,7 @@ public void shouldSetTheNullValueInSnapshot() throws Exception {
 waitForSnapshotToBeCompleted("postgres", TestHelper.TEST_SERVER);
 final SourceRecords records = consumeRecordsByTopic(1);
-Assertions.assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(1);
+assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(1);
 final SourceRecord sourceRecord = records.allRecordsInOrder().get(0);
 assertDefaultValueChangeRecord(sourceRecord);
@@ -78,7 +77,7 @@ public void shouldSetTheNullValueInStreaming() throws Exception {
 createTableAndInsertData();
 final SourceRecords records = consumeRecordsByTopic(1);
-Assertions.assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(1);
+assertThat(records.recordsForTopic("test_server.s1.a")).hasSize(1);
 final SourceRecord sourceRecord = records.allRecordsInOrder().get(0);
 assertDefaultValueChangeRecord(sourceRecord);
@@ -144,26 +143,26 @@ private void createTableAndInsertData() {
 private void assertDefaultValueChangeRecord(SourceRecord sourceRecord) {
 final Schema valueSchema = sourceRecord.valueSchema();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dint")).isNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dint")).isNull();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc1")).isNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc1")).isNull();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc2")).isEqualTo("NULL");
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc2")).isEqualTo("NULL");
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc3")).isEqualTo("MYVALUE");
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc3")).isEqualTo("MYVALUE");
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc4")).isEqualTo("NULL");
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc4")).isEqualTo("NULL");
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc5")).isEqualTo("NULL::character varying");
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc5")).isEqualTo("NULL::character varying");
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc6")).isNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc6")).isNull();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt1")).isNotNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt1")).isNotNull();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dt2")).isNotNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dt2")).isNotNull();
-Assertions.assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt3")).isNotNull();
+assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt3")).isNotNull();
-Assertions.assertThat(valueSchema.field("after").schema().field("dint").schema().defaultValue()).isNull();
+assertThat(valueSchema.field("after").schema().field("dint").schema().defaultValue()).isNull();
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc1").schema().defaultValue()).isNull();
+assertThat(valueSchema.field("after").schema().field("dvc1").schema().defaultValue()).isNull();
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc2").schema().defaultValue()).isEqualTo("NULL");
+assertThat(valueSchema.field("after").schema().field("dvc2").schema().defaultValue()).isEqualTo("NULL");
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc3").schema().defaultValue()).isEqualTo("MYVALUE");
+assertThat(valueSchema.field("after").schema().field("dvc3").schema().defaultValue()).isEqualTo("MYVALUE");
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc4").schema().defaultValue()).isEqualTo("NULL");
+assertThat(valueSchema.field("after").schema().field("dvc4").schema().defaultValue()).isEqualTo("NULL");
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc5").schema().defaultValue()).isEqualTo("NULL::character varying");
+assertThat(valueSchema.field("after").schema().field("dvc5").schema().defaultValue()).isEqualTo("NULL::character varying");
-Assertions.assertThat(valueSchema.field("after").schema().field("dvc6").schema().defaultValue()).isNull();
+assertThat(valueSchema.field("after").schema().field("dvc6").schema().defaultValue()).isNull();
-Assertions.assertThat(valueSchema.field("after").schema().field("dt1").schema().defaultValue()).isEqualTo(0L);
+assertThat(valueSchema.field("after").schema().field("dt1").schema().defaultValue()).isEqualTo(0L);
-Assertions.assertThat(valueSchema.field("after").schema().field("dt2").schema().defaultValue()).isEqualTo(0);
+assertThat(valueSchema.field("after").schema().field("dt2").schema().defaultValue()).isEqualTo(0);
-Assertions.assertThat(valueSchema.field("after").schema().field("dt3").schema().defaultValue()).isEqualTo(0L);
+assertThat(valueSchema.field("after").schema().field("dt3").schema().defaultValue()).isEqualTo(0L);
 }
 }


@@ -5,7 +5,8 @@
 */
 package io.debezium.connector.postgresql;
-import org.assertj.core.api.Assertions;
+import static org.assertj.core.api.Assertions.assertThat;
 import org.junit.Test;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
@@ -28,37 +29,37 @@ public class PostgresErrorHandlerTest {
 @Test
 public void classifiedPSQLExceptionIsRetryable() {
 PSQLException testException = new PSQLException(A_CLASSIFIED_EXCEPTION, PSQLState.CONNECTION_FAILURE);
-Assertions.assertThat(errorHandler.isRetriable(testException)).isTrue();
+assertThat(errorHandler.isRetriable(testException)).isTrue();
 }
 @Test
 public void nonCommunicationExceptionNotRetryable() {
 Exception testException = new NullPointerException();
-Assertions.assertThat(errorHandler.isRetriable(testException)).isFalse();
+assertThat(errorHandler.isRetriable(testException)).isFalse();
 }
 @Test
 public void nullThrowableIsNotRetryable() {
-Assertions.assertThat(errorHandler.isRetriable(null)).isFalse();
+assertThat(errorHandler.isRetriable(null)).isFalse();
 }
 @Test
 public void encapsulatedPSQLExceptionIsRetriable() {
 Exception testException = new IllegalArgumentException(
 new PSQLException("definitely not a postgres error", PSQLState.CONNECTION_FAILURE));
-Assertions.assertThat(errorHandler.isRetriable(testException)).isTrue();
+assertThat(errorHandler.isRetriable(testException)).isTrue();
 }
 @Test
 public void classifiedPSQLExceptionWrappedInDebeziumExceptionIsRetryable() {
 PSQLException psqlException = new PSQLException(A_CLASSIFIED_EXCEPTION, PSQLState.CONNECTION_FAILURE);
 DebeziumException testException = new DebeziumException(psqlException);
-Assertions.assertThat(errorHandler.isRetriable(testException)).isTrue();
+assertThat(errorHandler.isRetriable(testException)).isTrue();
 }
 @Test
 public void randomUnhandledExceptionIsNotRetryable() {
 RuntimeException testException = new RuntimeException();
-Assertions.assertThat(errorHandler.isRetriable(testException)).isFalse();
+assertThat(errorHandler.isRetriable(testException)).isFalse();
 }
 }


@@ -5,6 +5,8 @@
 */
 package io.debezium.connector.postgresql;
+import static org.assertj.core.api.Assertions.assertThat;
 import java.lang.management.ManagementFactory;
 import java.time.Duration;
 import java.util.concurrent.CountDownLatch;
@@ -15,7 +17,6 @@
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
-import org.assertj.core.api.Assertions;
 import org.awaitility.Awaitility;
 import org.junit.After;
 import org.junit.Assert;
@@ -147,15 +148,15 @@ private void assertSnapshotMetrics() throws Exception {
 waitForSnapshotToBeCompleted();
 // Check snapshot metrics
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalTableCount")).isEqualTo(1);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalTableCount")).isEqualTo(1);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "CapturedTables")).isEqualTo(new String[]{ "public.simple" });
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "CapturedTables")).isEqualTo(new String[]{ "public.simple" });
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(2L);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(2L);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "RemainingTableCount")).isEqualTo(0);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "RemainingTableCount")).isEqualTo(0);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotRunning")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotRunning")).isEqualTo(false);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotAborted")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotAborted")).isEqualTo(false);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotCompleted")).isEqualTo(true);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotCompleted")).isEqualTo(true);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotPaused")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotPaused")).isEqualTo(false);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotPausedDurationInSeconds")).isEqualTo(0L);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotPausedDurationInSeconds")).isEqualTo(0L);
 }
 private void assertSnapshotNotExecutedMetrics() throws Exception {
@@ -172,13 +173,13 @@ private void assertSnapshotNotExecutedMetrics() throws Exception {
 });
 // Check snapshot metrics
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalTableCount")).isEqualTo(0);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalTableCount")).isEqualTo(0);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "CapturedTables")).isEqualTo(new String[]{});
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "CapturedTables")).isEqualTo(new String[]{});
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(0L);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(0L);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "RemainingTableCount")).isEqualTo(0);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "RemainingTableCount")).isEqualTo(0);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotRunning")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotRunning")).isEqualTo(false);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotAborted")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotAborted")).isEqualTo(false);
-Assertions.assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotCompleted")).isEqualTo(false);
+assertThat(mBeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotCompleted")).isEqualTo(false);
 }
 private void assertStreamingMetrics() throws Exception {
@@ -195,8 +196,8 @@ private void assertStreamingMetrics() throws Exception {
 // Check streaming metrics
 Testing.print("****ASSERTIONS****");
-Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "Connected")).isEqualTo(true);
+assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "Connected")).isEqualTo(true);
-Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(2L);
+assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "TotalNumberOfEventsSeen")).isEqualTo(2L);
 // todo: this does not seem to be populated?
 // Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CapturedTables")).isEqualTo(new String[] {"public.simple"});
 }
@@ -246,7 +247,7 @@ public void oneRecordInQueue() throws Exception {
 long value = (long) mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes");
 return value > 0;
 });
-Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes")).isNotEqualTo(0L);
+assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes")).isNotEqualTo(0L);
 LOGGER.info("Wait for the queue to contain second record");
 Awaitility.await()
@@ -259,7 +260,7 @@ public void oneRecordInQueue() throws Exception {
 return value == 9;
 });
 LOGGER.info("Wait for second record to be in queue");
-Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "QueueRemainingCapacity")).isEqualTo(9);
+assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "QueueRemainingCapacity")).isEqualTo(9);
 LOGGER.info("Empty queue");
 step2.countDown();
@@ -274,7 +275,7 @@ public void oneRecordInQueue() throws Exception {
 long value = (long) mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes");
 return value == 0;
 });
-Assertions.assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes")).isEqualTo(0L);
+assertThat(mBeanServer.getAttribute(getStreamingMetricsObjectName(), "CurrentQueueSizeInBytes")).isEqualTo(0L);
 stopConnector();
 }


@@ -31,7 +31,6 @@
 import org.apache.kafka.connect.data.SchemaBuilder;
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -426,7 +425,7 @@ public void shouldGenerateSnapshotsForPartitionedTables() throws Exception {
 int expectedTotalCount = expectedTopicCounts.values().stream().mapToInt(Integer::intValue).sum();
 TestConsumer consumer = testConsumer(expectedTotalCount);
-consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
+consumer.await(TestHelper.waitTimeForRecords() * 30L, TimeUnit.SECONDS);
 Map<String, Integer> actualTopicCounts = new HashMap<>();
 AtomicInteger actualTotalCount = new AtomicInteger(0);
@@ -436,7 +435,7 @@ public void shouldGenerateSnapshotsForPartitionedTables() throws Exception {
 Struct key = (Struct) record.key();
 if (key != null) {
 final Integer id = key.getInt32("pk");
-Assertions.assertThat(ids).doesNotContain(id);
+assertThat(ids).doesNotContain(id);
 ids.add(id);
 }


@@ -48,7 +48,6 @@
 import org.apache.kafka.connect.header.Header;
 import org.apache.kafka.connect.source.SourceRecord;
 import org.apache.kafka.connect.storage.MemoryOffsetBackingStore;
-import org.assertj.core.api.Assertions;
 import org.awaitility.Awaitility;
 import org.awaitility.core.ConditionTimeoutException;
 import org.junit.Before;
@@ -356,13 +355,13 @@ public void shouldReceiveChangesForInsertsCustomTypes() throws Exception {
 public void shouldProcessNotNullColumnsConnectDateTypes() throws Exception {
 final Struct before = testProcessNotNullColumns(TemporalPrecisionMode.CONNECT);
 if (before != null) {
-Assertions.assertThat(before.get("created_at")).isEqualTo(new java.util.Date(0));
+assertThat(before.get("created_at")).isEqualTo(new java.util.Date(0));
-Assertions.assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
+assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
-Assertions.assertThat(before.get("ctime")).isEqualTo(new java.util.Date(0));
+assertThat(before.get("ctime")).isEqualTo(new java.util.Date(0));
-Assertions.assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
+assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
-Assertions.assertThat(before.get("cdate")).isEqualTo(new java.util.Date(0));
+assertThat(before.get("cdate")).isEqualTo(new java.util.Date(0));
-Assertions.assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
+assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
-Assertions.assertThat(before.get("cbits")).isEqualTo(new byte[0]);
+assertThat(before.get("cbits")).isEqualTo(new byte[0]);
 }
 }
@@ -371,13 +370,13 @@ public void shouldProcessNotNullColumnsConnectDateTypes() throws Exception {
 public void shouldProcessNotNullColumnsAdaptiveDateTypes() throws Exception {
 final Struct before = testProcessNotNullColumns(TemporalPrecisionMode.ADAPTIVE);
 if (before != null) {
-Assertions.assertThat(before.get("created_at")).isEqualTo(0L);
+assertThat(before.get("created_at")).isEqualTo(0L);
-Assertions.assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
+assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
-Assertions.assertThat(before.get("ctime")).isEqualTo(0L);
+assertThat(before.get("ctime")).isEqualTo(0L);
-Assertions.assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
+assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
-Assertions.assertThat(before.get("cdate")).isEqualTo(0);
+assertThat(before.get("cdate")).isEqualTo(0);
-Assertions.assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
+assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
-Assertions.assertThat(before.get("cbits")).isEqualTo(new byte[0]);
+assertThat(before.get("cbits")).isEqualTo(new byte[0]);
 }
 }
@@ -386,13 +385,13 @@ public void shouldProcessNotNullColumnsAdaptiveDateTypes() throws Exception {
 public void shouldProcessNotNullColumnsAdaptiveMsDateTypes() throws Exception {
 final Struct before = testProcessNotNullColumns(TemporalPrecisionMode.ADAPTIVE_TIME_MICROSECONDS);
 if (before != null) {
-Assertions.assertThat(before.get("created_at")).isEqualTo(0L);
+assertThat(before.get("created_at")).isEqualTo(0L);
-Assertions.assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
+assertThat(before.get("created_at_tz")).isEqualTo("1970-01-01T00:00:00Z");
-Assertions.assertThat(before.get("ctime")).isEqualTo(0L);
+assertThat(before.get("ctime")).isEqualTo(0L);
-Assertions.assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
+assertThat(before.get("ctime_tz")).isEqualTo("00:00:00Z");
-Assertions.assertThat(before.get("cdate")).isEqualTo(0);
+assertThat(before.get("cdate")).isEqualTo(0);
-Assertions.assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
+assertThat(before.get("cmoney")).isEqualTo(new BigDecimal("0.00"));
-Assertions.assertThat(before.get("cbits")).isEqualTo(new byte[0]);
+assertThat(before.get("cbits")).isEqualTo(new byte[0]);
 }
 }
@@ -402,31 +401,31 @@ public void shouldProcessNotNullColumnsFallbacksReplicaIdentity() throws Excepti
 // Use adaptive here as its the connector default
 final Struct before = testProcessNotNullColumns(TemporalPrecisionMode.ADAPTIVE);
 if (before != null) {
-Assertions.assertThat(before.get("csmallint")).isEqualTo((short) 0);
+assertThat(before.get("csmallint")).isEqualTo((short) 0);
-Assertions.assertThat(before.get("cinteger")).isEqualTo(0);
+assertThat(before.get("cinteger")).isEqualTo(0);
-Assertions.assertThat(before.get("cbigint")).isEqualTo(0L);
+assertThat(before.get("cbigint")).isEqualTo(0L);
-Assertions.assertThat(before.get("creal")).isEqualTo(0.f);
+assertThat(before.get("creal")).isEqualTo(0.f);
-Assertions.assertThat(before.get("cbool")).isEqualTo(false);
+assertThat(before.get("cbool")).isEqualTo(false);
-Assertions.assertThat(before.get("cfloat8")).isEqualTo(0.0);
+assertThat(before.get("cfloat8")).isEqualTo(0.0);
-Assertions.assertThat(before.get("cnumeric")).isEqualTo(new BigDecimal("0.00"));
+assertThat(before.get("cnumeric")).isEqualTo(new BigDecimal("0.00"));
-Assertions.assertThat(before.get("cvarchar")).isEqualTo("");
+assertThat(before.get("cvarchar")).isEqualTo("");
-Assertions.assertThat(before.get("cbox")).isEqualTo(new byte[0]);
+assertThat(before.get("cbox")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("ccircle")).isEqualTo(new byte[0]);
+assertThat(before.get("ccircle")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("cinterval")).isEqualTo(0L);
+assertThat(before.get("cinterval")).isEqualTo(0L);
-Assertions.assertThat(before.get("cline")).isEqualTo(new byte[0]);
+assertThat(before.get("cline")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("clseg")).isEqualTo(new byte[0]);
+assertThat(before.get("clseg")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("cpath")).isEqualTo(new byte[0]);
+assertThat(before.get("cpath")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("cpoint")).isEqualTo(Point.createValue(Point.builder().build(), 0, 0));
+assertThat(before.get("cpoint")).isEqualTo(Point.createValue(Point.builder().build(), 0, 0));
-Assertions.assertThat(before.get("cpolygon")).isEqualTo(new byte[0]);
+assertThat(before.get("cpolygon")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("cchar")).isEqualTo("");
+assertThat(before.get("cchar")).isEqualTo("");
-Assertions.assertThat(before.get("ctext")).isEqualTo("");
+assertThat(before.get("ctext")).isEqualTo("");
-Assertions.assertThat(before.get("cjson")).isEqualTo("");
+assertThat(before.get("cjson")).isEqualTo("");
-Assertions.assertThat(before.get("cxml")).isEqualTo("");
+assertThat(before.get("cxml")).isEqualTo("");
-Assertions.assertThat(before.get("cuuid")).isEqualTo("");
+assertThat(before.get("cuuid")).isEqualTo("");
-Assertions.assertThat(before.get("cvarbit")).isEqualTo(new byte[0]);
+assertThat(before.get("cvarbit")).isEqualTo(new byte[0]);
-Assertions.assertThat(before.get("cinet")).isEqualTo("");
+assertThat(before.get("cinet")).isEqualTo("");
-Assertions.assertThat(before.get("ccidr")).isEqualTo("");
+assertThat(before.get("ccidr")).isEqualTo("");
-Assertions.assertThat(before.get("cmacaddr")).isEqualTo("");
+assertThat(before.get("cmacaddr")).isEqualTo("");
 }
 }
@@ -1699,7 +1698,7 @@ public void testEmptyChangesProducesHeartbeat() throws Exception {
 if (record == null) {
 return false;
 }
-Assertions.assertThat(record.valueSchema().name()).endsWith(".Heartbeat");
+assertThat(record.valueSchema().name()).endsWith(".Heartbeat");
 lsns.add((Long) record.sourceOffset().get("lsn"));
 return true;
 });
@@ -1712,7 +1711,7 @@ public void testEmptyChangesProducesHeartbeat() throws Exception {
 // Expecting changes for the empty DDL change
 Awaitility.await().atMost(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS).until(() -> {
 final SourceRecord record = consumeRecord();
-Assertions.assertThat(record.valueSchema().name()).endsWith(".Heartbeat");
+assertThat(record.valueSchema().name()).endsWith(".Heartbeat");
 lsns.add((Long) record.sourceOffset().get("lsn"));
 // CREATE SCHEMA should change LSN
 return lsns.size() == 2;
@@ -1925,10 +1924,10 @@ public void shouldStartConsumingFromSlotLocation() throws Exception {
 // After loss of offset and not doing snapshot we always stream the first record available in replication slot
 // even if we have seen it as it is not possible to make a difference from plain snapshot never mode
-Assertions.assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert2");
+assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert2");
-Assertions.assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert3");
+assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert3");
-Assertions.assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert4");
+assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("insert4");
 stopConnector();
 }


@@ -6,12 +6,13 @@
 package io.debezium.connector.postgresql;
+import static org.assertj.core.api.Assertions.assertThat;
 import java.sql.SQLException;
 import java.util.concurrent.TimeUnit;
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -72,8 +73,8 @@ public void signalLog() throws InterruptedException {
 TestHelper.execute("INSERT INTO s1.debezium_signal VALUES('1', 'log', '{\"message\": \"Signal message at offset ''{}''\"}')");
 final SourceRecords records = consumeRecordsByTopic(2);
-Assertions.assertThat(records.allRecordsInOrder()).hasSize(2);
+assertThat(records.allRecordsInOrder()).hasSize(2);
-Assertions.assertThat(logInterceptor.containsMessage("Received signal")).isTrue();
+assertThat(logInterceptor.containsMessage("Received signal")).isTrue();
 }
 @Test
@@ -102,8 +103,8 @@ public void signalingDisabled() throws InterruptedException {
 TestHelper.execute(INSERT_STMT);
 final SourceRecords records = consumeRecordsByTopic(2);
-Assertions.assertThat(records.allRecordsInOrder()).hasSize(2);
+assertThat(records.allRecordsInOrder()).hasSize(2);
-Assertions.assertThat(logInterceptor.containsMessage("Received signal")).isFalse();
+assertThat(logInterceptor.containsMessage("Received signal")).isFalse();
 }
 @Test
@@ -168,16 +169,16 @@ public void signalSchemaChange() throws InterruptedException {
 TestHelper.execute(INSERT_STMT);
 final SourceRecords records = consumeRecordsByTopic(3);
-Assertions.assertThat(records.allRecordsInOrder()).hasSize(3);
+assertThat(records.allRecordsInOrder()).hasSize(3);
 final SourceRecord pre = records.allRecordsInOrder().get(0);
 final SourceRecord post = records.allRecordsInOrder().get(2);
-Assertions.assertThat(((Struct) pre.key()).schema().fields()).hasSize(1);
+assertThat(((Struct) pre.key()).schema().fields()).hasSize(1);
 final Struct postKey = (Struct) post.key();
-Assertions.assertThat(postKey.schema().fields()).hasSize(2);
+assertThat(postKey.schema().fields()).hasSize(2);
-Assertions.assertThat(postKey.schema().field("pk")).isNotNull();
+assertThat(postKey.schema().field("pk")).isNotNull();
-Assertions.assertThat(postKey.schema().field("aa")).isNotNull();
+assertThat(postKey.schema().field("aa")).isNotNull();
 }
 }


@@ -6,13 +6,14 @@
 package io.debezium.connector.postgresql;
+import static org.assertj.core.api.Assertions.assertThat;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.Before;
 import org.junit.Test;
@@ -61,8 +62,8 @@ public void shouldUseOverriddenSelectStatementDuringSnapshotting() throws Except
 consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
 final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
-Assertions.assertThat(recordsByTopic.get("test_server.over.t1")).hasSize(3);
+assertThat(recordsByTopic.get("test_server.over.t1")).hasSize(3);
-Assertions.assertThat(recordsByTopic.get("test_server.over.t2")).hasSize(6);
+assertThat(recordsByTopic.get("test_server.over.t2")).hasSize(6);
 }
 @Test
@@ -80,8 +81,8 @@ public void shouldUseMultipleOverriddenSelectStatementsDuringSnapshotting() thro
 consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
 final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
-Assertions.assertThat(recordsByTopic.get("test_server.over.t1")).hasSize(2);
+assertThat(recordsByTopic.get("test_server.over.t1")).hasSize(2);
-Assertions.assertThat(recordsByTopic.get("test_server.over.t2")).hasSize(3);
+assertThat(recordsByTopic.get("test_server.over.t2")).hasSize(3);
 }
 private void buildProducer(Configuration.Builder config) {


@@ -6,6 +6,8 @@
 package io.debezium.connector.postgresql;
+import static org.assertj.core.api.Assertions.assertThat;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
@@ -13,7 +15,6 @@
 import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.source.SourceRecord;
-import org.assertj.core.api.Assertions;
 import org.junit.Before;
 import org.junit.Test;
@@ -54,11 +55,11 @@ public void shouldProcessFromSnapshot() throws Exception {
 TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
 consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
 final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
+assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
 }
 @Test
@@ -76,11 +77,11 @@ public void shouldProcessFromSnapshotOld() throws Exception {
 TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
 consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
 final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
+assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
 }
 @Test
@@ -100,11 +101,11 @@ public void shouldProcessFromStreaming() throws Exception {
 TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
 consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
 final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
+assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
-Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
+assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
 TestHelper.execute("UPDATE nopk.t3 SET val = 300 WHERE pk = 3;");
 TestHelper.execute("DELETE FROM nopk.t3;");
@@ -113,13 +114,13 @@ public void shouldProcessFromStreaming() throws Exception {
 final Map<String, List<SourceRecord>> recordsByTopic2 = recordsByTopic(2, consumer);
 final SourceRecord update = recordsByTopic2.get("test_server.nopk.t3").get(0);
 final SourceRecord delete = recordsByTopic2.get("test_server.nopk.t3").get(1);
-Assertions.assertThat(update.keySchema()).isNull();
+assertThat(update.keySchema()).isNull();
-Assertions.assertThat(delete.keySchema()).isNull();
+assertThat(delete.keySchema()).isNull();
-Assertions.assertThat(((Struct) update.value()).getStruct("before").get("val")).isEqualTo(30);
+assertThat(((Struct) update.value()).getStruct("before").get("val")).isEqualTo(30);
-Assertions.assertThat(((Struct) update.value()).getStruct("after").get("val")).isEqualTo(300);
+assertThat(((Struct) update.value()).getStruct("after").get("val")).isEqualTo(300);
Assertions.assertThat(((Struct) delete.value()).getStruct("before").get("val")).isEqualTo(300); assertThat(((Struct) delete.value()).getStruct("before").get("val")).isEqualTo(300);
} }
@Test @Test
@ -138,10 +139,10 @@ public void shouldProcessFromStreamingOld() throws Exception {
TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk"); TestConsumer consumer = testConsumer(expectedRecordsCount, "nopk");
consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS); consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer); final Map<String, List<SourceRecord>> recordsByTopic = recordsByTopic(expectedRecordsCount, consumer);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull(); assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1); assertThat(recordsByTopic.get("test_server.nopk.t1").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull(); assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1); assertThat(recordsByTopic.get("test_server.nopk.t2").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull(); assertThat(recordsByTopic.get("test_server.nopk.t3").get(0).keySchema()).isNull();
} }
} }
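For orientation, a minimal, self-contained sketch (not taken from this commit) of the statically imported assertThat style the hunks above converge on; the recordsByTopic map below is a plain-collections stand-in for the test helper of the same name, and the topic names are illustrative only.

    import static org.assertj.core.api.Assertions.assertThat;

    import java.util.List;
    import java.util.Map;

    public class StaticAssertThatSketch {
        public static void main(String[] args) {
            // Plain-collections stand-in for the recordsByTopic(...) helper used above.
            Map<String, List<String>> recordsByTopic = Map.of(
                    "test_server.nopk.t1", List.of("r1"),
                    "test_server.nopk.t2", List.of("r1", "r2"));

            // With the static import, call sites drop the Assertions. prefix.
            assertThat(recordsByTopic.get("test_server.nopk.t1")).hasSize(1);
            assertThat(recordsByTopic.get("test_server.nopk.t2")).hasSize(2);
            assertThat(recordsByTopic.get("test_server.nopk.t3")).isNull();
        }
    }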

View File

@ -6,6 +6,8 @@
package io.debezium.connector.postgresql; package io.debezium.connector.postgresql;
import static org.assertj.core.api.Assertions.assertThat;
import java.sql.SQLException; import java.sql.SQLException;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
@ -17,7 +19,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -94,7 +95,7 @@ public void transactionMetadata() throws InterruptedException {
final List<SourceRecord> records = new ArrayList<>(); final List<SourceRecord> records = new ArrayList<>();
// Database sometimes insert an empty transaction, we must skip those // Database sometimes insert an empty transaction, we must skip those
Awaitility.await("Skip empty transactions and find the data").atMost(Duration.ofSeconds(TestHelper.waitTimeForRecords() * 3)).until(() -> { Awaitility.await("Skip empty transactions and find the data").atMost(Duration.ofSeconds(TestHelper.waitTimeForRecords() * 3L)).until(() -> {
final List<SourceRecord> candidate = consumeRecordsByTopic(2).allRecordsInOrder(); final List<SourceRecord> candidate = consumeRecordsByTopic(2).allRecordsInOrder();
if (candidate.get(1).topic().contains("transaction")) { if (candidate.get(1).topic().contains("transaction")) {
// empty transaction, should be skipped // empty transaction, should be skipped
@ -105,7 +106,7 @@ public void transactionMetadata() throws InterruptedException {
return true; return true;
}); });
Assertions.assertThat(records).hasSize(4); assertThat(records).hasSize(4);
final String beginTxId = assertBeginTransaction(records.get(0)); final String beginTxId = assertBeginTransaction(records.get(0));
assertRecordTransactionMetadata(records.get(1), beginTxId, 1, 1); assertRecordTransactionMetadata(records.get(1), beginTxId, 1, 1);
assertRecordTransactionMetadata(records.get(2), beginTxId, 2, 1); assertRecordTransactionMetadata(records.get(2), beginTxId, 2, 1);
@ -118,13 +119,13 @@ protected String assertBeginTransaction(SourceRecord record) {
final Struct beginKey = (Struct) record.key(); final Struct beginKey = (Struct) record.key();
final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset(); final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset();
Assertions.assertThat(begin.getString("status")).isEqualTo("BEGIN"); assertThat(begin.getString("status")).isEqualTo("BEGIN");
Assertions.assertThat(begin.getInt64("event_count")).isNull(); assertThat(begin.getInt64("event_count")).isNull();
final String txId = begin.getString("id"); final String txId = begin.getString("id");
Assertions.assertThat(beginKey.getString("id")).isEqualTo(txId); assertThat(beginKey.getString("id")).isEqualTo(txId);
final String expectedId = Arrays.stream(txId.split(":")).findFirst().get(); final String expectedId = Arrays.stream(txId.split(":")).findFirst().get();
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(expectedId); assertThat(offset.get("transaction_id")).isEqualTo(expectedId);
return txId; return txId;
} }
@ -136,16 +137,15 @@ protected void assertEndTransaction(SourceRecord record, String beginTxId, long
final String expectedId = Arrays.stream(beginTxId.split(":")).findFirst().get(); final String expectedId = Arrays.stream(beginTxId.split(":")).findFirst().get();
final String expectedTxId = String.format("%s:%s", expectedId, offset.get("lsn")); final String expectedTxId = String.format("%s:%s", expectedId, offset.get("lsn"));
Assertions.assertThat(end.getString("status")).isEqualTo("END"); assertThat(end.getString("status")).isEqualTo("END");
Assertions.assertThat(end.getString("id")).isEqualTo(expectedTxId); assertThat(end.getString("id")).isEqualTo(expectedTxId);
Assertions.assertThat(end.getInt64("event_count")).isEqualTo(expectedEventCount); assertThat(end.getInt64("event_count")).isEqualTo(expectedEventCount);
Assertions.assertThat(endKey.getString("id")).isEqualTo(expectedTxId); assertThat(endKey.getString("id")).isEqualTo(expectedTxId);
Assertions assertThat(end.getArray("data_collections").stream().map(x -> (Struct) x)
.assertThat(end.getArray("data_collections").stream().map(x -> (Struct) x)
.collect(Collectors.toMap(x -> x.getString("data_collection"), x -> x.getInt64("event_count")))) .collect(Collectors.toMap(x -> x.getString("data_collection"), x -> x.getInt64("event_count"))))
.isEqualTo(expectedPerTableCount.entrySet().stream().collect(Collectors.toMap(x -> x.getKey(), x -> x.getValue().longValue()))); .isEqualTo(expectedPerTableCount.entrySet().stream().collect(Collectors.toMap(x -> x.getKey(), x -> x.getValue().longValue())));
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(expectedId); assertThat(offset.get("transaction_id")).isEqualTo(expectedId);
} }
@Override @Override
@ -155,9 +155,9 @@ protected void assertRecordTransactionMetadata(SourceRecord record, String begin
final String expectedId = Arrays.stream(beginTxId.split(":")).findFirst().get(); final String expectedId = Arrays.stream(beginTxId.split(":")).findFirst().get();
final String expectedTxId = String.format("%s:%s", expectedId, offset.get("lsn")); final String expectedTxId = String.format("%s:%s", expectedId, offset.get("lsn"));
Assertions.assertThat(change.getString("id")).isEqualTo(expectedTxId); assertThat(change.getString("id")).isEqualTo(expectedTxId);
Assertions.assertThat(change.getInt64("total_order")).isEqualTo(expectedTotalOrder); assertThat(change.getInt64("total_order")).isEqualTo(expectedTotalOrder);
Assertions.assertThat(change.getInt64("data_collection_order")).isEqualTo(expectedCollectionOrder); assertThat(change.getInt64("data_collection_order")).isEqualTo(expectedCollectionOrder);
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(expectedId); assertThat(offset.get("transaction_id")).isEqualTo(expectedId);
} }
} }
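A minimal sketch, assuming nothing beyond the JDK and AssertJ, of the collect-then-compare shape used for the data_collections check above; the table names and event counts are invented stand-ins, not values from the test.

    import static org.assertj.core.api.Assertions.assertThat;

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class CollectedMapAssertionSketch {
        public static void main(String[] args) {
            // Stand-ins for the (data_collection, event_count) pairs read from an END event.
            List<Map.Entry<String, Long>> dataCollections = List.of(
                    Map.entry("s1.table1", 2L),
                    Map.entry("s1.table2", 1L));

            Map<String, Long> actual = dataCollections.stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

            // A single statically imported assertThat over the whole collected map, rather
            // than splitting Assertions and .assertThat across separate lines.
            assertThat(actual).isEqualTo(Map.of("s1.table1", 2L, "s1.table2", 1L));
        }
    }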

View File

@ -5,6 +5,8 @@
*/ */
package io.debezium.connector.postgresql.connection; package io.debezium.connector.postgresql.connection;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import java.time.LocalDate; import java.time.LocalDate;
import java.time.LocalTime; import java.time.LocalTime;
@ -15,7 +17,6 @@
import java.time.format.TextStyle; import java.time.format.TextStyle;
import java.util.Locale; import java.util.Locale;
import org.assertj.core.api.Assertions;
import org.junit.Test; import org.junit.Test;
public class ISODateTimeFormatTest { public class ISODateTimeFormatTest {
@ -24,74 +25,74 @@ public class ISODateTimeFormatTest {
@Test @Test
public void testTimestampToInstant() { public void testTimestampToInstant() {
ZoneOffset offset = ZoneOffset.UTC; ZoneOffset offset = ZoneOffset.UTC;
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30")) assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 0, offset).toInstant()); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 0, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123")) assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, offset).toInstant()); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123000")) assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123000"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, offset).toInstant()); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123456")) assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123456"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant()); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123456")) assertThat(DateTimeFormat.get().timestampToInstant("2016-11-04 13:51:30.123456"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant()); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("0002-12-01 17:00:00 " + BCE_DISPLAY_NAME)) assertThat(DateTimeFormat.get().timestampToInstant("0002-12-01 17:00:00 " + BCE_DISPLAY_NAME))
.isEqualTo(OffsetDateTime.of(-1, 12, 1, 17, 0, 0, 0, offset).toInstant()); .isEqualTo(OffsetDateTime.of(-1, 12, 1, 17, 0, 0, 0, offset).toInstant());
Assertions.assertThat(DateTimeFormat.get().timestampToInstant("20160-11-04 13:51:30.123456")) assertThat(DateTimeFormat.get().timestampToInstant("20160-11-04 13:51:30.123456"))
.isEqualTo(OffsetDateTime.of(20160, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant()); .isEqualTo(OffsetDateTime.of(20160, 11, 4, 13, 51, 30, 123_456_000, offset).toInstant());
} }
@Test @Test
public void testTimestampWithTimeZoneToOffsetTime() { public void testTimestampWithTimeZoneToOffsetTime() {
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30+02")) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30+02"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 0, ZoneOffset.ofHours(2))); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 0, ZoneOffset.ofHours(2)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123+02")) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123+02"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, ZoneOffset.ofHours(2))); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, ZoneOffset.ofHours(2)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123000+02")) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123000+02"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, ZoneOffset.ofHours(2))); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, ZoneOffset.ofHours(2)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02")) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHours(2))); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHours(2)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02:30")) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02:30"))
.isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30))); .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02:30 " + BCE_DISPLAY_NAME)) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("2016-11-04 13:51:30.123789+02:30 " + BCE_DISPLAY_NAME))
.isEqualTo(OffsetDateTime.of(-2015, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30))); .isEqualTo(OffsetDateTime.of(-2015, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30)));
Assertions.assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("20160-11-04 13:51:30.123789+02:30 " + BCE_DISPLAY_NAME)) assertThat(DateTimeFormat.get().timestampWithTimeZoneToOffsetDateTime("20160-11-04 13:51:30.123789+02:30 " + BCE_DISPLAY_NAME))
.isEqualTo(OffsetDateTime.of(-20159, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30))); .isEqualTo(OffsetDateTime.of(-20159, 11, 4, 13, 51, 30, 123_789_000, ZoneOffset.ofHoursMinutes(2, 30)));
} }
@Test @Test
public void testDate() { public void testDate() {
Assertions.assertThat(DateTimeFormat.get().date("2016-11-04")).isEqualTo(LocalDate.of(2016, 11, 4)); assertThat(DateTimeFormat.get().date("2016-11-04")).isEqualTo(LocalDate.of(2016, 11, 4));
Assertions.assertThat(DateTimeFormat.get().date("2016-11-04 " + BCE_DISPLAY_NAME)).isEqualTo(LocalDate.of(-2015, 11, 4)); assertThat(DateTimeFormat.get().date("2016-11-04 " + BCE_DISPLAY_NAME)).isEqualTo(LocalDate.of(-2015, 11, 4));
Assertions.assertThat(DateTimeFormat.get().date("20160-11-04")).isEqualTo(LocalDate.of(20160, 11, 4)); assertThat(DateTimeFormat.get().date("20160-11-04")).isEqualTo(LocalDate.of(20160, 11, 4));
Assertions.assertThat(DateTimeFormat.get().date("20160-11-04 " + BCE_DISPLAY_NAME)).isEqualTo(LocalDate.of(-20159, 11, 4)); assertThat(DateTimeFormat.get().date("20160-11-04 " + BCE_DISPLAY_NAME)).isEqualTo(LocalDate.of(-20159, 11, 4));
Assertions.assertThat(DateTimeFormat.get().date("12345678-11-04")).isEqualTo(LocalDate.of(12345678, 11, 4)); assertThat(DateTimeFormat.get().date("12345678-11-04")).isEqualTo(LocalDate.of(12345678, 11, 4));
} }
@Test @Test
public void testTime() { public void testTime() {
Assertions.assertThat(DateTimeFormat.get().time("13:51:30")).isEqualTo(LocalTime.of(13, 51, 30)); assertThat(DateTimeFormat.get().time("13:51:30")).isEqualTo(LocalTime.of(13, 51, 30));
} }
@Test @Test
public void testTimeWithTimeZone() { public void testTimeWithTimeZone() {
Assertions.assertThat(DateTimeFormat.get().timeWithTimeZone("13:51:30+02")).isEqualTo(OffsetTime.of(11, 51, 30, 0, ZoneOffset.UTC)); assertThat(DateTimeFormat.get().timeWithTimeZone("13:51:30+02")).isEqualTo(OffsetTime.of(11, 51, 30, 0, ZoneOffset.UTC));
} }
@Test @Test
public void testSystemTimestampToInstant() { public void testSystemTimestampToInstant() {
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30Z")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30Z"))
.isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 0, ZoneOffset.UTC).toInstant()); .isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 0, ZoneOffset.UTC).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.000Z")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.000Z"))
.isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 0, ZoneOffset.UTC).toInstant()); .isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 0, ZoneOffset.UTC).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.456Z")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.456Z"))
.isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, Duration.ofMillis(456).getNano(), ZoneOffset.UTC).toInstant()); .isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, Duration.ofMillis(456).getNano(), ZoneOffset.UTC).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.345123Z")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2017-10-17 13:51:30.345123Z"))
.isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 345_123_000, ZoneOffset.UTC).toInstant()); .isEqualTo(OffsetDateTime.of(2017, 10, 17, 13, 51, 30, 345_123_000, ZoneOffset.UTC).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2018-03-22 12:30:56.824452+05:30")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2018-03-22 12:30:56.824452+05:30"))
.isEqualTo(OffsetDateTime.of(2018, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHoursMinutes(5, 30)).toInstant()); .isEqualTo(OffsetDateTime.of(2018, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHoursMinutes(5, 30)).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("2018-03-22 12:30:56.824452+05")) assertThat(DateTimeFormat.get().systemTimestampToInstant("2018-03-22 12:30:56.824452+05"))
.isEqualTo(OffsetDateTime.of(2018, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHours(5)).toInstant()); .isEqualTo(OffsetDateTime.of(2018, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHours(5)).toInstant());
Assertions.assertThat(DateTimeFormat.get().systemTimestampToInstant("20180-03-22 12:30:56.824452+05")) assertThat(DateTimeFormat.get().systemTimestampToInstant("20180-03-22 12:30:56.824452+05"))
.isEqualTo(OffsetDateTime.of(20180, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHours(5)).toInstant()); .isEqualTo(OffsetDateTime.of(20180, 3, 22, 12, 30, 56, 824_452_000, ZoneOffset.ofHours(5)).toInstant());
} }
} }
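A small sketch of the Instant comparison style used throughout the test above; it parses with the JDK because DateTimeFormat is a Debezium-internal helper and is not reproduced here.

    import static org.assertj.core.api.Assertions.assertThat;

    import java.time.Instant;
    import java.time.OffsetDateTime;
    import java.time.ZoneOffset;

    public class InstantAssertionSketch {
        public static void main(String[] args) {
            // ISO-8601 form of the "2016-11-04 13:51:30.123" timestamp used above.
            Instant parsed = Instant.parse("2016-11-04T13:51:30.123Z");

            assertThat(parsed)
                    .isEqualTo(OffsetDateTime.of(2016, 11, 4, 13, 51, 30, 123_000_000, ZoneOffset.UTC).toInstant());
        }
    }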

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.sql.SQLException; import java.sql.SQLException;
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -78,7 +79,7 @@ private void testDatabase() throws Exception {
assertConnectorIsRunning(); assertConnectorIsRunning();
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableOne")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableOne")).hasSize(1);
SourceRecord record = records.recordsForTopic("server1.testDB1.dbo.MyTableOne").get(0); SourceRecord record = records.recordsForTopic("server1.testDB1.dbo.MyTableOne").get(0);
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -94,11 +95,11 @@ private void testDatabase() throws Exception {
.name("server1.testDB1.dbo.MyTableOne.Key") .name("server1.testDB1.dbo.MyTableOne.Key")
.field("Id", Schema.INT32_SCHEMA) .field("Id", Schema.INT32_SCHEMA)
.build()); .build());
Assertions.assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(1); assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(1);
connection.execute("INSERT INTO MyTableOne VALUES(2, 'b')"); connection.execute("INSERT INTO MyTableOne VALUES(2, 'b')");
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableOne")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableOne")).hasSize(1);
record = records.recordsForTopic("server1.testDB1.dbo.MyTableOne").get(0); record = records.recordsForTopic("server1.testDB1.dbo.MyTableOne").get(0);
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -114,14 +115,14 @@ record = records.recordsForTopic("server1.testDB1.dbo.MyTableOne").get(0);
.name("server1.testDB1.dbo.MyTableOne.Key") .name("server1.testDB1.dbo.MyTableOne.Key")
.field("Id", Schema.INT32_SCHEMA) .field("Id", Schema.INT32_SCHEMA)
.build()); .build());
Assertions.assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(2); assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(2);
connection.execute( connection.execute(
"CREATE TABLE MyTableTwo (Id int primary key, ColB varchar(30))"); "CREATE TABLE MyTableTwo (Id int primary key, ColB varchar(30))");
TestHelper.enableTableCdc(connection, "MyTableTwo"); TestHelper.enableTableCdc(connection, "MyTableTwo");
connection.execute("INSERT INTO MyTableTwo VALUES(3, 'b')"); connection.execute("INSERT INTO MyTableTwo VALUES(3, 'b')");
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableTwo")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.MyTableTwo")).hasSize(1);
record = records.recordsForTopic("server1.testDB1.dbo.MyTableTwo").get(0); record = records.recordsForTopic("server1.testDB1.dbo.MyTableTwo").get(0);
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -137,6 +138,6 @@ record = records.recordsForTopic("server1.testDB1.dbo.MyTableTwo").get(0);
.name("server1.testDB1.dbo.MyTableTwo.Key") .name("server1.testDB1.dbo.MyTableTwo.Key")
.field("Id", Schema.INT32_SCHEMA) .field("Id", Schema.INT32_SCHEMA)
.build()); .build());
Assertions.assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(3); assertThat(((Struct) ((Struct) record.value()).get("after")).getInt32("Id")).isEqualTo(3);
} }
} }
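A hedged sketch of asserting fields on a Connect Struct, as the hunks above do against a record's after state; the value schema name and the ColA column are assumptions for illustration, and the snippet presumes kafka connect-api and AssertJ on the classpath.

    import static org.assertj.core.api.Assertions.assertThat;

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;
    import org.apache.kafka.connect.data.Struct;

    public class StructFieldAssertionSketch {
        public static void main(String[] args) {
            // Hand-built "after" struct standing in for the one carried by a SourceRecord.
            Schema afterSchema = SchemaBuilder.struct()
                    .name("server1.testDB1.dbo.MyTableOne.Value")
                    .field("Id", Schema.INT32_SCHEMA)
                    .field("ColA", Schema.OPTIONAL_STRING_SCHEMA)
                    .build();
            Struct after = new Struct(afterSchema)
                    .put("Id", 1)
                    .put("ColA", "a");

            assertThat(after.getInt32("Id")).isEqualTo(1);
            assertThat(after.getString("ColA")).isEqualTo("a");
        }
    }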

View File

@ -5,10 +5,11 @@
*/ */
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -74,8 +75,8 @@ public void warn() throws Exception {
connection.execute("INSERT INTO tablea VALUES (1, 'seed')"); connection.execute("INSERT INTO tablea VALUES (1, 'seed')");
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull();
// Will allow insertion of strings into what was originally a BIGINT NOT NULL column // Will allow insertion of strings into what was originally a BIGINT NOT NULL column
// This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL // This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL
@ -90,8 +91,8 @@ public void warn() throws Exception {
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE); records = consumeRecordsByTopic(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull();
Awaitility.await() Awaitility.await()
.alias("Found warning message in logs") .alias("Found warning message in logs")
@ -116,8 +117,8 @@ public void ignore() throws Exception {
connection.execute("INSERT INTO tablea VALUES (1, 'seed')"); connection.execute("INSERT INTO tablea VALUES (1, 'seed')");
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull();
// Will allow insertion of strings into what was originally a BIGINT NOT NULL column // Will allow insertion of strings into what was originally a BIGINT NOT NULL column
// This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL // This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL
@ -132,8 +133,8 @@ public void ignore() throws Exception {
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE); records = consumeRecordsByTopic(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull();
} }
@Test @Test
@ -152,8 +153,8 @@ public void fail() throws Exception {
connection.execute("INSERT INTO tablea VALUES (1, 'seed')"); connection.execute("INSERT INTO tablea VALUES (1, 'seed')");
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNull();
// Will allow insertion of strings into what was originally a BIGINT NOT NULL column // Will allow insertion of strings into what was originally a BIGINT NOT NULL column
// This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL // This will cause NumberFormatExceptions which return nulls and thus an error due to the column being NOT NULL

View File

@ -5,6 +5,8 @@
*/ */
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.math.BigDecimal; import java.math.BigDecimal;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.List; import java.util.List;
@ -13,7 +15,6 @@
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -89,13 +90,13 @@ public void decimalModeConfigString() throws Exception {
connection.execute("INSERT INTO tablenuma VALUES (111.1111, 1111111, 1111111.1, 1111111 );"); connection.execute("INSERT INTO tablenuma VALUES (111.1111, 1111111, 1111111.1, 1111111 );");
final SourceRecords records = consumeRecordsByTopic(1); final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablenuma"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablenuma");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
final Struct valueA = (Struct) tableA.get(0).value(); final Struct valueA = (Struct) tableA.get(0).value();
assertSchema(valueA, Schema.OPTIONAL_STRING_SCHEMA); assertSchema(valueA, Schema.OPTIONAL_STRING_SCHEMA);
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo("111.1111"); assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo("111.1111");
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo("1111111"); assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo("1111111");
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo("1111111.1"); assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo("1111111.1");
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo("1111111"); assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo("1111111");
stopConnector(); stopConnector();
} }
@ -121,13 +122,13 @@ public void decimalModeConfigDouble() throws Exception {
connection.execute("INSERT INTO tablenumb VALUES (222.2222, 22222, 22222.2, 2222222 );"); connection.execute("INSERT INTO tablenumb VALUES (222.2222, 22222, 22222.2, 2222222 );");
final SourceRecords records = consumeRecordsByTopic(1); final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.tablenumb"); final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.tablenumb");
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
final Struct valueA = (Struct) results.get(0).value(); final Struct valueA = (Struct) results.get(0).value();
assertSchema(valueA, Schema.OPTIONAL_FLOAT64_SCHEMA); assertSchema(valueA, Schema.OPTIONAL_FLOAT64_SCHEMA);
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(222.2222d); assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(222.2222d);
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(22222d); assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(22222d);
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(22222.2d); assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(22222.2d);
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(2222222d); assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(2222222d);
stopConnector(); stopConnector();
} }
@ -152,27 +153,27 @@ public void decimalModeConfigPrecise() throws Exception {
connection.execute("INSERT INTO tablenumc VALUES (333.3333, 3333, 3333.3, 33333333 );"); connection.execute("INSERT INTO tablenumc VALUES (333.3333, 3333, 3333.3, 33333333 );");
final SourceRecords records = consumeRecordsByTopic(1); final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.tablenumc"); final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.tablenumc");
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
final Struct valueA = (Struct) results.get(0).value(); final Struct valueA = (Struct) results.get(0).value();
Assertions.assertThat(valueA.schema().field("after").schema().field("cola").schema()) assertThat(valueA.schema().field("after").schema().field("cola").schema())
.isEqualTo(Decimal.builder(4).parameter("connect.decimal.precision", "8").optional().schema()); .isEqualTo(Decimal.builder(4).parameter("connect.decimal.precision", "8").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("colb").schema()) assertThat(valueA.schema().field("after").schema().field("colb").schema())
.isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema()); .isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("colc").schema()) assertThat(valueA.schema().field("after").schema().field("colc").schema())
.isEqualTo(Decimal.builder(1).parameter("connect.decimal.precision", "8").optional().schema()); .isEqualTo(Decimal.builder(1).parameter("connect.decimal.precision", "8").optional().schema());
Assertions.assertThat(valueA.schema().field("after").schema().field("cold").schema()) assertThat(valueA.schema().field("after").schema().field("cold").schema())
.isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema()); .isEqualTo(Decimal.builder(0).parameter("connect.decimal.precision", "18").optional().schema());
Assertions.assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(BigDecimal.valueOf(333.3333)); assertThat(((Struct) valueA.get("after")).get("cola")).isEqualTo(BigDecimal.valueOf(333.3333));
Assertions.assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(BigDecimal.valueOf(3333)); assertThat(((Struct) valueA.get("after")).get("colb")).isEqualTo(BigDecimal.valueOf(3333));
Assertions.assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(BigDecimal.valueOf(3333.3)); assertThat(((Struct) valueA.get("after")).get("colc")).isEqualTo(BigDecimal.valueOf(3333.3));
Assertions.assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(BigDecimal.valueOf(33333333)); assertThat(((Struct) valueA.get("after")).get("cold")).isEqualTo(BigDecimal.valueOf(33333333));
stopConnector(); stopConnector();
} }
private void assertSchema(Struct valueA, Schema expected) { private void assertSchema(Struct valueA, Schema expected) {
Assertions.assertThat(valueA.schema().field("after").schema().field("cola").schema()).isEqualTo(expected); assertThat(valueA.schema().field("after").schema().field("cola").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("colb").schema()).isEqualTo(expected); assertThat(valueA.schema().field("after").schema().field("colb").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("colc").schema()).isEqualTo(expected); assertThat(valueA.schema().field("after").schema().field("colc").schema()).isEqualTo(expected);
Assertions.assertThat(valueA.schema().field("after").schema().field("cold").schema()).isEqualTo(expected); assertThat(valueA.schema().field("after").schema().field("cold").schema()).isEqualTo(expected);
} }
} }
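A short sketch of the precise-mode comparison above, assuming only the JDK and AssertJ; it also notes the isEqualTo vs isEqualByComparingTo distinction, which matters because BigDecimal.equals is scale-sensitive.

    import static org.assertj.core.api.Assertions.assertThat;

    import java.math.BigDecimal;

    public class DecimalAssertionSketch {
        public static void main(String[] args) {
            // In precise mode the emitted value is a BigDecimal whose scale must match.
            BigDecimal emitted = new BigDecimal("333.3333");
            assertThat(emitted).isEqualTo(BigDecimal.valueOf(333.3333));

            // isEqualByComparingTo ignores scale; isEqualTo (BigDecimal.equals) does not.
            assertThat(new BigDecimal("3333.30")).isEqualByComparingTo(BigDecimal.valueOf(3333.3));
        }
    }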

View File

@ -15,7 +15,6 @@
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -93,30 +92,30 @@ public void streamingSchemaChanges() throws Exception {
// DDL for 3 tables // DDL for 3 tables
SourceRecords records = consumeRecordsByTopic(3); SourceRecords records = consumeRecordsByTopic(3);
final List<SourceRecord> schemaRecords = records.allRecordsInOrder(); final List<SourceRecord> schemaRecords = records.allRecordsInOrder();
Assertions.assertThat(schemaRecords).hasSize(3); assertThat(schemaRecords).hasSize(3);
schemaRecords.forEach(record -> { schemaRecords.forEach(record -> {
Assertions.assertThat(record.topic()).isEqualTo("server1"); assertThat(record.topic()).isEqualTo("server1");
Assertions.assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("testDB1"); assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("testDB1");
Assertions.assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true); assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
}); });
Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true"); assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
Assertions.assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true"); assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
Assertions.assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("last"); assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("last");
List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges"); List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges");
Assertions.assertThat(tableChanges).hasSize(1); assertThat(tableChanges).hasSize(1);
Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE"); assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");
waitForAvailableRecords(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS); waitForAvailableRecords(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES, 24); records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES, 24);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
final List<SourceRecord> tablebRecords = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tablebRecords = records.recordsForTopic("server1.testDB1.dbo.tableb");
// Additional schema change record was emitted // Additional schema change record was emitted
if (tablebRecords.size() == RECORDS_PER_TABLE - 1) { if (tablebRecords.size() == RECORDS_PER_TABLE - 1) {
tablebRecords.add(consumeRecord()); tablebRecords.add(consumeRecord());
} }
Assertions.assertThat(tablebRecords).hasSize(RECORDS_PER_TABLE); assertThat(tablebRecords).hasSize(RECORDS_PER_TABLE);
tablebRecords.forEach(record -> { tablebRecords.forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -147,22 +146,22 @@ public void streamingSchemaChanges() throws Exception {
// DDL for 1 table // DDL for 1 table
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
Assertions.assertThat(records.allRecordsInOrder()).hasSize(1); assertThat(records.allRecordsInOrder()).hasSize(1);
final SourceRecord schemaRecord = records.allRecordsInOrder().get(0); final SourceRecord schemaRecord = records.allRecordsInOrder().get(0);
Assertions.assertThat(schemaRecord.topic()).isEqualTo("server1"); assertThat(schemaRecord.topic()).isEqualTo("server1");
Assertions.assertThat(((Struct) schemaRecord.key()).getString("databaseName")).isEqualTo("testDB1"); assertThat(((Struct) schemaRecord.key()).getString("databaseName")).isEqualTo("testDB1");
Assertions.assertThat(schemaRecord.sourceOffset().get("snapshot")).isNull(); assertThat(schemaRecord.sourceOffset().get("snapshot")).isNull();
Assertions.assertThat(((Struct) schemaRecord.value()).getStruct("source").getString("snapshot")).isNull(); assertThat(((Struct) schemaRecord.value()).getStruct("source").getString("snapshot")).isNull();
tableChanges = ((Struct) schemaRecord.value()).getArray("tableChanges"); tableChanges = ((Struct) schemaRecord.value()).getArray("tableChanges");
Assertions.assertThat(tableChanges).hasSize(1); assertThat(tableChanges).hasSize(1);
Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("ALTER"); assertThat(tableChanges.get(0).get("type")).isEqualTo("ALTER");
Assertions.assertThat(lastUpdate.sourceOffset()).isEqualTo(schemaRecord.sourceOffset()); assertThat(lastUpdate.sourceOffset()).isEqualTo(schemaRecord.sourceOffset());
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -183,8 +182,8 @@ public void streamingSchemaChanges() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", 'b3')"); "INSERT INTO tableb VALUES(" + id + ", 'b3')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -225,23 +224,23 @@ public void snapshotSchemaChanges() throws Exception {
// DDL for 3 tables // DDL for 3 tables
SourceRecords records = consumeRecordsByTopic(3); SourceRecords records = consumeRecordsByTopic(3);
final List<SourceRecord> schemaRecords = records.allRecordsInOrder(); final List<SourceRecord> schemaRecords = records.allRecordsInOrder();
Assertions.assertThat(schemaRecords).hasSize(3); assertThat(schemaRecords).hasSize(3);
schemaRecords.forEach(record -> { schemaRecords.forEach(record -> {
Assertions.assertThat(record.topic()).isEqualTo("server1"); assertThat(record.topic()).isEqualTo("server1");
Assertions.assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("testDB1"); assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("testDB1");
Assertions.assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true); assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
}); });
Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true"); assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
Assertions.assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true"); assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
Assertions.assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true"); assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
final List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges"); final List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges");
Assertions.assertThat(tableChanges).hasSize(1); assertThat(tableChanges).hasSize(1);
Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE"); assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");
records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -280,7 +279,7 @@ public void schemaChangeAfterSnapshot() throws Exception {
// 1 schema event + 1 data event // 1 schema event + 1 data event
Testing.Print.enable(); Testing.Print.enable();
SourceRecords records = consumeRecordsByTopic(1 + 1); SourceRecords records = consumeRecordsByTopic(1 + 1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablec")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablec")).hasSize(1);
stopConnector(); stopConnector();
assertConnectorNotRunning(); assertConnectorNotRunning();
@ -305,7 +304,7 @@ public void schemaChangeAfterSnapshot() throws Exception {
// 1-2 schema events + 1 data event // 1-2 schema events + 1 data event
records = consumeRecordsByTopic(2 + 1); records = consumeRecordsByTopic(2 + 1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tabled")).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.tabled")).hasSize(1);
final List<SourceRecord> schemaEvents = records.recordsForTopic("server1"); final List<SourceRecord> schemaEvents = records.recordsForTopic("server1");
@ -316,8 +315,8 @@ public void schemaChangeAfterSnapshot() throws Exception {
} }
final SourceRecord schemaEventD = schemaEvents.get(schemaEvents.size() - 1); final SourceRecord schemaEventD = schemaEvents.get(schemaEvents.size() - 1);
Assertions.assertThat(((Struct) schemaEventD.value()).getStruct("source").getString("schema")).isEqualTo("dbo"); assertThat(((Struct) schemaEventD.value()).getStruct("source").getString("schema")).isEqualTo("dbo");
Assertions.assertThat(((Struct) schemaEventD.value()).getStruct("source").getString("table")).isEqualTo("tabled"); assertThat(((Struct) schemaEventD.value()).getStruct("source").getString("table")).isEqualTo("tabled");
} }
@Test @Test
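The snapshot and source-offset checks in the hunks above compare map entries and struct strings; below is a minimal stand-alone sketch of that pattern, with the offset contents invented purely for illustration.

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.assertj.core.api.Assertions.entry;

    import java.util.Map;

    public class SnapshotOffsetAssertionSketch {
        public static void main(String[] args) {
            // Invented stand-in for record.sourceOffset() while a snapshot is running.
            Map<String, Object> sourceOffset = Map.of("snapshot", true, "snapshot_completed", false);

            assertThat(sourceOffset.get("snapshot")).isEqualTo(true);
            assertThat(sourceOffset).contains(entry("snapshot", true), entry("snapshot_completed", false));
        }
    }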

View File

@ -21,7 +21,6 @@
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -298,8 +297,8 @@ public void takeSchemaOnlySnapshotAndSendHeartbeat() throws Exception {
TestHelper.waitForSnapshotToBeCompleted(); TestHelper.waitForSnapshotToBeCompleted();
final SourceRecord record = consumeRecord(); final SourceRecord record = consumeRecord();
Assertions.assertThat(record).isNotNull(); assertThat(record).isNotNull();
Assertions.assertThat(record.topic()).startsWith("__debezium-heartbeat"); assertThat(record.topic()).startsWith("__debezium-heartbeat");
} }
@Test @Test
@ -327,8 +326,8 @@ public void shouldSelectivelySnapshotTables() throws SQLException, InterruptedEx
List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.table_a"); List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.table_a");
List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_b"); List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_b");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
Assertions.assertThat(tableB).isNull(); assertThat(tableB).isNull();
TestHelper.waitForSnapshotToBeCompleted(); TestHelper.waitForSnapshotToBeCompleted();
connection.execute("INSERT INTO table_a VALUES(22, 'some_name', 556)"); connection.execute("INSERT INTO table_a VALUES(22, 'some_name', 556)");
connection.execute("INSERT INTO table_b VALUES(24, 'some_name', 558)"); connection.execute("INSERT INTO table_b VALUES(24, 'some_name', 558)");
@ -337,8 +336,8 @@ public void shouldSelectivelySnapshotTables() throws SQLException, InterruptedEx
tableA = records.recordsForTopic("server1.testDB1.dbo.table_a"); tableA = records.recordsForTopic("server1.testDB1.dbo.table_a");
tableB = records.recordsForTopic("server1.testDB1.dbo.table_b"); tableB = records.recordsForTopic("server1.testDB1.dbo.table_b");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
stopConnector(); stopConnector();
} }
@ -389,12 +388,12 @@ public void testColumnExcludeList() throws Exception {
.put("name", "some_name") .put("name", "some_name")
.put("amount", 447); .put("amount", 447);
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA) .valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA); .valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0)) SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB) .valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB); .valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
@ -424,12 +423,12 @@ public void reoderCapturedTables() throws Exception {
List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.table_a"); List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.table_a");
List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_b"); List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_b");
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
Assertions.assertThat(tableA).isNull(); assertThat(tableA).isNull();
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
tableA = records.recordsForTopic("server1.testDB1.dbo.table_a"); tableA = records.recordsForTopic("server1.testDB1.dbo.table_a");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
stopConnector(); stopConnector();
} }
@ -460,18 +459,18 @@ public void reoderCapturedTablesWithOverlappingTableWhitelist() throws Exception
List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab"); List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab");
List<SourceRecord> tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac"); List<SourceRecord> tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac");
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
Assertions.assertThat(tableA).isNull(); assertThat(tableA).isNull();
Assertions.assertThat(tableC).isNull(); assertThat(tableC).isNull();
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
tableA = records.recordsForTopic("server1.testDB1.dbo.table_a"); tableA = records.recordsForTopic("server1.testDB1.dbo.table_a");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
Assertions.assertThat(tableC).isNull(); assertThat(tableC).isNull();
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac"); tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac");
Assertions.assertThat(tableC).hasSize(1); assertThat(tableC).hasSize(1);
stopConnector(); stopConnector();
} }
@ -503,18 +502,18 @@ public void reoderCapturedTablesWithoutTableWhitelist() throws Exception {
List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab"); List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab");
List<SourceRecord> tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac"); List<SourceRecord> tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
Assertions.assertThat(tableB).isNull(); assertThat(tableB).isNull();
Assertions.assertThat(tableC).isNull(); assertThat(tableC).isNull();
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab"); tableB = records.recordsForTopic("server1.testDB1.dbo.table_ab");
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
Assertions.assertThat(tableC).isNull(); assertThat(tableC).isNull();
records = consumeRecordsByTopic(1); records = consumeRecordsByTopic(1);
tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac"); tableC = records.recordsForTopic("server1.testDB1.dbo.table_ac");
Assertions.assertThat(tableC).hasSize(1); assertThat(tableC).hasSize(1);
stopConnector(); stopConnector();
} }
@ -583,8 +582,8 @@ public void shouldHandleBracketsInSnapshotSelect() throws InterruptedException,
assertThat(recordsForTopic.get(0).key()).isNotNull(); assertThat(recordsForTopic.get(0).key()).isNotNull();
Struct value = (Struct) ((Struct) recordsForTopic.get(0).value()).get("after"); Struct value = (Struct) ((Struct) recordsForTopic.get(0).value()).get("after");
System.out.println("DATA: " + value); System.out.println("DATA: " + value);
Assertions.assertThat(value.get("id")).isEqualTo(1); assertThat(value.get("id")).isEqualTo(1);
Assertions.assertThat(value.get("name")).isEqualTo("k"); assertThat(value.get("name")).isEqualTo("k");
stopConnector(); stopConnector();
} }
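For readers skimming the hunks above, a minimal, self-contained sketch of the statically imported AssertJ style they converge on; the class name, topic string, and list contents below are illustrative placeholders, not taken from the connector tests:

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Arrays;
import java.util.List;

import org.junit.Test;

public class StaticAssertThatSketchTest {

    @Test
    public void assertionsUseTheStaticImport() {
        // With the static import the call site no longer needs the
        // Assertions.assertThat(...) qualifier; the chained, terminal
        // assertion (hasSize, isNull, isEqualTo, ...) is what actually
        // performs the check.
        List<String> topicRecords = Arrays.asList("server1.testDB1.dbo.tablea");

        assertThat(topicRecords).hasSize(1);
        assertThat(topicRecords.get(0)).isEqualTo("server1.testDB1.dbo.tablea");
    }
}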

View File

@ -5,6 +5,7 @@
*/ */
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
@ -13,7 +14,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -100,7 +100,7 @@ private Struct consume(BinaryHandlingMode binaryMode) throws InterruptedExceptio
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.binary_mode_test"); final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.binary_mode_test");
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
return (Struct) ((Struct) results.get(0).value()).get("after"); return (Struct) ((Struct) results.get(0).value()).get("after");
} }

View File

@ -5,6 +5,8 @@
*/ */
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException; import java.io.IOException;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.ArrayList; import java.util.ArrayList;
@ -16,7 +18,6 @@
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -87,8 +88,8 @@ public void addTable() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Enable CDC for already existing table // Enable CDC for already existing table
TestHelper.enableTableCdc(connection, "tablec"); TestHelper.enableTableCdc(connection, "tablec");
@ -106,8 +107,8 @@ public void addTable() throws Exception {
"INSERT INTO tabled VALUES(" + id + ", 'd')"); "INSERT INTO tabled VALUES(" + id + ", 'd')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablec")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablec")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tabled")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tabled")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tablec").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tablec").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -153,8 +154,8 @@ public void removeTable() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Disable CDC for a table // Disable CDC for a table
TestHelper.disableTableCdc(connection, "tableb"); TestHelper.disableTableCdc(connection, "tableb");
@ -167,8 +168,8 @@ public void removeTable() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", 'b2')"); "INSERT INTO tableb VALUES(" + id + ", 'b2')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE); records = consumeRecordsByTopic(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNullOrEmpty(); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).isNullOrEmpty();
} }
@Test @Test
@ -228,8 +229,8 @@ private void addColumnToTable(Configuration config, boolean pauseAfterCaptureCha
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -252,8 +253,8 @@ private void addColumnToTable(Configuration config, boolean pauseAfterCaptureCha
"INSERT INTO tableb VALUES(" + id + ", 'b2', 2)"); "INSERT INTO tableb VALUES(" + id + ", 'b2', 2)");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -279,8 +280,8 @@ private void addColumnToTable(Configuration config, boolean pauseAfterCaptureCha
"INSERT INTO tableb VALUES(" + id + ", 'b3', 3)"); "INSERT INTO tableb VALUES(" + id + ", 'b3', 3)");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -302,8 +303,8 @@ private void addColumnToTable(Configuration config, boolean pauseAfterCaptureCha
"INSERT INTO tableb VALUES(" + id + ", 'b4', 4)"); "INSERT INTO tableb VALUES(" + id + ", 'b4', 4)");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -341,8 +342,8 @@ public void removeColumnFromTable() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -366,8 +367,8 @@ public void removeColumnFromTable() throws Exception {
"INSERT INTO tableb VALUES(" + id + ")"); "INSERT INTO tableb VALUES(" + id + ")");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -387,8 +388,8 @@ public void removeColumnFromTable() throws Exception {
"INSERT INTO tableb VALUES(" + id + ")"); "INSERT INTO tableb VALUES(" + id + ")");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -427,7 +428,7 @@ public void removeColumnFromTableWithoutChangingCapture() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb2")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb2")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb2").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb2").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -463,8 +464,8 @@ public void addColumnToTableWithParallelWrites() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -498,8 +499,8 @@ public void addColumnToTableWithParallelWrites() throws Exception {
TestHelper.enableTableCdc(connection, "tableb", "after_change"); TestHelper.enableTableCdc(connection, "tableb", "after_change");
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
final AtomicInteger beforeChangeCount = new AtomicInteger(); final AtomicInteger beforeChangeCount = new AtomicInteger();
final AtomicInteger afterChangeCount = new AtomicInteger(); final AtomicInteger afterChangeCount = new AtomicInteger();
@ -509,11 +510,11 @@ public void addColumnToTableWithParallelWrites() throws Exception {
} }
else { else {
beforeChangeCount.incrementAndGet(); beforeChangeCount.incrementAndGet();
Assertions.assertThat(afterChangeCount.intValue()).isZero(); assertThat(afterChangeCount.intValue()).isZero();
} }
}); });
Assertions.assertThat(beforeChangeCount.intValue()).isPositive(); assertThat(beforeChangeCount.intValue()).isPositive();
Assertions.assertThat(afterChangeCount.intValue()).isPositive(); assertThat(afterChangeCount.intValue()).isPositive();
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START_3 + i; final int id = ID_START_3 + i;
@ -523,8 +524,8 @@ public void addColumnToTableWithParallelWrites() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", 'b1', 'b2')"); "INSERT INTO tableb VALUES(" + id + ", 'b1', 'b2')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -562,8 +563,8 @@ public void readHistoryAfterRestart() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
// Enable a second capture instance // Enable a second capture instance
connection.execute("ALTER TABLE dbo.tableb DROP COLUMN colb"); connection.execute("ALTER TABLE dbo.tableb DROP COLUMN colb");
@ -577,8 +578,8 @@ public void readHistoryAfterRestart() throws Exception {
"INSERT INTO tableb VALUES(" + id + ")"); "INSERT INTO tableb VALUES(" + id + ")");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
start(SqlServerConnector.class, config); start(SqlServerConnector.class, config);
@ -592,8 +593,8 @@ public void readHistoryAfterRestart() throws Exception {
"INSERT INTO tableb VALUES(" + id + ")"); "INSERT INTO tableb VALUES(" + id + ")");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -616,19 +617,19 @@ public void readHistoryAfterRestart() throws Exception {
} }
}); });
// 3 tables from snapshot + 1 ALTER // 3 tables from snapshot + 1 ALTER
Assertions.assertThat(changes).hasSize(3 + 1); assertThat(changes).hasSize(3 + 1);
changes.subList(0, 3).forEach(change -> { changes.subList(0, 3).forEach(change -> {
final Array changeArray = change.getArray("tableChanges"); final Array changeArray = change.getArray("tableChanges");
Assertions.assertThat(changeArray.size()).isEqualTo(1); assertThat(changeArray.size()).isEqualTo(1);
final String type = changeArray.get(0).asDocument().getString("type"); final String type = changeArray.get(0).asDocument().getString("type");
Assertions.assertThat(type).isEqualTo("CREATE"); assertThat(type).isEqualTo("CREATE");
}); });
final Array changeArray = changes.get(3).getArray("tableChanges"); final Array changeArray = changes.get(3).getArray("tableChanges");
Assertions.assertThat(changeArray.size()).isEqualTo(1); assertThat(changeArray.size()).isEqualTo(1);
final String type = changeArray.get(0).asDocument().getString("type"); final String type = changeArray.get(0).asDocument().getString("type");
final String tableIid = changeArray.get(0).asDocument().getString("id"); final String tableIid = changeArray.get(0).asDocument().getString("id");
Assertions.assertThat(type).isEqualTo("ALTER"); assertThat(type).isEqualTo("ALTER");
Assertions.assertThat(tableIid).isEqualTo("\"testDB1\".\"dbo\".\"tableb\""); assertThat(tableIid).isEqualTo("\"testDB1\".\"dbo\".\"tableb\"");
} }
@Test @Test
@ -655,8 +656,8 @@ public void renameColumn() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -682,8 +683,8 @@ public void renameColumn() throws Exception {
"INSERT INTO tableb(id,newcolb) VALUES(" + id + ", 'b2')"); "INSERT INTO tableb(id,newcolb) VALUES(" + id + ", 'b2')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -704,8 +705,8 @@ public void renameColumn() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", 'b3')"); "INSERT INTO tableb VALUES(" + id + ", 'b3')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -742,8 +743,8 @@ public void changeColumn() throws Exception {
} }
SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -756,7 +757,7 @@ public void changeColumn() throws Exception {
final Struct value = ((Struct) record.value()).getStruct("after"); final Struct value = ((Struct) record.value()).getStruct("after");
final int id = value.getInt32("id"); final int id = value.getInt32("id");
final String colb = value.getString("colb"); final String colb = value.getString("colb");
Assertions.assertThat(Integer.toString(id)).isEqualTo(colb); assertThat(Integer.toString(id)).isEqualTo(colb);
}); });
// Enable a second capture instance // Enable a second capture instance
@ -771,8 +772,8 @@ public void changeColumn() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", '" + id + " ')"); "INSERT INTO tableb VALUES(" + id + ", '" + id + " ')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
@ -786,7 +787,7 @@ public void changeColumn() throws Exception {
final Struct value = ((Struct) record.value()).getStruct("after"); final Struct value = ((Struct) record.value()).getStruct("after");
final int id = value.getInt32("id"); final int id = value.getInt32("id");
final int colb = value.getInt32("colb"); final int colb = value.getInt32("colb");
Assertions.assertThat(id).isEqualTo(colb); assertThat(id).isEqualTo(colb);
}); });
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
@ -797,8 +798,8 @@ public void changeColumn() throws Exception {
"INSERT INTO tableb VALUES(" + id + ", '" + id + " ')"); "INSERT INTO tableb VALUES(" + id + ", '" + id + " ')");
} }
records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tablea")).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE); assertThat(records.recordsForTopic("server1.testDB1.dbo.tableb")).hasSize(RECORDS_PER_TABLE);
records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> { records.recordsForTopic("server1.testDB1.dbo.tableb").forEach(record -> {
assertSchemaMatchesStruct( assertSchemaMatchesStruct(
(Struct) ((Struct) record.value()).get("after"), (Struct) ((Struct) record.value()).get("after"),
@ -811,7 +812,7 @@ public void changeColumn() throws Exception {
final Struct value = ((Struct) record.value()).getStruct("after"); final Struct value = ((Struct) record.value()).getStruct("after");
final int id = value.getInt32("id"); final int id = value.getInt32("id");
final int colb = value.getInt32("colb"); final int colb = value.getInt32("colb");
Assertions.assertThat(id).isEqualTo(colb); assertThat(id).isEqualTo(colb);
}); });
} }
@ -836,7 +837,7 @@ public void addDefaultValue() throws Exception {
TestHelper.waitForCdcRecord(connection, "tableb", "after_change", rs -> rs.getInt("id") == 1); TestHelper.waitForCdcRecord(connection, "tableb", "after_change", rs -> rs.getInt("id") == 1);
List<SourceRecord> records = consumeRecordsByTopic(1).recordsForTopic("server1.testDB1.dbo.tableb"); List<SourceRecord> records = consumeRecordsByTopic(1).recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(records).hasSize(1); assertThat(records).hasSize(1);
Testing.debug("Records: " + records); Testing.debug("Records: " + records);
Testing.debug("Value Schema: " + records.get(0).valueSchema()); Testing.debug("Value Schema: " + records.get(0).valueSchema());
Testing.debug("Fields: " + records.get(0).valueSchema().fields()); Testing.debug("Fields: " + records.get(0).valueSchema().fields());
@ -846,8 +847,8 @@ public void addDefaultValue() throws Exception {
Schema colbSchema = records.get(0).valueSchema().field("after").schema().field("colb").schema(); Schema colbSchema = records.get(0).valueSchema().field("after").schema().field("colb").schema();
Testing.debug("ColumnB Schema: " + colbSchema); Testing.debug("ColumnB Schema: " + colbSchema);
Testing.debug("ColumnB Schema Default Value: " + colbSchema.defaultValue()); Testing.debug("ColumnB Schema Default Value: " + colbSchema.defaultValue());
Assertions.assertThat(colbSchema.defaultValue()).isNotNull(); assertThat(colbSchema.defaultValue()).isNotNull();
Assertions.assertThat(colbSchema.defaultValue()).isEqualTo("default_value"); assertThat(colbSchema.defaultValue()).isEqualTo("default_value");
} }
@Test @Test
@ -875,10 +876,10 @@ public void alterDefaultValue() throws Exception {
connection.execute("INSERT INTO table_dv VALUES('2', 'some_value2')"); connection.execute("INSERT INTO table_dv VALUES('2', 'some_value2')");
List<SourceRecord> records = consumeRecordsByTopic(1).recordsForTopic("server1.testDB1.dbo.table_dv"); List<SourceRecord> records = consumeRecordsByTopic(1).recordsForTopic("server1.testDB1.dbo.table_dv");
Assertions.assertThat(records).hasSize(1); assertThat(records).hasSize(1);
Schema colbSchema = records.get(0).valueSchema().field("after").schema().field("colb").schema(); Schema colbSchema = records.get(0).valueSchema().field("after").schema().field("colb").schema();
Assertions.assertThat(colbSchema.defaultValue()).isNotNull(); assertThat(colbSchema.defaultValue()).isNotNull();
Assertions.assertThat(colbSchema.defaultValue()).isEqualTo("new_default_value"); assertThat(colbSchema.defaultValue()).isEqualTo("new_default_value");
} }
} }
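As a side note, a small sketch of how the default-value assertions above behave against a plain Kafka Connect schema; the schema here is a hypothetical stand-in for the colb column, not the schema the connector actually generates:

import static org.assertj.core.api.Assertions.assertThat;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

import org.junit.Test;

public class SchemaDefaultValueSketchTest {

    @Test
    public void defaultValueIsCarriedByTheFieldSchema() {
        // Optional string schema with a default, mirroring a column whose DDL
        // declares DEFAULT 'default_value'.
        Schema colbSchema = SchemaBuilder.string()
                .optional()
                .defaultValue("default_value")
                .build();

        assertThat(colbSchema.defaultValue()).isNotNull();
        assertThat(colbSchema.defaultValue()).isEqualTo("default_value");
    }
}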

View File

@ -6,6 +6,8 @@
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.math.BigDecimal; import java.math.BigDecimal;
import java.math.BigInteger; import java.math.BigInteger;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
@ -20,7 +22,6 @@
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -445,7 +446,7 @@ public void testAccessToCDCTableBasedOnUserRoleAccess() throws Exception {
TestHelper.enableTableCdc(connection, "testTable"); TestHelper.enableTableCdc(connection, "testTable");
// sa user should have access to CDC table // sa user should have access to CDC table
Assertions.assertThat(connection.checkIfConnectedUserHasAccessToCDCTable(TestHelper.TEST_DATABASE_1)).isTrue(); assertThat(connection.checkIfConnectedUserHasAccessToCDCTable(TestHelper.TEST_DATABASE_1)).isTrue();
} }
// Re-connect with the newly created user // Re-connect with the newly created user
@ -453,7 +454,7 @@ public void testAccessToCDCTableBasedOnUserRoleAccess() throws Exception {
TestHelper.jdbcConfig("test_user", "Password!"))) { TestHelper.jdbcConfig("test_user", "Password!"))) {
// This user shouldn't have access to CDC table // This user shouldn't have access to CDC table
connection.execute("USE testDB1"); connection.execute("USE testDB1");
Assertions.assertThat(connection.checkIfConnectedUserHasAccessToCDCTable(TestHelper.TEST_DATABASE_1)).isFalse(); assertThat(connection.checkIfConnectedUserHasAccessToCDCTable(TestHelper.TEST_DATABASE_1)).isFalse();
} }
} }
@ -462,7 +463,7 @@ public void testAccessToCDCTableBasedOnUserRoleAccess() throws Exception {
public void shouldConnectToASingleDatabase() throws Exception { public void shouldConnectToASingleDatabase() throws Exception {
TestHelper.createTestDatabase(); TestHelper.createTestDatabase();
try (SqlServerConnection connection = TestHelper.testConnection()) { try (SqlServerConnection connection = TestHelper.testConnection()) {
Assertions.assertThat(connection.connection().getCatalog()).isEqualTo(TestHelper.TEST_DATABASE_1); assertThat(connection.connection().getCatalog()).isEqualTo(TestHelper.TEST_DATABASE_1);
} }
} }
@ -471,7 +472,7 @@ public void shouldConnectToASingleDatabase() throws Exception {
public void shouldNotConnectToAnyOfMultipleDatabase() throws Exception { public void shouldNotConnectToAnyOfMultipleDatabase() throws Exception {
TestHelper.createTestDatabases(TestHelper.TEST_DATABASE_1, TestHelper.TEST_DATABASE_2); TestHelper.createTestDatabases(TestHelper.TEST_DATABASE_1, TestHelper.TEST_DATABASE_2);
try (SqlServerConnection connection = TestHelper.multiPartitionTestConnection()) { try (SqlServerConnection connection = TestHelper.multiPartitionTestConnection()) {
Assertions.assertThat(connection.connection().getCatalog()).isEqualTo("master"); assertThat(connection.connection().getCatalog()).isEqualTo("master");
} }
} }
@ -496,7 +497,7 @@ private long toNanos(OffsetDateTime datetime) {
private void assertColumnHasNotDefaultValue(Table table, String columnName) { private void assertColumnHasNotDefaultValue(Table table, String columnName) {
Column column = table.columnWithName(columnName); Column column = table.columnWithName(columnName);
Assertions.assertThat(column.hasDefaultValue()).isFalse(); assertThat(column.hasDefaultValue()).isFalse();
} }
private void assertColumnHasDefaultValue(Table table, String columnName, Object expectedValue, TableSchemaBuilder tableSchemaBuilder) { private void assertColumnHasDefaultValue(Table table, String columnName, Object expectedValue, TableSchemaBuilder tableSchemaBuilder) {
@ -507,16 +508,16 @@ private void assertColumnHasDefaultValue(Table table, String columnName, Object
Schema columnSchema = schema.getEnvelopeSchema().schema().field("after").schema().field(columnName).schema(); Schema columnSchema = schema.getEnvelopeSchema().schema().field("after").schema().field(columnName).schema();
Column column = table.columnWithName(columnName); Column column = table.columnWithName(columnName);
Assertions.assertThat(column.hasDefaultValue()).isTrue(); assertThat(column.hasDefaultValue()).isTrue();
Assertions.assertThat(columnSchema.defaultValue()).isEqualTo(expectedValue); assertThat(columnSchema.defaultValue()).isEqualTo(expectedValue);
if (expectedValue instanceof BigDecimal) { if (expectedValue instanceof BigDecimal) {
// safe cast as we know the expectedValue and column.defaultValue are equal // safe cast as we know the expectedValue and column.defaultValue are equal
BigDecimal columnValue = (BigDecimal) columnSchema.defaultValue(); BigDecimal columnValue = (BigDecimal) columnSchema.defaultValue();
BigDecimal expectedBigDecimal = (BigDecimal) expectedValue; BigDecimal expectedBigDecimal = (BigDecimal) expectedValue;
Assertions.assertThat(column.scale().isPresent()).isTrue(); assertThat(column.scale().isPresent()).isTrue();
int columnScale = column.scale().get(); int columnScale = column.scale().get();
Assertions.assertThat(columnScale).isEqualTo(columnValue.scale()); assertThat(columnScale).isEqualTo(columnValue.scale());
Assertions.assertThat(columnValue.scale()).isEqualTo(expectedBigDecimal.scale()); assertThat(columnValue.scale()).isEqualTo(expectedBigDecimal.scale());
} }
} }
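And, for the DECIMAL branch at the end of assertColumnHasDefaultValue, a reduced sketch of the scale comparison; the literals are made up for illustration:

import static org.assertj.core.api.Assertions.assertThat;

import java.math.BigDecimal;

import org.junit.Test;

public class DecimalDefaultScaleSketchTest {

    @Test
    public void scaleOfTheDefaultMatchesTheExpectedLiteral() {
        // Hypothetical decoded default value and expected literal; the scales
        // must line up before comparing the numeric values themselves.
        BigDecimal columnValue = new BigDecimal("3.140");
        BigDecimal expectedBigDecimal = new BigDecimal("3.140");

        assertThat(columnValue.scale()).isEqualTo(expectedBigDecimal.scale());
        assertThat(columnValue).isEqualByComparingTo(expectedBigDecimal);
    }
}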

View File

@ -39,7 +39,6 @@
import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -130,8 +129,8 @@ public void createAndDelete() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -157,8 +156,8 @@ public void createAndDelete() throws Exception {
final SourceRecords deleteRecords = consumeRecordsByTopic(2 * RECORDS_PER_TABLE); final SourceRecords deleteRecords = consumeRecordsByTopic(2 * RECORDS_PER_TABLE);
final List<SourceRecord> deleteTableA = deleteRecords.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> deleteTableA = deleteRecords.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> deleteTableB = deleteRecords.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> deleteTableB = deleteRecords.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(deleteTableA).isNullOrEmpty(); assertThat(deleteTableA).isNullOrEmpty();
Assertions.assertThat(deleteTableB).hasSize(2 * RECORDS_PER_TABLE); assertThat(deleteTableB).hasSize(2 * RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord deleteRecord = deleteTableB.get(i * 2); final SourceRecord deleteRecord = deleteTableB.get(i * 2);
@ -214,8 +213,8 @@ public void readOnlyApplicationIntent() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES, 24); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES, 24);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -294,13 +293,13 @@ public void timestampAndTimezone() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final long timestamp = ((Struct) recordA.value()).getStruct("source").getInt64("ts_ms"); final long timestamp = ((Struct) recordA.value()).getStruct("source").getInt64("ts_ms");
final Instant instant = Instant.ofEpochMilli(timestamp); final Instant instant = Instant.ofEpochMilli(timestamp);
Assertions.assertThat(instant.isAfter(lowerBound) && instant.isBefore(upperBound)).isTrue(); assertThat(instant.isAfter(lowerBound) && instant.isBefore(upperBound)).isTrue();
} }
stopConnector(); stopConnector();
} }
@ -339,8 +338,8 @@ public void deleteWithoutTombstone() throws Exception {
final SourceRecords deleteRecords = consumeRecordsByTopic(RECORDS_PER_TABLE); final SourceRecords deleteRecords = consumeRecordsByTopic(RECORDS_PER_TABLE);
final List<SourceRecord> deleteTableA = deleteRecords.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> deleteTableA = deleteRecords.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> deleteTableB = deleteRecords.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> deleteTableB = deleteRecords.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(deleteTableA).isNullOrEmpty(); assertThat(deleteTableA).isNullOrEmpty();
Assertions.assertThat(deleteTableB).hasSize(RECORDS_PER_TABLE); assertThat(deleteTableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord deleteRecord = deleteTableB.get(i); final SourceRecord deleteRecord = deleteTableB.get(i);
@ -385,7 +384,7 @@ public void update() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * 2);
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE * 2); assertThat(tableB).hasSize(RECORDS_PER_TABLE * 2);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
final List<SchemaAndValueField> expectedRowB = Arrays.asList( final List<SchemaAndValueField> expectedRowB = Arrays.asList(
@ -442,8 +441,8 @@ public void updatePrimaryKey() throws Exception {
final SourceRecords records = consumeRecordsByTopic(6); final SourceRecords records = consumeRecordsByTopic(6);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(3); assertThat(tableA).hasSize(3);
Assertions.assertThat(tableB).hasSize(3); assertThat(tableB).hasSize(3);
final List<SchemaAndValueField> expectedDeleteRowA = Arrays.asList( final List<SchemaAndValueField> expectedDeleteRowA = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, 1), new SchemaAndValueField("id", Schema.INT32_SCHEMA, 1),
@ -532,11 +531,11 @@ public void updatePrimaryKeyWithRestartInMiddle() throws Exception {
// Wait for snapshot completion // Wait for snapshot completion
TestHelper.waitForDatabaseSnapshotToBeCompleted(TestHelper.TEST_DATABASE_1); TestHelper.waitForDatabaseSnapshotToBeCompleted(TestHelper.TEST_DATABASE_1);
final SourceRecords snapshotRecords = consumeRecordsByTopic(1); final SourceRecords snapshotRecords = consumeRecordsByTopic(1);
Assertions.assertThat(snapshotRecords.allRecordsInOrder()).hasSize(1); assertThat(snapshotRecords.allRecordsInOrder()).hasSize(1);
connection.execute("INSERT INTO tableb VALUES(1, 'b')"); connection.execute("INSERT INTO tableb VALUES(1, 'b')");
final SourceRecords insertRecords = consumeRecordsByTopic(1); final SourceRecords insertRecords = consumeRecordsByTopic(1);
Assertions.assertThat(insertRecords.allRecordsInOrder()).hasSize(1); assertThat(insertRecords.allRecordsInOrder()).hasSize(1);
connection.setAutoCommit(false); connection.setAutoCommit(false);
@ -554,8 +553,8 @@ public void updatePrimaryKeyWithRestartInMiddle() throws Exception {
final List<SourceRecord> tableA = records1.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records1.recordsForTopic("server1.testDB1.dbo.tablea");
tableA.addAll(records2.recordsForTopic("server1.testDB1.dbo.tablea")); tableA.addAll(records2.recordsForTopic("server1.testDB1.dbo.tablea"));
final List<SourceRecord> tableB = records2.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records2.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(3); assertThat(tableA).hasSize(3);
Assertions.assertThat(tableB).hasSize(3); assertThat(tableB).hasSize(3);
final List<SchemaAndValueField> expectedDeleteRowA = Arrays.asList( final List<SchemaAndValueField> expectedDeleteRowA = Arrays.asList(
new SchemaAndValueField("id", Schema.INT32_SCHEMA, 1), new SchemaAndValueField("id", Schema.INT32_SCHEMA, 1),
@ -646,7 +645,7 @@ public void updatePrimaryKeyTwiceWithRestartInMiddleOfTx() throws Exception {
TestHelper.waitForDatabaseSnapshotToBeCompleted(TestHelper.TEST_DATABASE_1); TestHelper.waitForDatabaseSnapshotToBeCompleted(TestHelper.TEST_DATABASE_1);
final SourceRecords snapshotRecords = consumeRecordsByTopic(1); final SourceRecords snapshotRecords = consumeRecordsByTopic(1);
Assertions.assertThat(snapshotRecords.allRecordsInOrder()).hasSize(1); assertThat(snapshotRecords.allRecordsInOrder()).hasSize(1);
connection.setAutoCommit(false); connection.setAutoCommit(false);
@ -673,7 +672,7 @@ public void updatePrimaryKeyTwiceWithRestartInMiddleOfTx() throws Exception {
final List<SourceRecord> tableB = records1.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records1.recordsForTopic("server1.testDB1.dbo.tableb");
tableB.addAll(records2.recordsForTopic("server1.testDB1.dbo.tableb")); tableB.addAll(records2.recordsForTopic("server1.testDB1.dbo.tableb"));
Assertions.assertThat(tableB).hasSize(expectedRecords); assertThat(tableB).hasSize(expectedRecords);
stopConnector(); stopConnector();
} }
@ -719,8 +718,8 @@ public void streamChangesWhileStopped() throws Exception {
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = i + ID_RESTART; final int id = i + ID_RESTART;
@ -839,8 +838,8 @@ public void verifyOffsets() throws Exception {
final List<SourceRecord> tableA = sourceRecords.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = sourceRecords.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = i + ID_RESTART; final int id = i + ID_RESTART;
@ -900,8 +899,8 @@ public void testIncludeTable() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA == null || tableA.isEmpty()).isTrue(); assertThat(tableA == null || tableA.isEmpty()).isTrue();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
} }
@ -971,7 +970,7 @@ public void testTableIncludeList() throws Exception {
// Wait for snapshot completion // Wait for snapshot completion
final SourceRecords snapshotRecords = consumeRecordsByTopic(1); final SourceRecords snapshotRecords = consumeRecordsByTopic(1);
Assertions.assertThat(snapshotRecords.recordsForTopic("server1.testDB1.dbo.tableb")).isNotEmpty(); assertThat(snapshotRecords.recordsForTopic("server1.testDB1.dbo.tableb")).isNotEmpty();
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = ID_START + i; final int id = ID_START + i;
@ -984,8 +983,8 @@ public void testTableIncludeList() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA == null || tableA.isEmpty()).isTrue(); assertThat(tableA == null || tableA.isEmpty()).isTrue();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
} }
@ -1019,8 +1018,8 @@ public void testTableExcludeList() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA == null || tableA.isEmpty()).isTrue(); assertThat(tableA == null || tableA.isEmpty()).isTrue();
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
} }
@ -1061,7 +1060,7 @@ public void blacklistColumnWhenCdcColumnsDoNotMatchWithOriginalSnapshot() throws
.put("name", "some_name") .put("name", "some_name")
.put("amount", 120); .put("amount", 120);
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA) .valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA); .valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
@ -1118,12 +1117,12 @@ public void testColumnExcludeList() throws Exception {
.put("name", "some_name") .put("name", "some_name")
.put("amount", 447); .put("amount", 447);
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA) .valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA); .valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0)) SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB) .valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB); .valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
@ -1181,12 +1180,12 @@ public void testColumnIncludeList() throws Exception {
.put("name", "some_name") .put("name", "some_name")
.put("amount", 447); .put("amount", 447);
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldIsEqualTo(expectedValueA) .valueAfterFieldIsEqualTo(expectedValueA)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA); .valueAfterFieldSchemaIsEqualTo(expectedSchemaA);
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
SourceRecordAssert.assertThat(tableB.get(0)) SourceRecordAssert.assertThat(tableB.get(0))
.valueAfterFieldIsEqualTo(expectedValueB) .valueAfterFieldIsEqualTo(expectedValueB)
.valueAfterFieldSchemaIsEqualTo(expectedSchemaB); .valueAfterFieldSchemaIsEqualTo(expectedSchemaB);
@ -1347,7 +1346,7 @@ public void whenCaptureInstanceExcludesColumnsExpectSnapshotAndStreamingToExclud
.put("id", 11) .put("id", 11)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(2); assertThat(tableA).hasSize(2);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValueSnapshot); .valueAfterFieldIsEqualTo(expectedValueSnapshot);
@ -1401,7 +1400,7 @@ public void whenMultipleCaptureInstancesExcludesColumnsExpectLatestCDCTableUtili
.put("name", "some_name") .put("name", "some_name")
.put("note", "a note"); .put("note", "a note");
Assertions.assertThat(tableA).hasSize(2); assertThat(tableA).hasSize(2);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchema) .valueAfterFieldSchemaIsEqualTo(expectedSchema)
.valueAfterFieldIsEqualTo(expectedValueSnapshot); .valueAfterFieldIsEqualTo(expectedValueSnapshot);
@ -1461,7 +1460,7 @@ public void whenCaptureInstanceExcludesColumnsAndColumnsRenamedExpectNoErrors()
.put("id", 11) .put("id", 11)
.put("first_name", "some_name"); .put("first_name", "some_name");
Assertions.assertThat(tableA).hasSize(2); assertThat(tableA).hasSize(2);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchema1) .valueAfterFieldSchemaIsEqualTo(expectedSchema1)
.valueAfterFieldIsEqualTo(expectedValueSnapshot); .valueAfterFieldIsEqualTo(expectedValueSnapshot);
@ -1503,7 +1502,7 @@ public void excludeColumnWhenCaptureInstanceExcludesColumns() throws Exception {
.put("id", 10) .put("id", 10)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValueA); .valueAfterFieldIsEqualTo(expectedValueA);
@ -1548,7 +1547,7 @@ public void excludeColumnWhenCaptureInstanceExcludesColumnInMiddleOfTable() thro
.put("id", 11) .put("id", 11)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(2); assertThat(tableA).hasSize(2);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValue1); .valueAfterFieldIsEqualTo(expectedValue1);
@ -1592,7 +1591,7 @@ public void includeColumnsWhenCaptureInstanceExcludesColumnInMiddleOfTable() thr
.put("id", 10) .put("id", 10)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValueA); .valueAfterFieldIsEqualTo(expectedValueA);
@ -1634,7 +1633,7 @@ public void excludeMultipleColumnsWhenCaptureInstanceExcludesSingleColumn() thro
.put("id", 10) .put("id", 10)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValueA); .valueAfterFieldIsEqualTo(expectedValueA);
@ -1676,7 +1675,7 @@ public void includeMultipleColumnsWhenCaptureInstanceExcludesSingleColumn() thro
.put("id", 10) .put("id", 10)
.put("name", "some_name"); .put("name", "some_name");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
SourceRecordAssert.assertThat(tableA.get(0)) SourceRecordAssert.assertThat(tableA.get(0))
.valueAfterFieldSchemaIsEqualTo(expectedSchemaA) .valueAfterFieldSchemaIsEqualTo(expectedSchemaA)
.valueAfterFieldIsEqualTo(expectedValueA); .valueAfterFieldIsEqualTo(expectedValueA);
@ -1822,8 +1821,8 @@ private void restartInTheMiddleOfTx(boolean restartJustAfterSnapshot, boolean af
tableA = sourceRecords.recordsForTopic("server1.testDB1.dbo.tablea"); tableA = sourceRecords.recordsForTopic("server1.testDB1.dbo.tablea");
tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb"); tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final int id = i + ID_RESTART; final int id = i + ID_RESTART;
@ -1978,7 +1977,7 @@ public void shouldRewriteIdentityKey() throws InterruptedException, SQLException
List<SourceRecord> recordsForTopic = records.recordsForTopic("server1.testDB1.dbo.keyless"); List<SourceRecord> recordsForTopic = records.recordsForTopic("server1.testDB1.dbo.keyless");
assertThat(recordsForTopic.get(0).key()).isNotNull(); assertThat(recordsForTopic.get(0).key()).isNotNull();
Struct key = (Struct) recordsForTopic.get(0).key(); Struct key = (Struct) recordsForTopic.get(0).key();
Assertions.assertThat(key.get("id")).isNotNull(); assertThat(key.get("id")).isNotNull();
stopConnector(); stopConnector();
} }
@ -2115,8 +2114,8 @@ public void useShortTableNamesForColumnMapper() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -2125,7 +2124,7 @@ public void useShortTableNamesForColumnMapper() throws Exception {
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b")); new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct valueA = (Struct) recordA.value(); final Struct valueA = (Struct) recordA.value();
Assertions.assertThat(valueA.getStruct("after").getString("cola")).isEqualTo("****"); assertThat(valueA.getStruct("after").getString("cola")).isEqualTo("****");
final Struct valueB = (Struct) recordB.value(); final Struct valueB = (Struct) recordB.value();
assertRecord((Struct) valueB.get("after"), expectedRowB); assertRecord((Struct) valueB.get("after"), expectedRowB);
@ -2163,8 +2162,8 @@ public void useLongTableNamesForColumnMapper() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -2173,7 +2172,7 @@ public void useLongTableNamesForColumnMapper() throws Exception {
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b")); new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct valueA = (Struct) recordA.value(); final Struct valueA = (Struct) recordA.value();
Assertions.assertThat(valueA.getStruct("after").getString("cola")).isEqualTo("****"); assertThat(valueA.getStruct("after").getString("cola")).isEqualTo("****");
final Struct valueB = (Struct) recordB.value(); final Struct valueB = (Struct) recordB.value();
assertRecord((Struct) valueB.get("after"), expectedRowB); assertRecord((Struct) valueB.get("after"), expectedRowB);
@ -2211,8 +2210,8 @@ public void useLongTableNamesForKeyMapper() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -2221,7 +2220,7 @@ public void useLongTableNamesForKeyMapper() throws Exception {
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b")); new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct keyA = (Struct) recordA.key(); final Struct keyA = (Struct) recordA.key();
Assertions.assertThat(keyA.getString("cola")).isEqualTo("a"); assertThat(keyA.getString("cola")).isEqualTo("a");
final Struct valueB = (Struct) recordB.value(); final Struct valueB = (Struct) recordB.value();
assertRecord((Struct) valueB.get("after"), expectedRowB); assertRecord((Struct) valueB.get("after"), expectedRowB);
@ -2259,8 +2258,8 @@ public void useShortTableNamesForKeyMapper() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
for (int i = 0; i < RECORDS_PER_TABLE; i++) { for (int i = 0; i < RECORDS_PER_TABLE; i++) {
final SourceRecord recordA = tableA.get(i); final SourceRecord recordA = tableA.get(i);
final SourceRecord recordB = tableB.get(i); final SourceRecord recordB = tableB.get(i);
@ -2269,7 +2268,7 @@ public void useShortTableNamesForKeyMapper() throws Exception {
new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b")); new SchemaAndValueField("colb", Schema.OPTIONAL_STRING_SCHEMA, "b"));
final Struct keyA = (Struct) recordA.key(); final Struct keyA = (Struct) recordA.key();
Assertions.assertThat(keyA.getString("cola")).isEqualTo("a"); assertThat(keyA.getString("cola")).isEqualTo("a");
final Struct valueB = (Struct) recordB.value(); final Struct valueB = (Struct) recordB.value();
assertRecord((Struct) valueB.get("after"), expectedRowB); assertRecord((Struct) valueB.get("after"), expectedRowB);
@ -2376,8 +2375,8 @@ public void testMaxLsnSelectStatementWithoutLimit() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
} }
@ -2411,8 +2410,8 @@ public void testMaxLsnSelectStatementWithLimit() throws Exception {
final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES); final SourceRecords records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
stopConnector(); stopConnector();
} }
@ -2439,7 +2438,7 @@ public void shouldEmitNoEventsForSkippedUpdateAndDeleteOperations() throws Excep
final SourceRecords records = consumeRecordsByTopic(3); final SourceRecords records = consumeRecordsByTopic(3);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
Assertions.assertThat(tableA).hasSize(3); assertThat(tableA).hasSize(3);
tableA.forEach((SourceRecord record) -> { tableA.forEach((SourceRecord record) -> {
Struct value = (Struct) record.value(); Struct value = (Struct) record.value();
assertThat(value.get("op")).isEqualTo(Envelope.Operation.CREATE.code()); assertThat(value.get("op")).isEqualTo(Envelope.Operation.CREATE.code());
@ -2497,7 +2496,7 @@ public void shouldIncludeDatabaseNameIntoTopicAndSchemaNamesInMultiPartitionMode
final SourceRecords records = consumeRecordsByTopic(1); final SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
Assertions.assertThat(tableA).hasSize(1); assertThat(tableA).hasSize(1);
final SourceRecord record = tableA.get(0); final SourceRecord record = tableA.get(0);
assertThat(record.keySchema().name()).isEqualTo("server1.testDB1.dbo.tablea.Key"); assertThat(record.keySchema().name()).isEqualTo("server1.testDB1.dbo.tablea.Key");
@ -2558,10 +2557,10 @@ public void shouldApplySchemaFilters() throws Exception {
assertNull(tableS1A); assertNull(tableS1A);
assertNull(tableS1B); assertNull(tableS1B);
Assertions.assertThat(tableS2A).hasSize(1); assertThat(tableS2A).hasSize(1);
Assertions.assertThat(tableS2B).hasSize(1); assertThat(tableS2B).hasSize(1);
Assertions.assertThat(tableDboA).hasSize(1); assertThat(tableDboA).hasSize(1);
Assertions.assertThat(tableDboB).hasSize(1); assertThat(tableDboB).hasSize(1);
stopConnector(); stopConnector();
@ -2589,8 +2588,8 @@ public void shouldApplySchemaFilters() throws Exception {
tableS1A = records.recordsForTopic("server1.testDB1.s1.tablea"); tableS1A = records.recordsForTopic("server1.testDB1.s1.tablea");
tableS1B = records.recordsForTopic("server1.testDB1.s1.tableb"); tableS1B = records.recordsForTopic("server1.testDB1.s1.tableb");
Assertions.assertThat(tableS1A).hasSize(1); assertThat(tableS1A).hasSize(1);
Assertions.assertThat(tableS1B).hasSize(1); assertThat(tableS1B).hasSize(1);
assertNull(tableS2A); assertNull(tableS2A);
assertNull(tableS2B); assertNull(tableS2B);
assertNull(tableDboA); assertNull(tableDboA);

View File

@ -12,7 +12,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -72,19 +71,19 @@ public void snapshotAndStreaming() throws Exception {
SourceRecords records = consumeRecordsByTopic(4); SourceRecords records = consumeRecordsByTopic(4);
List<SourceRecord> tableA1 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableA")); List<SourceRecord> tableA1 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableA"));
Assertions.assertThat(tableA1).hasSize(1); assertThat(tableA1).hasSize(1);
assertValue(tableA1.get(0), "colA", "a1"); assertValue(tableA1.get(0), "colA", "a1");
List<SourceRecord> tableB = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableB")); List<SourceRecord> tableB = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableB"));
Assertions.assertThat(tableB).hasSize(1); assertThat(tableB).hasSize(1);
assertValue(tableB.get(0), "colB", "b"); assertValue(tableB.get(0), "colB", "b");
List<SourceRecord> tableA2 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableA")); List<SourceRecord> tableA2 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableA"));
Assertions.assertThat(tableA2).hasSize(1); assertThat(tableA2).hasSize(1);
assertValue(tableA2.get(0), "colA", "a2"); assertValue(tableA2.get(0), "colA", "a2");
List<SourceRecord> tableC = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableC")); List<SourceRecord> tableC = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableC"));
Assertions.assertThat(tableC).hasSize(1); assertThat(tableC).hasSize(1);
assertValue(tableC.get(0), "colC", "c"); assertValue(tableC.get(0), "colC", "c");
connection.execute( connection.execute(
@ -98,11 +97,11 @@ public void snapshotAndStreaming() throws Exception {
records = consumeRecordsByTopic(2); records = consumeRecordsByTopic(2);
tableA1 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableA")); tableA1 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_1, "tableA"));
Assertions.assertThat(tableA1).hasSize(1); assertThat(tableA1).hasSize(1);
assertValue(tableA1.get(0), "colA", "a1s"); assertValue(tableA1.get(0), "colA", "a1s");
tableA2 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableA")); tableA2 = records.recordsForTopic(TestHelper.topicName(TestHelper.TEST_DATABASE_2, "tableA"));
Assertions.assertThat(tableA2).hasSize(1); assertThat(tableA2).hasSize(1);
assertValue(tableA2.get(0), "colA", "a2s"); assertValue(tableA2.get(0), "colA", "a2s");
} }

View File

@ -12,7 +12,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -77,7 +76,7 @@ private Struct consume(SchemaNameAdjustmentMode adjustmentMode) throws Interrupt
SourceRecords records = consumeRecordsByTopic(1); SourceRecords records = consumeRecordsByTopic(1);
final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.name-adjustment"); final List<SourceRecord> results = records.recordsForTopic("server1.testDB1.dbo.name-adjustment");
Assertions.assertThat(results).hasSize(1); assertThat(results).hasSize(1);
return (Struct) results.get(0).value(); return (Struct) results.get(0).value();
} }

View File

@ -6,9 +6,10 @@
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.sql.SQLException; import java.sql.SQLException;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -70,8 +71,8 @@ public void shouldProcessFromSnapshot() throws Exception {
final int expectedRecordsCount = 1; final int expectedRecordsCount = 1;
final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount); final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull();
} }
@Test @Test
@ -90,14 +91,14 @@ public void shouldProcessFromStreaming() throws Exception {
final int expectedRecordsCount = 1; final int expectedRecordsCount = 1;
SourceRecords records = consumeRecordsByTopic(expectedRecordsCount); SourceRecords records = consumeRecordsByTopic(expectedRecordsCount);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull();
connection.execute("INSERT INTO t1 VALUES (2, 20, 'data2', 200);"); connection.execute("INSERT INTO t1 VALUES (2, 20, 'data2', 200);");
records = consumeRecordsByTopic(expectedRecordsCount); records = consumeRecordsByTopic(expectedRecordsCount);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key1")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("key2")).isNotNull();
connection.execute(DDL_STATEMENTS_STREAM); connection.execute(DDL_STATEMENTS_STREAM);
TestHelper.enableTableCdc(connection, "t2", "t2_CT", Collect.arrayListOf("key1", "key2")); TestHelper.enableTableCdc(connection, "t2", "t2_CT", Collect.arrayListOf("key1", "key2"));
@ -105,7 +106,7 @@ public void shouldProcessFromStreaming() throws Exception {
connection.execute("INSERT INTO t2 VALUES (2, 20, 'data2', 200);"); connection.execute("INSERT INTO t2 VALUES (2, 20, 'data2', 200);");
records = consumeRecordsByTopic(expectedRecordsCount); records = consumeRecordsByTopic(expectedRecordsCount);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("key1")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("key1")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("key2")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("key2")).isNotNull();
} }
} }

View File

@ -6,9 +6,10 @@
package io.debezium.connector.sqlserver; package io.debezium.connector.sqlserver;
import static org.assertj.core.api.Assertions.assertThat;
import java.sql.SQLException; import java.sql.SQLException;
import org.assertj.core.api.Assertions;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -68,11 +69,11 @@ public void shouldProcessFromSnapshot() throws Exception {
final int expectedRecordsCount = 1 + 1 + 1; final int expectedRecordsCount = 1 + 1 + 1;
final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount); final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("pk")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().fields()).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("pk")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().fields()).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t3").get(0).keySchema()).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t3").get(0).keySchema()).isNull();
} }
@Test @Test
@ -120,10 +121,10 @@ public void shouldProcessFromStreaming() throws Exception {
final int expectedRecordsCount = 1 + 1 + 1; final int expectedRecordsCount = 1 + 1 + 1;
final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount, 24); final SourceRecords records = consumeRecordsByTopic(expectedRecordsCount, 24);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("pk")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().fields()).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.t1").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("pk")).isNotNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().field("pk")).isNotNull();
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().fields()).hasSize(1); assertThat(records.recordsForTopic("server1.testDB1.dbo.t2").get(0).keySchema().fields()).hasSize(1);
Assertions.assertThat(records.recordsForTopic("server1.testDB1.dbo.t3").get(0).keySchema()).isNull(); assertThat(records.recordsForTopic("server1.testDB1.dbo.t3").get(0).keySchema()).isNull();
} }
} }

View File

@ -19,7 +19,6 @@
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -108,9 +107,9 @@ public void transactionMetadata() throws Exception {
final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea"); final List<SourceRecord> tableA = records.recordsForTopic("server1.testDB1.dbo.tablea");
final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb"); final List<SourceRecord> tableB = records.recordsForTopic("server1.testDB1.dbo.tableb");
final List<SourceRecord> tx = records.recordsForTopic("server1.transaction"); final List<SourceRecord> tx = records.recordsForTopic("server1.transaction");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE + 1); assertThat(tableB).hasSize(RECORDS_PER_TABLE + 1);
Assertions.assertThat(tx).hasSize(3); assertThat(tx).hasSize(3);
final List<SourceRecord> all = records.allRecordsInOrder(); final List<SourceRecord> all = records.allRecordsInOrder();
final String txId = assertBeginTransaction(all.get(0)); final String txId = assertBeginTransaction(all.get(0));
@ -292,9 +291,9 @@ private void restartInTheMiddleOfTx(boolean restartJustAfterSnapshot, boolean af
tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb"); tableB = sourceRecords.recordsForTopic("server1.testDB1.dbo.tableb");
List<SourceRecord> txMetadata = sourceRecords.recordsForTopic("server1.transaction"); List<SourceRecord> txMetadata = sourceRecords.recordsForTopic("server1.transaction");
Assertions.assertThat(tableA).hasSize(RECORDS_PER_TABLE); assertThat(tableA).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(tableB).hasSize(RECORDS_PER_TABLE); assertThat(tableB).hasSize(RECORDS_PER_TABLE);
Assertions.assertThat(txMetadata).hasSize(1 + 2 * RECORDS_PER_TABLE - 1); assertThat(txMetadata).hasSize(1 + 2 * RECORDS_PER_TABLE - 1);
assertEndTransaction(txMetadata.get(0), batchTxId, 2 * RECORDS_PER_TABLE, assertEndTransaction(txMetadata.get(0), batchTxId, 2 * RECORDS_PER_TABLE,
Collect.hashMapOf("testDB1.dbo.tablea", RECORDS_PER_TABLE, "testDB1.dbo.tableb", RECORDS_PER_TABLE)); Collect.hashMapOf("testDB1.dbo.tablea", RECORDS_PER_TABLE, "testDB1.dbo.tableb", RECORDS_PER_TABLE));

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.data; package io.debezium.data;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.List; import java.util.List;
import java.util.function.Supplier; import java.util.function.Supplier;
import org.apache.kafka.connect.data.Field; import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.assertj.core.api.Assertions;
public class SchemaAndValueField { public class SchemaAndValueField {
private final Schema schema; private final Schema schema;
@ -41,18 +42,18 @@ private void assertValue(Struct content) {
} }
if (value == null) { if (value == null) {
Assertions.assertThat(content.get(fieldName)).as(fieldName + " is present in the actual content").isNull(); assertThat(content.get(fieldName)).as(fieldName + " is present in the actual content").isNull();
return; return;
} }
Object actualValue = content.get(fieldName); Object actualValue = content.get(fieldName);
Assertions.assertThat(actualValue).as(fieldName + " is not present in the actual content").isNotNull(); assertThat(actualValue).as(fieldName + " is not present in the actual content").isNotNull();
// assert the value type; for List all implementation types (e.g. immutable ones) are acceptable // assert the value type; for List all implementation types (e.g. immutable ones) are acceptable
if (actualValue instanceof List) { if (actualValue instanceof List) {
Assertions.assertThat(value).as("Incorrect value type for " + fieldName).isInstanceOf(List.class); assertThat(value).as("Incorrect value type for " + fieldName).isInstanceOf(List.class);
final List<?> actualValueList = (List<?>) actualValue; final List<?> actualValueList = (List<?>) actualValue;
final List<?> valueList = (List<?>) value; final List<?> valueList = (List<?>) value;
Assertions.assertThat(actualValueList).as("List size doesn't match for " + fieldName).hasSize(valueList.size()); assertThat(actualValueList).as("List size doesn't match for " + fieldName).hasSize(valueList.size());
if (!valueList.isEmpty() && valueList.iterator().next() instanceof Struct) { if (!valueList.isEmpty() && valueList.iterator().next() instanceof Struct) {
for (int i = 0; i < valueList.size(); i++) { for (int i = 0; i < valueList.size(); i++) {
assertStruct((Struct) valueList.get(i), (Struct) actualValueList.get(i)); assertStruct((Struct) valueList.get(i), (Struct) actualValueList.get(i));
@ -61,17 +62,17 @@ private void assertValue(Struct content) {
} }
} }
else { else {
Assertions.assertThat(actualValue.getClass()).as("Incorrect value type for " + fieldName).isEqualTo(value.getClass()); assertThat(actualValue.getClass()).as("Incorrect value type for " + fieldName).isEqualTo(value.getClass());
} }
if (actualValue instanceof byte[]) { if (actualValue instanceof byte[]) {
Assertions.assertThat((byte[]) actualValue).as("Values don't match for " + fieldName).isEqualTo((byte[]) value); assertThat((byte[]) actualValue).as("Values don't match for " + fieldName).isEqualTo((byte[]) value);
} }
else if (actualValue instanceof Struct) { else if (actualValue instanceof Struct) {
assertStruct((Struct) value, (Struct) actualValue); assertStruct((Struct) value, (Struct) actualValue);
} }
else { else {
Assertions.assertThat(actualValue).as("Values don't match for " + fieldName).isEqualTo(value); assertThat(actualValue).as("Values don't match for " + fieldName).isEqualTo(value);
} }
} }
@ -79,21 +80,21 @@ private void assertStruct(final Struct expectedStruct, final Struct actualStruct
expectedStruct.schema().fields().stream().forEach(field -> { expectedStruct.schema().fields().stream().forEach(field -> {
final Object expectedValue = expectedStruct.get(field); final Object expectedValue = expectedStruct.get(field);
if (expectedValue == null) { if (expectedValue == null) {
Assertions.assertThat(actualStruct.get(field.name())).as(fieldName + " is present in the actual content").isNull(); assertThat(actualStruct.get(field.name())).as(fieldName + " is present in the actual content").isNull();
return; return;
} }
final Object actualValue = actualStruct.get(field.name()); final Object actualValue = actualStruct.get(field.name());
Assertions.assertThat(actualValue).as("No value found for " + fieldName).isNotNull(); assertThat(actualValue).as("No value found for " + fieldName).isNotNull();
Assertions.assertThat(actualValue.getClass()).as("Incorrect value type for " + fieldName).isEqualTo(expectedValue.getClass()); assertThat(actualValue.getClass()).as("Incorrect value type for " + fieldName).isEqualTo(expectedValue.getClass());
if (actualValue instanceof byte[]) { if (actualValue instanceof byte[]) {
Assertions.assertThat(expectedValue).as("Array is not expected for " + fieldName).isInstanceOf(byte[].class); assertThat(expectedValue).as("Array is not expected for " + fieldName).isInstanceOf(byte[].class);
Assertions.assertThat((byte[]) actualValue).as("Values don't match for " + fieldName).isEqualTo((byte[]) expectedValue); assertThat((byte[]) actualValue).as("Values don't match for " + fieldName).isEqualTo((byte[]) expectedValue);
} }
else if (actualValue instanceof Struct) { else if (actualValue instanceof Struct) {
assertStruct((Struct) expectedValue, (Struct) actualValue); assertStruct((Struct) expectedValue, (Struct) actualValue);
} }
else { else {
Assertions.assertThat(actualValue).as("Values don't match for " + fieldName).isEqualTo(expectedValue); assertThat(actualValue).as("Values don't match for " + fieldName).isEqualTo(expectedValue);
} }
}); });
} }
@ -104,7 +105,7 @@ private void assertSchema(Struct content) {
} }
Schema schema = content.schema(); Schema schema = content.schema();
Field field = schema.field(fieldName); Field field = schema.field(fieldName);
Assertions.assertThat(field).as(fieldName + " not found in schema " + schema).isNotNull(); assertThat(field).as(fieldName + " not found in schema " + schema).isNotNull();
VerifyRecord.assertConnectSchemasAreEqual(field.name(), field.schema(), this.schema); VerifyRecord.assertConnectSchemasAreEqual(field.name(), field.schema(), this.schema);
} }

View File

@ -5,12 +5,13 @@
*/ */
package io.debezium.pipeline; package io.debezium.pipeline;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.RetriableException; import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.source.SourceConnector; import org.apache.kafka.connect.source.SourceConnector;
import org.assertj.core.api.Assertions;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@ -66,7 +67,7 @@ public void nonRetriableByDefault() throws Exception {
Assert.fail("Exception must be thrown"); Assert.fail("Exception must be thrown");
} }
catch (ConnectException e) { catch (ConnectException e) {
Assertions.assertThat(e instanceof RetriableException).isFalse(); assertThat(e instanceof RetriableException).isFalse();
} }
} }
@ -92,7 +93,7 @@ public void customRetriableMatch() throws Exception {
Assert.fail("Exception must be thrown"); Assert.fail("Exception must be thrown");
} }
catch (ConnectException e) { catch (ConnectException e) {
Assertions.assertThat(e instanceof RetriableException).isTrue(); assertThat(e instanceof RetriableException).isTrue();
} }
} }
@ -111,7 +112,7 @@ public void customRetriableNoMatch() throws Exception {
Assert.fail("Exception must be thrown"); Assert.fail("Exception must be thrown");
} }
catch (ConnectException e) { catch (ConnectException e) {
Assertions.assertThat(e instanceof RetriableException).isFalse(); assertThat(e instanceof RetriableException).isFalse();
} }
} }
@ -130,7 +131,7 @@ public void customRetriableMatchNested() throws Exception {
Assert.fail("Exception must be thrown"); Assert.fail("Exception must be thrown");
} }
catch (ConnectException e) { catch (ConnectException e) {
Assertions.assertThat(e instanceof RetriableException).isTrue(); assertThat(e instanceof RetriableException).isTrue();
} }
} }

View File

@ -5,9 +5,10 @@
*/ */
package io.debezium.pipeline.source.snapshot.incremental; package io.debezium.pipeline.source.snapshot.incremental;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Optional; import java.util.Optional;
import org.assertj.core.api.Assertions;
import org.junit.Test; import org.junit.Test;
import io.debezium.config.Configuration; import io.debezium.config.Configuration;
@ -66,10 +67,10 @@ public void testBuildQueryOnePkColumn() {
.addColumn(val1) .addColumn(val1)
.addColumn(val2) .addColumn(val2)
.setPrimaryKeyNames("pk1").create(); .setPrimaryKeyNames("pk1").create();
Assertions.assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\" LIMIT 1024"); assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\" LIMIT 1024");
context.nextChunkPosition(new Object[]{ 1, 5 }); context.nextChunkPosition(new Object[]{ 1, 5 });
context.maximumKey(new Object[]{ 10, 50 }); context.maximumKey(new Object[]{ 10, 50 });
Assertions.assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo( assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo(
"SELECT * FROM \"s1\".\"table1\" WHERE (\"pk1\" > ?) AND NOT (\"pk1\" > ?) ORDER BY \"pk1\" LIMIT 1024"); "SELECT * FROM \"s1\".\"table1\" WHERE (\"pk1\" > ?) AND NOT (\"pk1\" > ?) ORDER BY \"pk1\" LIMIT 1024");
} }
@ -88,11 +89,11 @@ public void testBuildQueryOnePkColumnWithAdditionalCondition() {
.addColumn(val1) .addColumn(val1)
.addColumn(val2) .addColumn(val2)
.setPrimaryKeyNames("pk1").create(); .setPrimaryKeyNames("pk1").create();
Assertions.assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))) assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo")))
.isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\" LIMIT 1024"); .isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\" LIMIT 1024");
context.nextChunkPosition(new Object[]{ 1, 5 }); context.nextChunkPosition(new Object[]{ 1, 5 });
context.maximumKey(new Object[]{ 10, 50 }); context.maximumKey(new Object[]{ 10, 50 });
Assertions.assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))).isEqualTo( assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))).isEqualTo(
"SELECT * FROM \"s1\".\"table1\" WHERE (\"pk1\" > ?) AND NOT (\"pk1\" > ?) AND \"val1\"=foo ORDER BY \"pk1\" LIMIT 1024"); "SELECT * FROM \"s1\".\"table1\" WHERE (\"pk1\" > ?) AND NOT (\"pk1\" > ?) AND \"val1\"=foo ORDER BY \"pk1\" LIMIT 1024");
} }
@ -115,10 +116,10 @@ public void testBuildQueryThreePkColumns() {
.addColumn(val1) .addColumn(val1)
.addColumn(val2) .addColumn(val2)
.setPrimaryKeyNames("pk1", "pk2", "pk3").create(); .setPrimaryKeyNames("pk1", "pk2", "pk3").create();
Assertions.assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024"); assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024");
context.nextChunkPosition(new Object[]{ 1, 5 }); context.nextChunkPosition(new Object[]{ 1, 5 });
context.maximumKey(new Object[]{ 10, 50 }); context.maximumKey(new Object[]{ 10, 50 });
Assertions.assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo( assertThat(source.buildChunkQuery(table, Optional.empty())).isEqualTo(
"SELECT * FROM \"s1\".\"table1\" WHERE ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND NOT ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024"); "SELECT * FROM \"s1\".\"table1\" WHERE ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND NOT ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024");
} }
@ -141,11 +142,11 @@ public void testBuildQueryThreePkColumnsWithAdditionalCondition() {
.addColumn(val1) .addColumn(val1)
.addColumn(val2) .addColumn(val2)
.setPrimaryKeyNames("pk1", "pk2", "pk3").create(); .setPrimaryKeyNames("pk1", "pk2", "pk3").create();
Assertions.assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))) assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo")))
.isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024"); .isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024");
context.nextChunkPosition(new Object[]{ 1, 5 }); context.nextChunkPosition(new Object[]{ 1, 5 });
context.maximumKey(new Object[]{ 10, 50 }); context.maximumKey(new Object[]{ 10, 50 });
Assertions.assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))).isEqualTo( assertThat(source.buildChunkQuery(table, Optional.of("\"val1\"=foo"))).isEqualTo(
"SELECT * FROM \"s1\".\"table1\" WHERE ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND NOT ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND \"val1\"=foo ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024"); "SELECT * FROM \"s1\".\"table1\" WHERE ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND NOT ((\"pk1\" > ?) OR (\"pk1\" = ? AND \"pk2\" > ?) OR (\"pk1\" = ? AND \"pk2\" = ? AND \"pk3\" > ?)) AND \"val1\"=foo ORDER BY \"pk1\", \"pk2\", \"pk3\" LIMIT 1024");
} }
@ -160,7 +161,7 @@ public void testMaxQuery() {
final Column val2 = Column.editor().name("val2").create(); final Column val2 = Column.editor().name("val2").create();
final Table table = Table.editor().tableId(new TableId(null, "s1", "table1")).addColumn(pk1).addColumn(pk2) final Table table = Table.editor().tableId(new TableId(null, "s1", "table1")).addColumn(pk1).addColumn(pk2)
.addColumn(val1).addColumn(val2).setPrimaryKeyNames("pk1", "pk2").create(); .addColumn(val1).addColumn(val2).setPrimaryKeyNames("pk1", "pk2").create();
Assertions.assertThat(source.buildMaxPrimaryKeyQuery(table, Optional.empty())) assertThat(source.buildMaxPrimaryKeyQuery(table, Optional.empty()))
.isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\" DESC, \"pk2\" DESC LIMIT 1"); .isEqualTo("SELECT * FROM \"s1\".\"table1\" ORDER BY \"pk1\" DESC, \"pk2\" DESC LIMIT 1");
} }
@ -175,7 +176,7 @@ public void testMaxQueryWithAdditionalCondition() {
final Column val2 = Column.editor().name("val2").create(); final Column val2 = Column.editor().name("val2").create();
final Table table = Table.editor().tableId(new TableId(null, "s1", "table1")).addColumn(pk1).addColumn(pk2) final Table table = Table.editor().tableId(new TableId(null, "s1", "table1")).addColumn(pk1).addColumn(pk2)
.addColumn(val1).addColumn(val2).setPrimaryKeyNames("pk1", "pk2").create(); .addColumn(val1).addColumn(val2).setPrimaryKeyNames("pk1", "pk2").create();
Assertions.assertThat(source.buildMaxPrimaryKeyQuery(table, Optional.of("\"val1\"=foo"))) assertThat(source.buildMaxPrimaryKeyQuery(table, Optional.of("\"val1\"=foo")))
.isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\" DESC, \"pk2\" DESC LIMIT 1"); .isEqualTo("SELECT * FROM \"s1\".\"table1\" WHERE \"val1\"=foo ORDER BY \"pk1\" DESC, \"pk2\" DESC LIMIT 1");
} }
@ -191,7 +192,7 @@ config, new JdbcConnection(config.getJdbcConfig(), c -> null, "\"", "\""), null,
source.setContext(context); source.setContext(context);
String actualProjection = source.buildChunkQuery(createTwoPrimaryKeysTable(), Optional.empty()); String actualProjection = source.buildChunkQuery(createTwoPrimaryKeysTable(), Optional.empty());
String expectedProjection = "SELECT \"pk1\", \"pk2\", \"val1\", \"val2\" FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\" LIMIT 1024"; String expectedProjection = "SELECT \"pk1\", \"pk2\", \"val1\", \"val2\" FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\" LIMIT 1024";
Assertions.assertThat(actualProjection).isEqualTo(expectedProjection); assertThat(actualProjection).isEqualTo(expectedProjection);
} }
@Test @Test
@ -206,7 +207,7 @@ config, new JdbcConnection(config.getJdbcConfig(), c -> null, "\"", "\""), null,
source.setContext(context); source.setContext(context);
String actualProjection = source.buildChunkQuery(createTwoPrimaryKeysTable(), Optional.empty()); String actualProjection = source.buildChunkQuery(createTwoPrimaryKeysTable(), Optional.empty());
String expectedProjection = "SELECT \"pk1\", \"val1\", \"val2\" FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\" LIMIT 1024"; String expectedProjection = "SELECT \"pk1\", \"val1\", \"val2\" FROM \"s1\".\"table1\" ORDER BY \"pk1\", \"pk2\" LIMIT 1024";
Assertions.assertThat(actualProjection).isEqualTo(expectedProjection); assertThat(actualProjection).isEqualTo(expectedProjection);
} }
private Table createTwoPrimaryKeysTable() { private Table createTwoPrimaryKeysTable() {

View File

@ -5,13 +5,14 @@
*/ */
package io.debezium.serde; package io.debezium.serde;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serde;
import org.assertj.core.api.Assertions;
import org.junit.Test; import org.junit.Test;
import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonProperty;
@ -106,22 +107,22 @@ public void simpleKey() {
final Serde<Integer> keySerde = DebeziumSerdes.payloadJson(Integer.class); final Serde<Integer> keySerde = DebeziumSerdes.payloadJson(Integer.class);
keySerde.configure(Collections.emptyMap(), true); keySerde.configure(Collections.emptyMap(), true);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": {\"a\": 1}}".getBytes())).isEqualTo(1); assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": {\"a\": 1}}".getBytes())).isEqualTo(1);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": 1}".getBytes())).isEqualTo(1); assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": 1}".getBytes())).isEqualTo(1);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": {\"a\": null}}".getBytes())).isNull(); assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": {\"a\": null}}".getBytes())).isNull();
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": null}".getBytes())).isNull(); assertThat(keySerde.deserializer().deserialize("xx", "{\"payload\": null}".getBytes())).isNull();
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": 1}".getBytes())).isEqualTo(1); assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": 1}".getBytes())).isEqualTo(1);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "1".getBytes())).isEqualTo(1); assertThat(keySerde.deserializer().deserialize("xx", "1".getBytes())).isEqualTo(1);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": null}".getBytes())).isNull(); assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": null}".getBytes())).isNull();
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "null".getBytes())).isNull(); assertThat(keySerde.deserializer().deserialize("xx", "null".getBytes())).isNull();
} }
@Test @Test
public void compositeKey() { public void compositeKey() {
final Serde<CompositeKey> keySerde = DebeziumSerdes.payloadJson(CompositeKey.class); final Serde<CompositeKey> keySerde = DebeziumSerdes.payloadJson(CompositeKey.class);
keySerde.configure(Collections.emptyMap(), true); keySerde.configure(Collections.emptyMap(), true);
Assertions.assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": 1, \"b\": 2}".getBytes())).isEqualTo(new CompositeKey(1, 2)); assertThat(keySerde.deserializer().deserialize("xx", "{\"a\": 1, \"b\": 2}".getBytes())).isEqualTo(new CompositeKey(1, 2));
} }
@Test @Test
@ -129,7 +130,7 @@ public void valuePayloadWithSchema() {
final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class); final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class);
valueSerde.configure(Collections.singletonMap("from.field", "after"), false); valueSerde.configure(Collections.singletonMap("from.field", "after"), false);
final String content = Testing.Files.readResourceAsString("json/serde-with-schema.json"); final String content = Testing.Files.readResourceAsString("json/serde-with-schema.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org")); assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org"));
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@ -139,8 +140,8 @@ public void valueEnvelopeWithSchema() {
valueSerde.configure(Collections.emptyMap(), false); valueSerde.configure(Collections.emptyMap(), false);
final String content = Testing.Files.readResourceAsString("json/serde-with-schema.json"); final String content = Testing.Files.readResourceAsString("json/serde-with-schema.json");
Map<String, String> envelope = valueSerde.deserializer().deserialize("xx", content.getBytes()); Map<String, String> envelope = valueSerde.deserializer().deserialize("xx", content.getBytes());
Assertions.assertThat(envelope).hasSize(FIELDS_IN_ENVELOPE - 1); // tx block not present assertThat(envelope).hasSize(FIELDS_IN_ENVELOPE - 1); // tx block not present
Assertions.assertThat(envelope.get("op")).isEqualTo("c"); assertThat(envelope.get("op")).isEqualTo("c");
} }
@Test @Test
@ -148,7 +149,7 @@ public void valuePayloadWithoutSchema() {
final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class); final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class);
valueSerde.configure(Collections.singletonMap("from.field", "after"), false); valueSerde.configure(Collections.singletonMap("from.field", "after"), false);
final String content = Testing.Files.readResourceAsString("json/serde-without-schema.json"); final String content = Testing.Files.readResourceAsString("json/serde-without-schema.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org")); assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org"));
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@ -158,8 +159,8 @@ public void valueEnvelopeWithoutSchema() {
valueSerde.configure(Collections.emptyMap(), false); valueSerde.configure(Collections.emptyMap(), false);
final String content = Testing.Files.readResourceAsString("json/serde-without-schema.json"); final String content = Testing.Files.readResourceAsString("json/serde-without-schema.json");
Map<String, String> envelope = valueSerde.deserializer().deserialize("xx", content.getBytes()); Map<String, String> envelope = valueSerde.deserializer().deserialize("xx", content.getBytes());
Assertions.assertThat(envelope).hasSize(5); assertThat(envelope).hasSize(5);
Assertions.assertThat(envelope.get("op")).isEqualTo("c"); assertThat(envelope.get("op")).isEqualTo("c");
} }
@Test @Test
@ -168,11 +169,11 @@ public void valueBeforeField() {
valueSerde.configure(Collections.singletonMap("from.field", "before"), false); valueSerde.configure(Collections.singletonMap("from.field", "before"), false);
String content = Testing.Files.readResourceAsString("json/serde-update.json"); String content = Testing.Files.readResourceAsString("json/serde-update.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())) assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes()))
.isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org")); .isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org"));
content = Testing.Files.readResourceAsString("json/serde-without-schema.json"); content = Testing.Files.readResourceAsString("json/serde-without-schema.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isNull(); assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isNull();
} }
@Test @Test
@ -180,8 +181,8 @@ public void valueNull() {
final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class); final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class);
valueSerde.configure(Collections.emptyMap(), false); valueSerde.configure(Collections.emptyMap(), false);
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", "null".getBytes())).isNull(); assertThat(valueSerde.deserializer().deserialize("xx", "null".getBytes())).isNull();
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", null)).isNull(); assertThat(valueSerde.deserializer().deserialize("xx", null)).isNull();
} }
@Test @Test
@ -189,7 +190,7 @@ public void valuePayloadUnwrapped() {
final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class); final Serde<Customer> valueSerde = DebeziumSerdes.payloadJson(Customer.class);
valueSerde.configure(Collections.emptyMap(), false); valueSerde.configure(Collections.emptyMap(), false);
final String content = Testing.Files.readResourceAsString("json/serde-unwrapped.json"); final String content = Testing.Files.readResourceAsString("json/serde-unwrapped.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org")); assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())).isEqualTo(new Customer(1004, "Anne", "Kretchmar", "annek@noanswer.org"));
} }
@Test(expected = RuntimeException.class) @Test(expected = RuntimeException.class)
@ -198,7 +199,7 @@ public void valueWithUnknownPropertyThrowRuntimeException() {
valueSerde.configure(Collections.singletonMap("from.field", "before"), false); valueSerde.configure(Collections.singletonMap("from.field", "before"), false);
String content = Testing.Files.readResourceAsString("json/serde-unknown-property.json"); String content = Testing.Files.readResourceAsString("json/serde-unknown-property.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())) assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes()))
.isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org")); .isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org"));
} }
@ -212,7 +213,7 @@ public void valueWithUnknownPropertyIgnored() {
valueSerde.configure(options, false); valueSerde.configure(options, false);
String content = Testing.Files.readResourceAsString("json/serde-unknown-property.json"); String content = Testing.Files.readResourceAsString("json/serde-unknown-property.json");
Assertions.assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes())) assertThat(valueSerde.deserializer().deserialize("xx", content.getBytes()))
.isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org")); .isEqualTo(new Customer(1004, "Anne-Marie", "Kretchmar", "annek@noanswer.org"));
} }
} }
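A quick aside on the recurring mechanical change in the hunks above and below: every fully qualified Assertions.assertThat(...) call is replaced by the statically imported assertThat(...). A minimal, self-contained sketch of the two styles, assuming only AssertJ and JUnit 4 on the classpath (the test class and values here are made up for illustration):

    import static org.assertj.core.api.Assertions.assertThat;

    import org.junit.Test;

    public class StaticImportStyleTest {

        @Test
        public void staticallyImportedAssertThat() {
            // Fully qualified form, as the assertions read before the change:
            org.assertj.core.api.Assertions.assertThat("c").isEqualTo("c");
            // Statically imported form, as they read after it:
            assertThat("c").isEqualTo("c");
        }
    }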
View File
@ -55,7 +55,6 @@
import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.FileOffsetBackingStore; import org.apache.kafka.connect.storage.FileOffsetBackingStore;
import org.apache.kafka.connect.storage.OffsetStorageReaderImpl; import org.apache.kafka.connect.storage.OffsetStorageReaderImpl;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -1110,12 +1109,12 @@ protected String assertBeginTransaction(SourceRecord record) {
final Struct beginKey = (Struct) record.key(); final Struct beginKey = (Struct) record.key();
final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset(); final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset();
Assertions.assertThat(begin.getString("status")).isEqualTo("BEGIN"); assertThat(begin.getString("status")).isEqualTo("BEGIN");
Assertions.assertThat(begin.getInt64("event_count")).isNull(); assertThat(begin.getInt64("event_count")).isNull();
final String txId = begin.getString("id"); final String txId = begin.getString("id");
Assertions.assertThat(beginKey.getString("id")).isEqualTo(txId); assertThat(beginKey.getString("id")).isEqualTo(txId);
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(txId); assertThat(offset.get("transaction_id")).isEqualTo(txId);
return txId; return txId;
} }
@ -1125,16 +1124,15 @@ protected void assertEndTransaction(SourceRecord record, String expectedTxId, lo
final Struct endKey = (Struct) record.key(); final Struct endKey = (Struct) record.key();
final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset(); final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset();
Assertions.assertThat(end.getString("status")).isEqualTo("END"); assertThat(end.getString("status")).isEqualTo("END");
Assertions.assertThat(end.getString("id")).isEqualTo(expectedTxId); assertThat(end.getString("id")).isEqualTo(expectedTxId);
Assertions.assertThat(end.getInt64("event_count")).isEqualTo(expectedEventCount); assertThat(end.getInt64("event_count")).isEqualTo(expectedEventCount);
Assertions.assertThat(endKey.getString("id")).isEqualTo(expectedTxId); assertThat(endKey.getString("id")).isEqualTo(expectedTxId);
Assertions assertThat(end.getArray("data_collections").stream().map(x -> (Struct) x)
.assertThat(end.getArray("data_collections").stream().map(x -> (Struct) x)
.collect(Collectors.toMap(x -> x.getString("data_collection"), x -> x.getInt64("event_count")))) .collect(Collectors.toMap(x -> x.getString("data_collection"), x -> x.getInt64("event_count"))))
.isEqualTo(expectedPerTableCount.entrySet().stream().collect(Collectors.toMap(x -> x.getKey(), x -> x.getValue().longValue()))); .isEqualTo(expectedPerTableCount.entrySet().stream().collect(Collectors.toMap(x -> x.getKey(), x -> x.getValue().longValue())));
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(expectedTxId); assertThat(offset.get("transaction_id")).isEqualTo(expectedTxId);
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@ -1142,10 +1140,10 @@ protected void assertRecordTransactionMetadata(SourceRecord record, String expec
final Struct change = ((Struct) record.value()).getStruct("transaction"); final Struct change = ((Struct) record.value()).getStruct("transaction");
final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset(); final Map<String, Object> offset = (Map<String, Object>) record.sourceOffset();
Assertions.assertThat(change.getString("id")).isEqualTo(expectedTxId); assertThat(change.getString("id")).isEqualTo(expectedTxId);
Assertions.assertThat(change.getInt64("total_order")).isEqualTo(expectedTotalOrder); assertThat(change.getInt64("total_order")).isEqualTo(expectedTotalOrder);
Assertions.assertThat(change.getInt64("data_collection_order")).isEqualTo(expectedCollectionOrder); assertThat(change.getInt64("data_collection_order")).isEqualTo(expectedCollectionOrder);
Assertions.assertThat(offset.get("transaction_id")).isEqualTo(expectedTxId); assertThat(offset.get("transaction_id")).isEqualTo(expectedTxId);
} }
public static int waitTimeForRecords() { public static int waitTimeForRecords() {
@ -1166,7 +1164,7 @@ private static void waitForSnapshotEvent(String connector, String server, String
Awaitility.await() Awaitility.await()
.alias("Streaming was not started on time") .alias("Streaming was not started on time")
.pollInterval(100, TimeUnit.MILLISECONDS) .pollInterval(100, TimeUnit.MILLISECONDS)
.atMost(waitTimeForRecords() * 30, TimeUnit.SECONDS) .atMost(waitTimeForRecords() * 30L, TimeUnit.SECONDS)
.ignoreException(InstanceNotFoundException.class) .ignoreException(InstanceNotFoundException.class)
.until(() -> (boolean) mbeanServer .until(() -> (boolean) mbeanServer
.getAttribute(getSnapshotMetricsObjectName(connector, server), event)); .getAttribute(getSnapshotMetricsObjectName(connector, server), event));
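One small behavioural tweak sits in the Awaitility call above: the timeout argument changes from waitTimeForRecords() * 30 to waitTimeForRecords() * 30L. With the plain int literal the multiplication is done in int arithmetic and only the result is widened to long, so a large enough return value would overflow before reaching atMost(long, TimeUnit); the long literal moves the whole multiplication into long arithmetic. A small illustration with a deliberately large, made-up value:

    public class LongLiteralWidening {

        public static void main(String[] args) {
            int waitTime = 100_000_000; // hypothetical, deliberately large

            // int * int overflows first; the already-wrong result is then widened.
            long overflowed = waitTime * 30;   // -1294967296
            // int * long is computed in long arithmetic, so no overflow occurs.
            long widened = waitTime * 30L;     // 3000000000

            System.out.println(overflowed);
            System.out.println(widened);
        }
    }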
View File
@ -41,7 +41,6 @@
import org.apache.kafka.connect.transforms.predicates.Predicate; import org.apache.kafka.connect.transforms.predicates.Predicate;
import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.Callback;
import org.apache.kafka.connect.util.SafeObjectInputStream; import org.apache.kafka.connect.util.SafeObjectInputStream;
import org.assertj.core.api.Assertions;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -602,11 +601,11 @@ public void shouldRunDebeziumEngineWithJson() throws Exception {
.using(props) .using(props)
.notifying((records, committer) -> { .notifying((records, committer) -> {
assertThat(records.size()).isGreaterThanOrEqualTo(NUMBER_OF_LINES); assertThat(records.size()).isGreaterThanOrEqualTo(NUMBER_OF_LINES);
Integer groupCount = records.size() / NUMBER_OF_LINES; int groupCount = records.size() / NUMBER_OF_LINES;
for (ChangeEvent<String, String> r : records) { for (ChangeEvent<String, String> r : records) {
Assertions.assertThat(r.key()).isNull(); assertThat(r.key()).isNull();
Assertions.assertThat(r.value()).startsWith("\"Generated line number "); assertThat(r.value()).startsWith("\"Generated line number ");
committer.markProcessed(r); committer.markProcessed(r);
} }
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.pipeline.source.snapshot.incremental; package io.debezium.pipeline.source.snapshot.incremental;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.entry;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
@ -27,7 +29,6 @@
import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceConnector; import org.apache.kafka.connect.source.SourceConnector;
import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceRecord;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.Test; import org.junit.Test;
@ -189,7 +190,7 @@ protected <V> Map<Integer, V> consumeMixedWithIncrementalSnapshot(int recordCoun
final List<SourceRecord> dataRecords = records.recordsForTopic(topicName); final List<SourceRecord> dataRecords = records.recordsForTopic(topicName);
if (records.allRecordsInOrder().isEmpty()) { if (records.allRecordsInOrder().isEmpty()) {
noRecords++; noRecords++;
Assertions.assertThat(noRecords).describedAs(String.format("Too many no data record results, %d < %d", dbChanges.size(), recordCount)) assertThat(noRecords).describedAs(String.format("Too many no data record results, %d < %d", dbChanges.size(), recordCount))
.isLessThanOrEqualTo(MAXIMUM_NO_RECORDS_CONSUMES); .isLessThanOrEqualTo(MAXIMUM_NO_RECORDS_CONSUMES);
continue; continue;
} }
@ -212,7 +213,7 @@ protected <V> Map<Integer, V> consumeMixedWithIncrementalSnapshot(int recordCoun
} }
} }
Assertions.assertThat(dbChanges).hasSize(recordCount); assertThat(dbChanges).hasSize(recordCount);
return dbChanges; return dbChanges;
} }
@ -365,7 +366,7 @@ public void snapshotOnly() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -381,7 +382,7 @@ public void invalidTablesInTheList() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -409,7 +410,7 @@ public void inserts() throws Exception {
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -441,7 +442,7 @@ public void updates() throws Exception {
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount,
x -> x.getValue() >= 2000, null); x -> x.getValue() >= 2000, null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -490,7 +491,7 @@ public void updatesWithRestart() throws Exception {
} }
}); });
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -511,7 +512,7 @@ public void updatesLargeChunk() throws Exception {
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount,
x -> x.getValue() >= 2000, null); x -> x.getValue() >= 2000, null);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i + 2000)); assertThat(dbChanges).contains(entry(i + 1, i + 2000));
} }
} }
@ -545,7 +546,7 @@ public void snapshotOnlyWithRestart() throws Exception {
} }
}); });
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -574,7 +575,7 @@ public void snapshotPreceededBySchemaChange() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
// Initiate a schema change to the table immediately before the adhoc-snapshot // Initiate a schema change to the table immediately before the adhoc-snapshot
@ -589,7 +590,7 @@ public void snapshotPreceededBySchemaChange() throws Exception {
dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -602,7 +603,7 @@ public void snapshotWithRegexDataCollections() throws Exception {
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -626,7 +627,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
// Consume any residual left-over events after stopping incremental snapshots such as open/close // Consume any residual left-over events after stopping incremental snapshots such as open/close
// and wait for the stop message in the connector logs // and wait for the stop message in the connector logs
Assertions.assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage( assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage(
interceptor, "Stopping incremental snapshot")).isTrue(); interceptor, "Stopping incremental snapshot")).isTrue();
// stop the connector // stop the connector
@ -635,7 +636,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
// restart the connector // restart the connector
// should start with no available records, should not have any incremental snapshot state // should start with no available records, should not have any incremental snapshot state
startConnector(); startConnector();
Assertions.assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue(); assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue();
sendAdHocSnapshotSignal(); sendAdHocSnapshotSignal();
@ -654,7 +655,7 @@ public void stopCurrentIncrementalSnapshotWithoutCollectionsAndTakeNewNewIncreme
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -678,7 +679,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
// Consume any residual left-over events after stopping incremental snapshots such as open/close // Consume any residual left-over events after stopping incremental snapshots such as open/close
// and wait for the stop message in the connector logs // and wait for the stop message in the connector logs
Assertions.assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage( assertThat(consumeAnyRemainingIncrementalSnapshotEventsAndCheckForStopMessage(
interceptor, "Removing '[" + tableDataCollectionId() + "]' collections from incremental snapshot")).isTrue(); interceptor, "Removing '[" + tableDataCollectionId() + "]' collections from incremental snapshot")).isTrue();
// stop the connector // stop the connector
@ -687,7 +688,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
// restart the connector // restart the connector
// should start with no available records, should not have any incremental snapshot state // should start with no available records, should not have any incremental snapshot state
startConnector(); startConnector();
Assertions.assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue(); assertThat(interceptor.containsMessage("No incremental snapshot in progress")).isTrue();
sendAdHocSnapshotSignal(); sendAdHocSnapshotSignal();
@ -706,7 +707,7 @@ public void stopCurrentIncrementalSnapshotWithAllCollectionsAndTakeNewNewIncreme
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -722,13 +723,13 @@ public void removeNotYetCapturedCollectionFromInProgressIncrementalSnapshot() th
startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250)); startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250));
final List<String> collectionIds = tableDataCollectionIds(); final List<String> collectionIds = tableDataCollectionIds();
Assertions.assertThat(collectionIds).hasSize(2); assertThat(collectionIds).hasSize(2);
final List<String> tableNames = tableNames(); final List<String> tableNames = tableNames();
Assertions.assertThat(tableNames).hasSize(2); assertThat(tableNames).hasSize(2);
final List<String> topicNames = topicNames(); final List<String> topicNames = topicNames();
Assertions.assertThat(topicNames).hasSize(2); assertThat(topicNames).hasSize(2);
final String collectionIdToRemove = collectionIds.get(1); final String collectionIdToRemove = collectionIds.get(1);
final String tableToSnapshot = tableNames.get(0); final String tableToSnapshot = tableNames.get(0);
@ -757,7 +758,7 @@ public void removeNotYetCapturedCollectionFromInProgressIncrementalSnapshot() th
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicToConsume); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicToConsume);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -773,13 +774,13 @@ public void removeStartedCapturedCollectionFromInProgressIncrementalSnapshot() t
startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250)); startConnector(x -> x.with(CommonConnectorConfig.INCREMENTAL_SNAPSHOT_CHUNK_SIZE, 250));
final List<String> collectionIds = tableDataCollectionIds(); final List<String> collectionIds = tableDataCollectionIds();
Assertions.assertThat(collectionIds).hasSize(2); assertThat(collectionIds).hasSize(2);
final List<String> tableNames = tableNames(); final List<String> tableNames = tableNames();
Assertions.assertThat(tableNames).hasSize(2); assertThat(tableNames).hasSize(2);
final List<String> topicNames = topicNames(); final List<String> topicNames = topicNames();
Assertions.assertThat(topicNames).hasSize(2); assertThat(topicNames).hasSize(2);
final String collectionIdToRemove = collectionIds.get(0); final String collectionIdToRemove = collectionIds.get(0);
final String tableToSnapshot = tableNames.get(1); final String tableToSnapshot = tableNames.get(1);
@ -808,7 +809,7 @@ public void removeStartedCapturedCollectionFromInProgressIncrementalSnapshot() t
final int expectedRecordCount = ROW_COUNT * 2; final int expectedRecordCount = ROW_COUNT * 2;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicToConsume); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount, topicToConsume);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
@ -836,7 +837,7 @@ public void shouldSnapshotNewlyAddedTableToIncludeListAfterRestart() throws Exce
final int expectedRecordCount = ROW_COUNT; final int expectedRecordCount = ROW_COUNT;
final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount); final Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount);
for (int i = 0; i < expectedRecordCount; i++) { for (int i = 0; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
stopConnector(); stopConnector();
@ -877,7 +878,7 @@ public void testPauseDuringSnapshot() throws Exception {
if ((expectedRecordCount - beforeResume) > 0) { if ((expectedRecordCount - beforeResume) > 0) {
Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount - beforeResume); Map<Integer, Integer> dbChanges = consumeMixedWithIncrementalSnapshot(expectedRecordCount - beforeResume);
for (int i = beforeResume + 1; i < expectedRecordCount; i++) { for (int i = beforeResume + 1; i < expectedRecordCount; i++) {
Assertions.assertThat(dbChanges).contains(Assertions.entry(i + 1, i)); assertThat(dbChanges).contains(entry(i + 1, i));
} }
} }
} }
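The incremental-snapshot hunks above also switch from Assertions.entry to the statically imported entry when checking the contents of the dbChanges map. A brief sketch of that idiom, assuming AssertJ and JUnit 4 on the classpath (the map contents are invented for illustration):

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.assertj.core.api.Assertions.entry;

    import java.util.Map;

    import org.junit.Test;

    public class MapEntryAssertionTest {

        @Test
        public void containsExpectedEntries() {
            Map<Integer, Integer> dbChanges = Map.of(1, 0, 2, 1);

            // entry(key, value) builds a MapEntry; contains() verifies the map holds both.
            assertThat(dbChanges).contains(entry(1, 0), entry(2, 1));
        }
    }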
View File
@ -5,12 +5,13 @@
*/ */
package io.debezium.server; package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfSystemProperty; import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
@ -64,8 +65,8 @@ public void testPostgresWithJson() throws Exception {
final TestConsumer testConsumer = (TestConsumer) server.getConsumer(); final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())) Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT)); .until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
Assertions.assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT); assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
Assertions.assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains( assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains(
"\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}"); "\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}");
} }
} }
View File
@ -5,12 +5,13 @@
*/ */
package io.debezium.server; package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfSystemProperty; import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
@ -66,8 +67,8 @@ public void testPostgresWithJson() throws Exception {
final TestConsumer testConsumer = (TestConsumer) server.getConsumer(); final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())) Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT)); .until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
Assertions.assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT); assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
Assertions.assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains( assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains(
"\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}"); "\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}");
} }
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server; package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException; import java.io.IOException;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.nio.file.StandardOpenOption; import java.nio.file.StandardOpenOption;
@ -14,7 +16,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -52,25 +53,25 @@ void setupDependencies(@Observes ConnectorStartedEvent event) {
@Test @Test
public void testProps() { public void testProps() {
Properties properties = server.getProps(); Properties properties = server.getProps();
Assertions.assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isNotNull(); assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isNotNull();
Assertions.assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isEqualTo("public.table_name"); assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isEqualTo("public.table_name");
Assertions.assertThat(properties.getProperty("offset.flush.interval.ms.test")).isNotNull(); assertThat(properties.getProperty("offset.flush.interval.ms.test")).isNotNull();
Assertions.assertThat(properties.getProperty("offset.flush.interval.ms.test")).isEqualTo("0"); assertThat(properties.getProperty("offset.flush.interval.ms.test")).isEqualTo("0");
Assertions.assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isNotNull(); assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isNotNull();
Assertions.assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isEqualTo("SELECT * FROM table_name WHERE 1>2"); assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isEqualTo("SELECT * FROM table_name WHERE 1>2");
Assertions.assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isNotNull(); assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isNotNull();
Assertions.assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isEqualTo("true"); assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isEqualTo("true");
} }
@Test @Test
public void testJson() throws Exception { public void testJson() throws Exception {
final TestConsumer testConsumer = (TestConsumer) server.getConsumer(); final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT)); Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
Assertions.assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT); assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
Assertions.assertThat(testConsumer.getValues().get(MESSAGE_COUNT - 1)).isEqualTo("{\"line\":\"" + MESSAGE_COUNT + "\"}"); assertThat(testConsumer.getValues().get(MESSAGE_COUNT - 1)).isEqualTo("{\"line\":\"" + MESSAGE_COUNT + "\"}");
} }
static void appendLinesToSource(int numberOfLines) { static void appendLinesToSource(int numberOfLines) {
View File
@ -12,7 +12,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
@ -59,9 +58,9 @@ public void testPostgresWithProtobuf() throws Exception {
final TestConsumer testConsumer = (TestConsumer) server.getConsumer(); final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())) Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT)); .until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
Assertions.assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT); assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
Assertions.assertThat(testConsumer.getValues().get(0)).isInstanceOf(byte[].class); assertThat(testConsumer.getValues().get(0)).isInstanceOf(byte[].class);
Assertions.assertThat(testConsumer.getValues().get(0)).isNotNull(); assertThat(testConsumer.getValues().get(0)).isNotNull();
assertThat(((byte[]) testConsumer.getValues().get(0))[0]).isEqualTo((byte) 0); assertThat(((byte[]) testConsumer.getValues().get(0))[0]).isEqualTo((byte) 0);
} }
} }
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server.kafka; package io.debezium.server.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -19,7 +21,6 @@
import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringDeserializer;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -70,7 +71,7 @@ void connectorCompleted(@Observes final ConnectorCompletedEvent event) throws Ex
} }
@AfterAll @AfterAll
static void stop() throws Exception { static void stop() {
if (consumer != null) { if (consumer != null) {
consumer.unsubscribe(); consumer.unsubscribe();
consumer.close(); consumer.close();
@ -78,7 +79,7 @@ static void stop() throws Exception {
} }
@Test @Test
public void testKafka() throws Exception { public void testKafka() {
Awaitility.await().atMost(Duration.ofSeconds(KafkaTestConfigSource.waitForSeconds())).until(() -> { Awaitility.await().atMost(Duration.ofSeconds(KafkaTestConfigSource.waitForSeconds())).until(() -> {
return consumer != null; return consumer != null;
}); });
@ -94,6 +95,6 @@ public void testKafka() throws Exception {
.forEachRemaining(actual::add); .forEachRemaining(actual::add);
return actual.size() >= MESSAGE_COUNT; return actual.size() >= MESSAGE_COUNT;
}); });
Assertions.assertThat(actual.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT); assertThat(actual.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
} }
} }
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server.nats.jetstream; package io.debezium.server.nats.jetstream;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -12,7 +14,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -45,7 +46,7 @@ class NatsJetStreamIT {
protected static JetStream js; protected static JetStream js;
protected static Dispatcher d; protected static Dispatcher d;
{ static {
Testing.Files.delete(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH); Testing.Files.delete(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH); Testing.Files.createTestingFile(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH);
} }
@ -90,6 +91,6 @@ static void stop() throws Exception {
@Test @Test
void testNatsStreaming() throws Exception { void testNatsStreaming() throws Exception {
Awaitility.await().atMost(Duration.ofSeconds(NatsJetStreamTestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT); Awaitility.await().atMost(Duration.ofSeconds(NatsJetStreamTestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT);
Assertions.assertThat(messages.size() >= MESSAGE_COUNT).isTrue(); assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
} }
} }
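Beyond the assertion cleanup, the hunk above turns a bare instance initializer into a static initializer. The block only prepares static state (the offset store path), so it should run once per class load rather than once per test instance. A minimal sketch of the difference between the two block types, with the file handling replaced by hypothetical prints:

    public class InitializerBlocks {

        static {
            // Runs exactly once, when the class is first loaded.
            System.out.println("static initializer: prepare shared state");
        }

        {
            // Runs every time an instance is created, before the constructor body.
            System.out.println("instance initializer: per-instance setup");
        }

        public static void main(String[] args) {
            new InitializerBlocks();
            new InitializerBlocks(); // instance block runs again; the static block does not
        }
    }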
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server.nats.streaming; package io.debezium.server.nats.streaming;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -12,7 +14,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -48,7 +49,7 @@ public class NatsStreamingIT {
protected static StreamingConnection sc; protected static StreamingConnection sc;
protected static Subscription subscription; protected static Subscription subscription;
{ static {
Testing.Files.delete(NatsStreamingTestConfigSource.OFFSET_STORE_PATH); Testing.Files.delete(NatsStreamingTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(NatsStreamingTestConfigSource.OFFSET_STORE_PATH); Testing.Files.createTestingFile(NatsStreamingTestConfigSource.OFFSET_STORE_PATH);
} }
@ -82,7 +83,7 @@ public void onMessage(Message m) {
} }
} }
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception { void connectorCompleted(@Observes final ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) { if (!event.isSuccess()) {
throw (Exception) event.getError().get(); throw (Exception) event.getError().get();
} }
@ -97,8 +98,11 @@ static void stop() throws Exception {
} }
@Test @Test
public void testNatsStreaming() throws Exception { public void testNatsStreaming() {
Awaitility.await().atMost(Duration.ofSeconds(NatsStreamingTestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT); Awaitility.await()
Assertions.assertThat(messages.size() >= MESSAGE_COUNT); .atMost(Duration.ofSeconds(NatsStreamingTestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
} }
} }
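The last assertion in the test above is the clearest instance of the misuse being fixed: Assertions.assertThat(messages.size() >= MESSAGE_COUNT); builds an assertion object but never calls a verification method such as isTrue(), so it can never fail. The replacement asserts on the size itself, which both verifies the condition and produces a useful failure message. A minimal sketch of the two behaviours (MESSAGE_COUNT and the list are stand-ins):

    import static org.assertj.core.api.Assertions.assertThat;

    import java.util.List;

    public class NoOpAssertionDemo {

        static final int MESSAGE_COUNT = 2;

        public static void main(String[] args) {
            List<String> messages = List.of("only-one");

            // No-op: assertThat(boolean) returns an assertion object, but without a
            // terminal call such as isTrue() nothing is checked, so this "passes".
            assertThat(messages.size() >= MESSAGE_COUNT);

            // Real check: fails here, reporting roughly
            // "Expecting actual: 1 to be greater than or equal to: 2".
            assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
        }
    }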
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server.pubsub; package io.debezium.server.pubsub;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException; import java.io.IOException;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
@ -14,7 +16,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -185,8 +186,11 @@ void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exceptio
} }
@Test @Test
public void testPubSub() throws Exception { public void testPubSub() {
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT); Awaitility.await()
Assertions.assertThat(messages.size() >= MESSAGE_COUNT); .atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
} }
} }
View File
@ -5,6 +5,8 @@
*/ */
package io.debezium.server.pubsub; package io.debezium.server.pubsub;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException; import java.io.IOException;
import java.time.Duration; import java.time.Duration;
import java.util.ArrayList; import java.util.ArrayList;
@ -15,7 +17,6 @@
import javax.enterprise.event.Observes; import javax.enterprise.event.Observes;
import javax.inject.Inject; import javax.inject.Inject;
import org.assertj.core.api.Assertions;
import org.awaitility.Awaitility; import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -152,9 +153,12 @@ void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exceptio
} }
@Test @Test
public void testPubSubLite() throws Exception { public void testPubSubLite() {
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT); Awaitility.await()
Assertions.assertThat(messages.size() >= MESSAGE_COUNT); .atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
} }
} }
View File
@ -5,9 +5,10 @@
*/ */
package io.debezium.server.redis; package io.debezium.server.redis;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Map; import java.util.Map;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import io.debezium.connector.postgresql.connection.PostgresConnection; import io.debezium.connector.postgresql.connection.PostgresConnection;
@ -43,7 +44,7 @@ public void testRedisStream() throws Exception {
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT); TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT);
Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME); Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME);
Assertions.assertThat(redisOffsets.size() > 0).isTrue(); assertThat(redisOffsets.size() > 0).isTrue();
} }
/** /**
@ -91,7 +92,7 @@ public void testRedisConnectionRetry() throws Exception {
Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME); Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME);
jedis.close(); jedis.close();
Assertions.assertThat(redisOffsets.size() > 0).isTrue(); assertThat(redisOffsets.size() > 0).isTrue();
} }
} }