DBZ-175 Misc. adjustments;

* Dedicated functional interface for halting predicates
* Using semantic temporal types
* Typo fixes
* Exposing SnapshotNewTables through connector config
* Static loggers
* Adding Moira to COPYRIGHT.txt
Gunnar Morling 2019-01-14 16:36:46 +01:00
parent a6d791cd8c
commit 965b789dab
12 changed files with 102 additions and 75 deletions

View File

@@ -43,6 +43,7 @@ MaoXiang Pan
Mario Mueller
Matteo Capitanio
Matthias Wessendorf
+Moira Tagle
Olavi Mustanoja
Omar Al-Safi
Ori Popowski

View File

@@ -13,7 +13,6 @@
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Predicate;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;
@@ -50,7 +49,7 @@ public abstract class AbstractReader implements Reader {
    private final AtomicReference<Runnable> uponCompletion = new AtomicReference<>();
    private final Duration pollInterval;
-   private final Predicate<SourceRecord> acceptAndContinue;
+   private final HaltingPredicate acceptAndContinue;

    /**
     * Create a snapshot reader.
@@ -62,7 +61,7 @@ public abstract class AbstractReader implements Reader {
     *            accepting records once {@link #enqueueRecord(SourceRecord)} is called with a record
     *            that tests as false. Can be null. If null, all records will be accepted.
     */
-   public AbstractReader(String name, MySqlTaskContext context, Predicate<SourceRecord> acceptAndContinue) {
+   public AbstractReader(String name, MySqlTaskContext context, HaltingPredicate acceptAndContinue) {
        this.name = name;
        this.context = context;
        this.connectionContext = context.getConnectionContext();
@@ -313,7 +312,7 @@ protected void pollComplete(List<SourceRecord> batch) {
     */
    protected boolean enqueueRecord(SourceRecord record) throws InterruptedException {
        if (record != null) {
-           if (acceptAndContinue.test(record)) {
+           if (acceptAndContinue.accepts(record)) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Enqueuing source record: {}", record);
                }
@@ -331,10 +330,10 @@ protected boolean enqueueRecord(SourceRecord record) throws InterruptedException
    /**
     * A predicate that returns true for all sourceRecords
     */
-   public static class AcceptAllPredicate implements Predicate<SourceRecord> {
+   public static class AcceptAllPredicate implements HaltingPredicate {

        @Override
-       public boolean test(SourceRecord sourceRecord) {
+       public boolean accepts(SourceRecord sourceRecord) {
            return true;
        }
    }

View File

@@ -146,7 +146,7 @@ public boolean equals(Object obj) {
     * @param context the task context in which this reader is running; may not be null
     * @param acceptAndContinue see {@link AbstractReader#AbstractReader(String, MySqlTaskContext, Predicate)}
     */
-   public BinlogReader(String name, MySqlTaskContext context, Predicate<SourceRecord> acceptAndContinue) {
+   public BinlogReader(String name, MySqlTaskContext context, HaltingPredicate acceptAndContinue) {
        this(name, context, acceptAndContinue, context.serverId());
    }
@@ -158,7 +158,7 @@ public BinlogReader(String name, MySqlTaskContext context, Predicate<SourceRecor
     * @param acceptAndContinue see {@link AbstractReader#AbstractReader(String, MySqlTaskContext, Predicate)}
     * @param serverId the server id to use for the {@link BinaryLogClient}
     */
-   public BinlogReader(String name, MySqlTaskContext context, Predicate<SourceRecord> acceptAndContinue, long serverId) {
+   public BinlogReader(String name, MySqlTaskContext context, HaltingPredicate acceptAndContinue, long serverId) {
        super(name, context, acceptAndContinue);
        connectionContext = context.getConnectionContext();

View File

@@ -0,0 +1,23 @@
+/*
+ * Copyright Debezium Authors.
+ *
+ * Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
+ */
+package io.debezium.connector.mysql;
+
+import org.apache.kafka.connect.source.SourceRecord;
+
+/**
+ * A predicate invoked by {@link Reader} implementations in order to determine whether they should continue with
+ * processing records or not.
+ *
+ * @author Gunnar Morling
+ */
+@FunctionalInterface
+public interface HaltingPredicate {
+
+    /**
+     * Whether this record should be processed by the calling reader or not.
+     */
+    boolean accepts(SourceRecord record);
+}
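Because HaltingPredicate is a @FunctionalInterface with a single accepts(SourceRecord) method, callers can now supply a lambda instead of writing a class such as AcceptAllPredicate. A minimal sketch of such usage, assuming a configured MySqlTaskContext named context is available; the stopOnTombstones predicate is a made-up example, not part of this commit:

    // Sketch only: "context" is assumed to be an already-configured MySqlTaskContext.
    HaltingPredicate acceptAll = record -> true;                          // equivalent to AcceptAllPredicate
    HaltingPredicate stopOnTombstones = record -> record.value() != null; // hypothetical predicate for illustration

    BinlogReader reader = new BinlogReader("binlog", context, acceptAll);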

View File

@@ -631,7 +631,7 @@ public static DdlParsingMode parse(String value, String defaultValue) {
            .withWidth(Width.LONG)
            .withImportance(Importance.HIGH)
            .withDefault(10000L)
-           .withDescription("Only relevant in parallel snapshotting is configured. During "
+           .withDescription("Only relevant if parallel snapshotting is configured. During "
                    + "parallel snapshotting, multiple (4) connections open to the database "
                    + "client, and they each need their own unique connection ID. This offset is "
                    + "used to generate those IDs from the base configured cluster ID.");
@@ -1092,6 +1092,7 @@ public static final Field MASK_COLUMN(int length) {
    private final SnapshotLockingMode snapshotLockingMode;
    private final DdlParsingMode ddlParsingMode;
    private final GtidNewChannelPosition gitIdNewChannelPosition;
+   private final SnapshotNewTables snapshotNewTables;

    public MySqlConnectorConfig(Configuration config) {
        super(
@@ -1119,6 +1120,9 @@ public MySqlConnectorConfig(Configuration config) {
        String gitIdNewChannelPosition = config.getString(MySqlConnectorConfig.GTID_NEW_CHANNEL_POSITION);
        this.gitIdNewChannelPosition = GtidNewChannelPosition.parse(gitIdNewChannelPosition, MySqlConnectorConfig.GTID_NEW_CHANNEL_POSITION.defaultValueAsString());
+
+       String snapshotNewTables = config.getString(MySqlConnectorConfig.SNAPSHOT_NEW_TABLES);
+       this.snapshotNewTables = SnapshotNewTables.parse(snapshotNewTables, MySqlConnectorConfig.SNAPSHOT_NEW_TABLES.defaultValueAsString());
    }

    public SnapshotLockingMode getSnapshotLockingMode() {
@@ -1133,6 +1137,10 @@ public GtidNewChannelPosition gtidNewChannelPosition() {
        return gitIdNewChannelPosition;
    }

+   public SnapshotNewTables getSnapshotNewTables() {
+       return snapshotNewTables;
+   }
+
    protected static ConfigDef configDef() {
        ConfigDef config = new ConfigDef();
        Field.group(config, "MySQL", HOSTNAME, PORT, USER, PASSWORD, ON_CONNECT_STATEMENTS, SERVER_NAME, SERVER_ID, SERVER_ID_OFFSET,
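With the parsed value now held by MySqlConnectorConfig, readers ask the connector config for the snapshot-new-tables mode instead of MySqlTaskContext. A small sketch of reading it; the property name "snapshot.new.tables" and the value "parallel" are assumptions inferred from the SNAPSHOT_NEW_TABLES field and the SnapshotNewTables.PARALLEL constant used elsewhere in this diff:

    // Sketch only: minimal configuration exercising the accessor added in this commit.
    Configuration config = Configuration.create()
            .with("snapshot.new.tables", "parallel") // assumed property name for SNAPSHOT_NEW_TABLES
            .build();

    MySqlConnectorConfig connectorConfig = new MySqlConnectorConfig(config);
    if (connectorConfig.getSnapshotNewTables() == MySqlConnectorConfig.SnapshotNewTables.PARALLEL) {
        // snapshot newly added tables using a parallel binlog reader
    }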

View File

@@ -10,13 +10,13 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import io.debezium.util.LoggingContext;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
@@ -29,6 +29,7 @@
import io.debezium.connector.mysql.MySqlConnectorConfig.SnapshotMode;
import io.debezium.schema.TopicSelector;
import io.debezium.util.Collect;
+import io.debezium.util.LoggingContext;
import io.debezium.util.LoggingContext.PreviousContext;

/**
@@ -215,7 +216,7 @@ public synchronized void start(Configuration config) {
        // if there are new tables
        if (newTablesInConfig()) {
            // and we are configured to run a parallel snapshot
-           if (taskContext.snapshotNewTables() == MySqlConnectorConfig.SnapshotNewTables.PARALLEL) {
+           if (taskContext.getConnectorConfig().getSnapshotNewTables() == MySqlConnectorConfig.SnapshotNewTables.PARALLEL) {
                ServerIdGenerator serverIdGenerator =
                        new ServerIdGenerator(config.getLong(MySqlConnectorConfig.SERVER_ID),
                                config.getLong(MySqlConnectorConfig.SERVER_ID_OFFSET));
@@ -310,14 +311,13 @@ public long getConfiguredServerId() {
     * @return the offset to restart from.
     * @see RecordMakers#RecordMakers(MySqlSchema, SourceInfo, TopicSelector, boolean, Map)
     */
-   @SuppressWarnings("unchecked")
    private Map<String, ?> getRestartOffset(Map<String, ?> storedOffset) {
        Map<String, Object> restartOffset = new HashMap<>();
        if (storedOffset != null) {
-           for (String key : storedOffset.keySet()){
-               if (key.startsWith(SourceInfo.RESTART_PREFIX)) {
-                   String newKey = key.substring(SourceInfo.RESTART_PREFIX.length());
-                   restartOffset.put(newKey, storedOffset.get(key));
+           for (Entry<String, ?> entry : storedOffset.entrySet()){
+               if (entry.getKey().startsWith(SourceInfo.RESTART_PREFIX)) {
+                   String newKey = entry.getKey().substring(SourceInfo.RESTART_PREFIX.length());
+                   restartOffset.put(newKey, entry.getValue());
                }
            }
        }
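The switch from keySet() to entrySet() avoids a second map lookup per key; the behavior is unchanged: keys carrying the restart prefix are copied into a new offset map with the prefix stripped. A self-contained illustration of that extraction, where the prefix value "RESTART_" and the offset keys below are assumptions for the example, not taken from this diff:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Map.Entry;

    public class RestartOffsetSketch {

        private static final String RESTART_PREFIX = "RESTART_"; // assumed value of SourceInfo.RESTART_PREFIX

        public static void main(String[] args) {
            Map<String, Object> storedOffset = new HashMap<>();
            storedOffset.put("file", "mysql-bin.000010");         // current position (illustrative key)
            storedOffset.put("RESTART_file", "mysql-bin.000003"); // restart position for the parallel readers
            storedOffset.put("RESTART_pos", 154L);

            Map<String, Object> restartOffset = new HashMap<>();
            for (Entry<String, Object> entry : storedOffset.entrySet()) {
                if (entry.getKey().startsWith(RESTART_PREFIX)) {
                    restartOffset.put(entry.getKey().substring(RESTART_PREFIX.length()), entry.getValue());
                }
            }

            // prints the RESTART_-prefixed entries with the prefix stripped, e.g. {pos=154, file=mysql-bin.000003}
            System.out.println(restartOffset);
        }
    }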

View File

@@ -17,7 +17,6 @@
import io.debezium.connector.common.CdcSourceTaskContext;
import io.debezium.connector.mysql.MySqlConnectorConfig.GtidNewChannelPosition;
import io.debezium.connector.mysql.MySqlConnectorConfig.SnapshotMode;
-import io.debezium.connector.mysql.MySqlConnectorConfig.SnapshotNewTables;
import io.debezium.function.Predicates;
import io.debezium.relational.TableId;
import io.debezium.relational.history.DatabaseHistory;
@@ -246,11 +245,6 @@ protected SnapshotMode snapshotMode() {
        return SnapshotMode.parse(value, MySqlConnectorConfig.SNAPSHOT_MODE.defaultValueAsString());
    }

-   protected SnapshotNewTables snapshotNewTables() {
-       String value = config.getString(MySqlConnectorConfig.SNAPSHOT_NEW_TABLES);
-       return SnapshotNewTables.parse(value, MySqlConnectorConfig.SNAPSHOT_NEW_TABLES.defaultValueAsString());
-   }
-
    public String getSnapshotSelectOverrides() {
        return config.getString(MySqlConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE);
    }

View File

@@ -5,17 +5,17 @@
 */
package io.debezium.connector.mysql;

-import io.debezium.config.Configuration;
-import org.apache.kafka.connect.source.SourceRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Predicate;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import io.debezium.config.Configuration;

/**
 * A reader that runs a {@link ChainedReader} consisting of a {@link SnapshotReader} and a {@link BinlogReader}
@@ -195,17 +195,17 @@ public String name() {
     * the current time. Once a single record near the end of the binlog has been seen, we
     * we assume the reader will stay near the end of the binlog.
     */
-   /*package local*/ static class ParallelHaltingPredicate implements Predicate<SourceRecord> {
+   /*package local*/ static class ParallelHaltingPredicate implements HaltingPredicate {

-       private final Logger logger = LoggerFactory.getLogger(getClass());
+       private static final Logger LOGGER = LoggerFactory.getLogger(ParallelHaltingPredicate.class);
+
+       private static final Duration DEFAULT_MIN_HALTING_DURATION = Duration.ofMinutes(5);

        private volatile AtomicBoolean thisReaderNearEnd;
        private volatile AtomicBoolean otherReaderNearEnd;

        // The minimum duration we must be within before we attempt to halt.
        private final Duration minHaltingDuration;
-       // is hard coded in as 5 minutes.
-       private static final Duration DEFAULT_MIN_HALTING_DURATION = Duration.ofMinutes(5);

        /*package local*/ ParallelHaltingPredicate(AtomicBoolean thisReaderNearEndRef,
                                                   AtomicBoolean otherReaderNearEndRef) {
@@ -221,7 +221,7 @@ public String name() {
        }

        @Override
-       public boolean test(SourceRecord ourSourceRecord) {
+       public boolean accepts(SourceRecord ourSourceRecord) {
            // we assume if we ever end up near the end of the binlog, then we will remain there.
            if (!thisReaderNearEnd.get()) {
                Long sourceRecordTimestamp = (Long) ourSourceRecord.sourceOffset().get(SourceInfo.TIMESTAMP_KEY);
@@ -232,7 +232,7 @@ public boolean test(SourceRecord ourSourceRecord) {
                        now);
                if (durationToEnd.compareTo(minHaltingDuration) <= 0) {
                    // we are within minHaltingDuration of the end
-                   logger.debug("Parallel halting predicate: this reader near end");
+                   LOGGER.debug("Parallel halting predicate: this reader near end");
                    thisReaderNearEnd.set(true);
                }
            }
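Read together with the tests further down, the predicate's behavior can be paraphrased as: each record's timestamp is compared against the current time; once a record falls within minHaltingDuration of now, this reader is flagged as near the end of the binlog, and accepts() returns false (halting the reader) only when both parallel readers are near the end. A minimal sketch of that logic under those assumptions, not the verbatim implementation:

    import java.time.Duration;
    import java.time.Instant;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch of the halting decision; the record timestamp is assumed to be epoch seconds,
    // matching the SourceInfo.TIMESTAMP_KEY usage in the tests below.
    class HaltingSketch {

        private final AtomicBoolean thisReaderNearEnd;
        private final AtomicBoolean otherReaderNearEnd;
        private final Duration minHaltingDuration;

        HaltingSketch(AtomicBoolean thisReaderNearEnd, AtomicBoolean otherReaderNearEnd, Duration minHaltingDuration) {
            this.thisReaderNearEnd = thisReaderNearEnd;
            this.otherReaderNearEnd = otherReaderNearEnd;
            this.minHaltingDuration = minHaltingDuration;
        }

        boolean accepts(long recordTimestampEpochSeconds) {
            if (!thisReaderNearEnd.get()) {
                Duration durationToEnd = Duration.between(Instant.ofEpochSecond(recordTimestampEpochSeconds), Instant.now());
                if (durationToEnd.compareTo(minHaltingDuration) <= 0) {
                    thisReaderNearEnd.set(true); // this reader has (nearly) caught up with the end of the binlog
                }
            }
            // keep accepting until BOTH readers are near the end, then halt
            return !(thisReaderNearEnd.get() && otherReaderNearEnd.get());
        }
    }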

View File

@@ -5,10 +5,8 @@
 */
package io.debezium.connector.mysql;

-import io.debezium.document.Document;
-import org.apache.kafka.connect.source.SourceRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import static io.debezium.connector.mysql.SourceInfo.BINLOG_FILENAME_OFFSET_KEY;
+import static io.debezium.connector.mysql.SourceInfo.BINLOG_POSITION_OFFSET_KEY;

import java.util.List;
import java.util.Map;
@@ -16,8 +14,11 @@
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;

-import static io.debezium.connector.mysql.SourceInfo.BINLOG_FILENAME_OFFSET_KEY;
-import static io.debezium.connector.mysql.SourceInfo.BINLOG_POSITION_OFFSET_KEY;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.debezium.document.Document;
/**
 * A reader that unifies the binlog positions of two binlog readers.
@@ -29,7 +30,7 @@
 */
public class ReconcilingBinlogReader implements Reader {

-   private final Logger logger = LoggerFactory.getLogger(getClass());
+   private static final Logger LOGGER = LoggerFactory.getLogger(ReconcilingBinlogReader.class);

    private final BinlogReader binlogReaderA;
    private final BinlogReader binlogReaderB;
@@ -104,11 +105,11 @@ public void start() {
    public void stop() {
        if (running.compareAndSet(true, false)){
            try {
-               logger.info("Stopping the {} reader", reconcilingReader.name());
+               LOGGER.info("Stopping the {} reader", reconcilingReader.name());
                reconcilingReader.stop();
                reconcilingReader.context.shutdown();
            } catch (Throwable t) {
-               logger.error("Unexpected error stopping the {} reader", reconcilingReader.name());
+               LOGGER.error("Unexpected error stopping the {} reader", reconcilingReader.name());
            }
        }
    }
@@ -126,7 +127,7 @@ private void completeSuccessfully() {
        if (completed.compareAndSet(false, true)){
            stop();
            setupUnifiedReader();
-           logger.info("Completed Reconciliation of Parallel Readers.");
+           LOGGER.info("Completed Reconciliation of Parallel Readers.");

            Runnable completionHandler = uponCompletion.getAndSet(null); // set to null so that we call it only once
            if (completionHandler != null) {
@@ -177,9 +178,9 @@ private void determineLeadingReader() {
        }

        if (aReaderLeading) {
-           logger.info("old tables leading; reading only from new tables");
+           LOGGER.info("old tables leading; reading only from new tables");
        } else {
-           logger.info("new tables leading; reading only from old tables");
+           LOGGER.info("new tables leading; reading only from old tables");
        }
    }
@@ -203,10 +204,10 @@ private void checkLaggingLeadingInfo() {
    /**
     * A Predicate that returns false for any record beyond a given offset.
     */
-   /*package private*/ static class OffsetLimitPredicate implements Predicate<SourceRecord> {
+   /*package private*/ static class OffsetLimitPredicate implements HaltingPredicate {

-       private Document leadingReaderFinalOffsetDocument;
-       private Predicate<String> gtidFilter;
+       private final Document leadingReaderFinalOffsetDocument;
+       private final Predicate<String> gtidFilter;

        /*package private*/ OffsetLimitPredicate(Map<String, ?> leadingReaderFinalOffset,
                                                 Predicate<String> gtidFilter) {
@@ -216,7 +217,7 @@ private void checkLaggingLeadingInfo() {
        }

        @Override
-       public boolean test(SourceRecord sourceRecord) {
+       public boolean accepts(SourceRecord sourceRecord) {
            Document offsetDocument = SourceInfo.createDocumentFromOffset(sourceRecord.sourceOffset());
            // .isPositionAtOrBefore is true IFF leadingReaderFinalOffsetDocument <= offsetDocument
            // we should stop (return false) IFF leadingReaderFinalOffsetDocument <= offsetDocument
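OffsetLimitPredicate lets the lagging binlog reader run only until it reaches the final offset already recorded by the leading reader. A rough illustration of that cut-off using raw binlog coordinates; the real predicate compares Document-based offsets through SourceInfo, as the context lines above show:

    // Illustrative only: compares plain binlog coordinates (file name + position) instead of the
    // Document-based comparison performed by the real OffsetLimitPredicate.
    class OffsetCutoffSketch {

        static boolean accepts(String recordFile, long recordPos, String finalFile, long finalPos) {
            int byFile = recordFile.compareTo(finalFile); // binlog file names sort lexicographically
            if (byFile != 0) {
                return byFile < 0;       // record is in an earlier binlog file: keep processing
            }
            return recordPos < finalPos; // same file: halt once the leading reader's final position is reached
        }
    }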

View File

@@ -766,7 +766,7 @@ protected void readBinlogPosition(int step, SourceInfo source, JdbcConnection my
     * @return {@link Filters} that represent all the tables that this snapshot reader should CREATE
     */
    private Filters getCreateTableFilters(Filters filters) {
-       MySqlConnectorConfig.SnapshotNewTables snapshotNewTables = context.snapshotNewTables();
+       MySqlConnectorConfig.SnapshotNewTables snapshotNewTables = context.getConnectorConfig().getSnapshotNewTables();
        if (snapshotNewTables == MySqlConnectorConfig.SnapshotNewTables.PARALLEL) {
            // if we are snapshotting new tables in parallel, we need to make sure all the tables in the configuration
            // are created.

View File

@@ -5,21 +5,23 @@
 */
package io.debezium.connector.mysql;

-import org.apache.kafka.connect.source.SourceRecord;
-import org.junit.Assert;
-import org.junit.Test;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;

import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

-import static io.debezium.connector.mysql.ParallelSnapshotReader.ParallelHaltingPredicate;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.junit.Assert;
+import org.junit.Test;
+
+import io.debezium.connector.mysql.ParallelSnapshotReader.ParallelHaltingPredicate;

/**
 * @author Moira Tagle
@@ -170,12 +172,11 @@ public void testHaltingPredicateHonorsTimeRange() {
        AtomicBoolean thisReaderNearEnd = new AtomicBoolean(false);
        AtomicBoolean otherReaderNearEnd = new AtomicBoolean(false);

-       long durationSec = 5 * 60; // five minutes
-       Duration duration = Duration.ofSeconds(durationSec);
+       Duration duration = Duration.ofMinutes(5);

        ParallelHaltingPredicate parallelHaltingPredicate = new ParallelHaltingPredicate(thisReaderNearEnd, otherReaderNearEnd, duration);

-       boolean testResult = parallelHaltingPredicate.test(createSourceRecordWithTimestamp(System.currentTimeMillis()/1000 - (durationSec * 2)));
+       boolean testResult = parallelHaltingPredicate.accepts(createSourceRecordWithTimestamp(Instant.now().minus(duration.multipliedBy(2))));

        Assert.assertTrue(testResult);
@@ -192,11 +193,11 @@ public void testHaltingPredicateFlipsthisReaderNearEnd() {
        AtomicBoolean thisReaderNearEnd = new AtomicBoolean(false);
        AtomicBoolean otherReaderNearEnd = new AtomicBoolean(false);

-       Duration duration = Duration.ofSeconds(5 * 60); // five minutes
+       Duration duration = Duration.ofMinutes(5);

        ParallelHaltingPredicate parallelHaltingPredicate = new ParallelHaltingPredicate(thisReaderNearEnd, otherReaderNearEnd, duration);

-       boolean testResult = parallelHaltingPredicate.test(createSourceRecordWithTimestamp(System.currentTimeMillis()/1000));
+       boolean testResult = parallelHaltingPredicate.accepts(createSourceRecordWithTimestamp(Instant.now()));

        Assert.assertTrue(testResult);
@@ -212,16 +213,16 @@ public void testHaltingPredicateHalts() {
        AtomicBoolean thisReaderNearEnd = new AtomicBoolean(false);
        AtomicBoolean otherReaderNearEnd = new AtomicBoolean(true);

-       Duration duration = Duration.ofSeconds(5 * 60); // five minutes
+       Duration duration = Duration.ofMinutes(5);

        ParallelHaltingPredicate parallelHaltingPredicate =
            new ParallelHaltingPredicate(thisReaderNearEnd, otherReaderNearEnd, duration);

        boolean testResult =
-           parallelHaltingPredicate.test(createSourceRecordWithTimestamp(System.currentTimeMillis()/1000));
+           parallelHaltingPredicate.accepts(createSourceRecordWithTimestamp(Instant.now()));

        Assert.assertFalse(testResult);

        Assert.assertTrue(thisReaderNearEnd.get());
        Assert.assertTrue(otherReaderNearEnd.get());
    }
@@ -230,11 +231,11 @@ public void testHaltingPredicateHalts() {
     * Create an "offset" containing a single timestamp element with the given value.
     * Needed because {@link ParallelSnapshotReader.ParallelHaltingPredicate} halts based on how
     * close the record's timestamp is to the present time.
-    * @param tsSec the timestamp (in seconds) in the resulting offset.
+    * @param tsSec the timestamp in the resulting offset.
     * @return an "offset" containing the given timestamp.
     */
-   private SourceRecord createSourceRecordWithTimestamp(long tsSec) {
-       Map<String, ?> offset = Collections.singletonMap(SourceInfo.TIMESTAMP_KEY, tsSec);
+   private SourceRecord createSourceRecordWithTimestamp(Instant ts) {
+       Map<String, ?> offset = Collections.singletonMap(SourceInfo.TIMESTAMP_KEY, ts.getEpochSecond());
        return new SourceRecord(null, offset, null, null, null);
    }
}
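The "semantic temporal types" item in the commit message comes down to replacing raw epoch-second arithmetic with java.time values, as these test changes show. A small before/after sketch, illustrative rather than taken from the codebase:

    import java.time.Duration;
    import java.time.Instant;

    public class TemporalTypesSketch {

        public static void main(String[] args) {
            // Before: the unit lives only in names and comments.
            long durationSec = 5 * 60; // five minutes
            long cutoffSec = System.currentTimeMillis() / 1000 - durationSec * 2;

            // After: the unit is carried by the type and the arithmetic reads as what it means.
            Duration duration = Duration.ofMinutes(5);
            Instant cutoff = Instant.now().minus(duration.multipliedBy(2));

            System.out.println(cutoffSec + " ~ " + cutoff.getEpochSecond());
        }
    }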

View File

@@ -5,15 +5,15 @@
 */
package io.debezium.connector.mysql;

-import org.apache.kafka.connect.source.SourceRecord;
-import org.junit.Assert;
-import org.junit.Test;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

+import org.apache.kafka.connect.source.SourceRecord;
+import org.junit.Assert;
+import org.junit.Test;

/**
 * @author Moira Tagle
 */
@@ -27,7 +27,7 @@ public void haltAfterPredicateTrue() {
        SourceRecord testSourceRecord = createSourceRecordWithOffset(offsets.get(0));
        // tested record (0) is before limit (1), so we should return true.
-       Assert.assertTrue(offsetLimitPredicate.test(testSourceRecord));
+       Assert.assertTrue(offsetLimitPredicate.accepts(testSourceRecord));
    }

    @Test
@@ -38,7 +38,7 @@ public void haltAfterPredicateFalse() {
        SourceRecord testSourceRecord = createSourceRecordWithOffset(offsets.get(1));
        // tested record (1) is beyond limit (0), so we should return false.
-       Assert.assertFalse(offsetLimitPredicate.test(testSourceRecord));
+       Assert.assertFalse(offsetLimitPredicate.accepts(testSourceRecord));
    }

    private final int SERVER_ID = 0;