DBZ-28 Corrected MySQL connector's behavior for representing deletes
Corrects a bug where a deleted row was written to Kafka in the same way as an insert, making them indistinguishable. Now, a deleted row is written with the row's primary/unique key as the record key, and a null record value. Note that if the row has no primary/unique key, no record is written to Kafka.
This commit is contained in:
parent
b6b982d711
commit
64d0e0b458
@ -296,7 +296,7 @@ public void handleDelete(Event event, SourceInfo source, Consumer<SourceRecord>
|
|||||||
Schema keySchema = converter.keySchema();
|
Schema keySchema = converter.keySchema();
|
||||||
Object key = converter.createKey(values, includedColumns);
|
Object key = converter.createKey(values, includedColumns);
|
||||||
Schema valueSchema = converter.valueSchema();
|
Schema valueSchema = converter.valueSchema();
|
||||||
Struct value = converter.inserted(values, includedColumns);
|
Struct value = converter.deleted(values, includedColumns);
|
||||||
SourceRecord record = new SourceRecord(source.partition(), source.offset(row), topic, partition,
|
SourceRecord record = new SourceRecord(source.partition(), source.offset(row), topic, partition,
|
||||||
keySchema, key, valueSchema, value);
|
keySchema, key, valueSchema, value);
|
||||||
recorder.accept(record);
|
recorder.accept(record);
|
||||||
|
Loading…
Reference in New Issue
Block a user