DBZ-1604 Documentation update
parent 884def88d7
commit 2c86b1c84f

@@ -132,3 +132,4 @@ WenZe Hu
William Pursell
Willie Cheong
Wout Scheepers
Yang Yang

@@ -5,6 +5,9 @@
 */
package io.debezium.converters;

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
@@ -13,9 +16,6 @@
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.HeaderConverter;

import java.nio.ByteBuffer;
import java.util.Map;

/**
 * A customized value converter that allows Avro messages to be delivered as-is (as byte[]) to Kafka. This is used
 * for the outbox pattern, where the payload is serialized by KafkaAvroSerializer and the consumer needs to receive the deserialized payload.

@@ -6,18 +6,18 @@

package io.debezium.converters;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;
import org.junit.Before;
import org.junit.Test;
import static junit.framework.TestCase.fail;
import static org.fest.assertions.Assertions.assertThat;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;

import static junit.framework.TestCase.fail;
import static org.fest.assertions.Assertions.assertThat;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;
import org.junit.Before;
import org.junit.Test;

public class ByteBufferConverterTest {

@@ -97,4 +97,4 @@ public void shouldConvertToConnectDataForNullValue() {
        assertThat(schemaAndValue.schema()).isEqualTo(Schema.OPTIONAL_BYTES_SCHEMA);
        assertThat(schemaAndValue.value()).isNull();
    }
}
}

@@ -201,3 +201,25 @@ See option `table.fields.additional.placement` for more details.
transforms=outbox,...
transforms.outbox.type=io.debezium.transforms.outbox.EventRouter
----

=== Using Avro as the payload format

The outbox routing SMT supports arbitrary payload formats, as the payload column value is passed on transparently.
As an alternative to working with JSON as shown above, it's therefore also possible to use Avro.
This can be beneficial for the purposes of message format governance and for making sure outbox event schemas evolve in a backwards-compatible way.

How a source application produces Avro messages as an outbox event payload is out of the scope of this documentation.
One possibility could be to leverage the `KafkaAvroSerializer` class and use it to serialize `GenericRecord` instances (a sketch follows at the end of this section).
In order to ensure that the Kafka message value is the exact Avro binary data,
apply the following configuration to the connector:

[source]
----
transforms=outbox,...
transforms.outbox.type=io.debezium.transforms.outbox.EventRouter
transforms.outbox.table.fields.additional.placement=type:header:eventType
value.converter=io.debezium.converters.ByteBufferConverter
----

This moves the `eventType` value into a Kafka message header, leaving only the `payload` column value (the Avro data) as the sole message value.
Using `ByteBufferConverter` as the value converter will propagate that value as-is into the Kafka message value.
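
For illustration only, the following sketch shows one way a source application could build such a payload by serializing a `GenericRecord` with `KafkaAvroSerializer`; the `OrderCreated` schema, the `order-events` topic name, and the schema registry URL are made-up examples and not part of Debezium itself:

[source,java]
----
// Illustrative sketch only; the schema, topic name and registry URL below are hypothetical.
import java.util.Collections;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

import io.confluent.kafka.serializers.KafkaAvroSerializer;

public class OutboxAvroPayloadExample {

    private static final Schema ORDER_CREATED_SCHEMA = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"OrderCreated\",\"fields\":["
                    + "{\"name\":\"orderId\",\"type\":\"string\"},"
                    + "{\"name\":\"amount\",\"type\":\"double\"}]}");

    public static byte[] createPayload(String orderId, double amount) {
        GenericRecord event = new GenericData.Record(ORDER_CREATED_SCHEMA);
        event.put("orderId", orderId);
        event.put("amount", amount);

        try (KafkaAvroSerializer serializer = new KafkaAvroSerializer()) {
            serializer.configure(
                    Collections.singletonMap("schema.registry.url", "http://registry:8081"), false);
            // The returned bytes are what the application writes into the outbox payload column;
            // with ByteBufferConverter configured as shown above, Debezium propagates them
            // unchanged into the Kafka message value.
            return serializer.serialize("order-events", event);
        }
    }
}
----

On the consuming side, the matching `KafkaAvroDeserializer` can then turn the propagated message value back into a `GenericRecord`.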