This commit is contained in:
Jiri Pechanec 2023-01-27 12:26:09 +01:00
parent a2f15abafd
commit 7d5179b5f6
175 changed files with 56 additions and 14924 deletions

View File

@ -28,7 +28,6 @@ jobs:
postgresql-changed: ${{ steps.changed-files-postgresql.outputs.any_changed }}
oracle-changed: ${{ steps.changed-files-oracle.outputs.any_changed }}
sqlserver-changed: ${{ steps.changed-files-sqlserver.outputs.any_changed }}
debezium-server-changed: ${{ steps.changed-files-debezium-server.outputs.any_changed }}
outbox-changed: ${{ steps.changed-files-outbox.outputs.any_changed }}
rest-extension-changed: ${{ steps.changed-files-rest-extension.outputs.any_changed }}
schema-generator-changed: ${{ steps.changed-files-schema-generator.outputs.any_changed }}
@ -97,13 +96,6 @@ jobs:
files: |
debezium-connector-sqlserver/**
- name: Get modified files (Debezium Server)
id: changed-files-debezium-server
uses: tj-actions/changed-files@v35.4.4
with:
files: |
debezium-server/**
- name: Get modified files (Quarkus Outbox)
id: changed-files-outbox
uses: tj-actions/changed-files@v35.4.4
@ -457,47 +449,6 @@ jobs:
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
-Ddebezium.test.records.waittime=10
build_debezium_server:
needs: [check_style, file_changes]
if: ${{ needs.file_changes.outputs.common-changed == 'true' || needs.file_changes.outputs.debezium-server-changed == 'true' }}
name: "Debezium Server"
runs-on: ubuntu-latest
steps:
- name: Checkout Action
uses: actions/checkout@v3
- name: Set up Java 17
uses: actions/setup-java@v3
with:
distribution: 'temurin'
java-version: 17
- name: Cache Maven Repository
uses: actions/cache@v3
with:
path: ~/.m2/repository
key: maven-debezium-test-build-${{ hashFiles('**/pom.xml') }}
restore-keys: |
maven-debezium-test-build-${{ hashFiles('**/pom.xml') }}
- name: Build Debezium Server
run: >
./mvnw clean install -Dquick -B -pl debezium-testing/debezium-testing-testcontainers,debezium-server,:debezium-storage-redis -Pserver-ci -am -amd
-Dcheckstyle.skip=true
-Dformat.skip=true
-Dhttp.keepAlive=false
-Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
-DskipITs=true
-DskipTests=true
- name: Test Debezium Server
run: >
./mvnw install -B -pl debezium-server -Pserver-ci -amd
-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
-Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
build_outbox:
needs: [check_style, file_changes]
if: ${{ needs.file_changes.outputs.common-changed == 'true' || needs.file_changes.outputs.outbox-changed == 'true' }}
@ -888,6 +839,61 @@ jobs:
-Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
build_debezium_server:
needs: [check_style, file_changes]
if: ${{ needs.file_changes.outputs.common-changed == 'true' }}
name: "Debezium Server"
runs-on: ubuntu-latest
steps:
- name: Checkout Action (Core)
uses: actions/checkout@v3
with:
path: core
- name: Checkout Action (Debezium Server)
uses: actions/checkout@v3
with:
repository: debezium/debezium-server
path: server
- name: Set up Java 17
uses: actions/setup-java@v2
with:
distribution: 'temurin'
java-version: 17
# We explicitly use only the hash of the POM files from the core repository by default.
# For this build, we do not care whether there are changes in the sibling repository, since this
# job will only ever fire if there are changes in the common paths identified in the file_changes job.
- name: Cache Maven Repository
uses: actions/cache@v2
with:
path: ~/.m2/repository
key: maven-debezium-test-build-${{ hashFiles('core/**/pom.xml') }}
restore-keys: |
maven-debezium-test-build-${{ hashFiles('core/**/pom.xml') }}
- name: Build Debezium (Core)
run: >
./core/mvnw clean install -f core/pom.xml
-DskipTests=true
-DskipITs=true
-Dcheckstyle.skip=true
-Dformat.skip=true
-Drevapi.skip
-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
-Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
- name: Build Debezium Server
run: >
./core/mvnw clean install -f server/pom.xml -Passembly
-Dcheckstyle.skip=true
-Dformat.skip=true
-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
-Dmaven.wagon.http.pool=false
-Dmaven.wagon.httpconnectionManager.ttlSeconds=120
build_ui:
needs: [check_style, file_changes]
if: ${{ needs.file_changes.outputs.debezium-ui-changed == 'true' }}

View File

@ -1,95 +1 @@
# Debezium Server
Debezium Server is a standalone Java application built on the Quarkus framework.
The application itself contains the `core` module and a set of modules responsible for communication with different target systems.
The per-module integration tests depend on the availability of external services.
It is thus recommended to execute the integration tests per module and to set up the necessary prerequisites beforehand.
Note: running these tests against external infrastructure may incur costs with your cloud provider.
We're not going to pay your AWS/GCP/Azure bill.
## Amazon Kinesis
* Execute `aws configure` as described in the AWS CLI [getting started](https://github.com/aws/aws-cli#getting-started) guide and set up the account.
* Create the Kinesis stream: `aws kinesis create-stream --stream-name testc.inventory.customers --shard-count 1`
* Build the module and execute the tests `mvn clean install -DskipITs=false -am -pl debezium-server-kinesis`
* Remove the stream `aws kinesis delete-stream --stream-name testc.inventory.customers`
## Google Cloud Pub/Sub
* Log in to your Google Cloud account using `gcloud auth application-default login` as described in the [documentation](https://cloud.google.com/sdk/gcloud/reference/auth/application-default).
* Build the module and execute the tests `mvn clean install -DskipITs=false -am -pl debezium-server-pubsub`
## Azure Event Hubs
Log in to your Azure account and create a resource group, e.g. on the CLI:
```shell
az login
az group create --name eventhubstest --location westeurope
```
### Create an Event Hubs namespace
Create an [Event Hubs namespace](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#namespace). Check the documentation for options on how to do this using the [Azure Portal](https://docs.microsoft.com/azure/event-hubs/event-hubs-create#create-an-event-hubs-namespace), [Azure CLI](https://docs.microsoft.com/azure/event-hubs/event-hubs-quickstart-cli#create-an-event-hubs-namespace) etc., e.g. on the CLI:
```shell
az eventhubs namespace create --name debezium-test --resource-group eventhubstest -l westeurope
```
### Create an Event Hub
Create an Event Hub (equivalent to a topic) with one partition. Check the documentation for options on how to do this using the [Azure Portal](https://docs.microsoft.com/azure/event-hubs/event-hubs-create#create-an-event-hub), [Azure CLI](https://docs.microsoft.com/azure/event-hubs/event-hubs-quickstart-cli#create-an-event-hub) etc., e.g. on the CLI:
```shell
az eventhubs eventhub create --name debezium-test-hub --resource-group eventhubstest --namespace-name debezium-test
```
### Build the module
[Get the Connection string](https://docs.microsoft.com/azure/event-hubs/event-hubs-get-connection-string) required to communicate with Event Hubs. The format is: `Endpoint=sb://<NAMESPACE>/;SharedAccessKeyName=<ACCESS_KEY_NAME>;SharedAccessKey=<ACCESS_KEY_VALUE>`.
E.g. on the CLI:
```shell
az eventhubs namespace authorization-rule keys list --resource-group eventhubstest --namespace-name debezium-test --name RootManageSharedAccessKey
```
Set environment variables required for tests:
```shell
export EVENTHUBS_CONNECTION_STRING=<Event Hubs connection string>
export EVENTHUBS_NAME=<name of the Event hub created in previous step>
```
Execute the tests:
```shell
mvn clean install -DskipITs=false -Deventhubs.connection.string=$EVENTHUBS_CONNECTION_STRING -Deventhubs.hub.name=$EVENTHUBS_NAME -am -pl :debezium-server-eventhubs
```
### Examine Events in the Event Hub
For example, using kafkacat, create _kafkacat.conf_:
```shell
metadata.broker.list=debezium-test.servicebus.windows.net:9093
security.protocol=SASL_SSL
sasl.mechanisms=PLAIN
sasl.username=$ConnectionString
sasl.password=Endpoint=sb://debezium-test.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=<access key>
```
Start consuming events:
```shell
export KAFKACAT_CONFIG=<path to kafkacat.conf>
kafkacat -b debezium-test.servicebus.windows.net:9093 -t debezium-test-hub
```
### Clean up
Delete the Event Hubs namespace and log out, e.g. on the CLI:
```shell
az group delete -n eventhubstest
az logout
```
Debezium Server has been moved to a [separate repository](https://github.com/debezium/debezium-server/).

View File

@ -1,236 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-build-parent</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-bom</artifactId>
<name>Debezium Server BOM</name>
<packaging>jar</packaging>
<properties>
<version.kinesis>2.13.13</version.kinesis>
<version.pubsub>25.0.0</version.pubsub>
<version.pulsar>2.10.1</version.pulsar>
<version.eventhubs>5.12.1</version.eventhubs>
<version.pravega>0.9.1</version.pravega>
<version.nats>2.16.3</version.nats>
<version.stan>2.2.3</version.stan>
<version.commons.logging>1.2</version.commons.logging>
</properties>
<dependencyManagement>
<dependencies>
<!-- Aligning versions/fixing scopes -->
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${version.commons.logging}</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${version.kafka}</version>
</dependency>
<!-- Quarkus dependencies -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-bom</artifactId>
<version>${quarkus.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<!-- Override Quarkus default driver versions -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${version.mysql.driver}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${version.postgresql.driver}</version>
</dependency>
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver-core</artifactId>
<version>${version.mongo.driver}</version>
</dependency>
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver-sync</artifactId>
<version>${version.mongo.driver}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${version.sqlserver.driver}</version>
</dependency>
<!-- Debezium dependencies -->
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-kinesis</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-http</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pubsub</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pulsar</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-eventhubs</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-redis</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-kafka</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pravega</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-nats-streaming</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-nats-jetstream</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-infinispan</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-rocketmq</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<version>${project.version}</version>
</dependency>
<!-- Quarkus BOM can override apicurio deps -->
<dependency>
<groupId>io.apicurio</groupId>
<artifactId>apicurio-registry-serdes-avro-serde</artifactId>
<version>${version.apicurio}</version>
</dependency>
<dependency>
<groupId>io.apicurio</groupId>
<artifactId>apicurio-registry-client</artifactId>
<version>${version.apicurio}</version>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>kinesis</artifactId>
<version>${version.kinesis}</version>
</dependency>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>libraries-bom</artifactId>
<version>${version.pubsub}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.apache.pulsar</groupId>
<artifactId>pulsar-client</artifactId>
<version>${version.pulsar}</version>
</dependency>
<dependency>
<groupId>com.azure</groupId>
<artifactId>azure-messaging-eventhubs</artifactId>
<version>${version.eventhubs}</version>
</dependency>
<dependency>
<groupId>io.nats</groupId>
<artifactId>jnats</artifactId>
<version>${version.nats}</version>
</dependency>
<dependency>
<groupId>io.nats</groupId>
<artifactId>java-nats-streaming</artifactId>
<version>${version.stan}</version>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.pravega</groupId>
<artifactId>pravega-client</artifactId>
<version>${version.pravega}</version>
</dependency>
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-client</artifactId>
<version>${version.rocketmq}</version>
</dependency>
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-tools</artifactId>
<version>${version.rocketmq}</version>
<exclusions>
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-bom</artifactId>
<version>${version.infinispan}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
</project>

View File

@ -1,362 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-core</artifactId>
<name>Debezium Server Core</name>
<packaging>jar</packaging>
<dependencies>
<!-- Quarkus extensions -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-core</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-smallrye-health</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-resteasy</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-api</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-embedded</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope></dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.apicurio</groupId>
<artifactId>apicurio-registry-utils-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-protobuf-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
<!-- Vitess/grpc dependency conflicts with version required by schema registry.
We thus change the version just for the sake of compatibility testing.
-->
<dependency>
<groupId>com.google.api.grpc</groupId>
<artifactId>proto-google-common-protos</artifactId>
<version>2.5.1</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
<test.apicurio>false</test.apicurio>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>assembly</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<version>${version.failsafe.plugin}</version>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<runOrder>${runOrder}</runOrder>
</configuration>
<executions>
<execution>
<id>integration-test-apicurio-ext-json</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<test.apicurio.converter.format>json</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>integration-test-apicurio-avro</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<test.apicurio.converter.format>avro</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>integration-test-protobuf</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<debezium.format.key>protobuf</debezium.format.key>
<debezium.format.value>protobuf</debezium.format.value>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>server-ci</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<version>${version.failsafe.plugin}</version>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<runOrder>${runOrder}</runOrder>
</configuration>
<executions>
<execution>
<id>integration-test-apicurio-ext-json</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<test.apicurio.converter.format>json</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>integration-test-apicurio-avro</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<test.apicurio.converter.format>avro</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>integration-test-protobuf</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<debezium.format.key>protobuf</debezium.format.key>
<debezium.format.value>protobuf</debezium.format.value>
</systemPropertyVariables>
</configuration>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>apicurio-ext-json</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemPropertyVariables>
<test.apicurio.converter.format>json</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>apicurio-avro</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemPropertyVariables>
<test.apicurio.converter.format>avro</test.apicurio.converter.format>
<test.apicurio>true</test.apicurio>
</systemPropertyVariables>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>schema-registry-protobuf</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemPropertyVariables>
<debezium.format.key>protobuf</debezium.format.key>
<debezium.format.value>protobuf</debezium.format.value>
</systemPropertyVariables>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
</properties>
</profile>
</profiles>
</project>

View File

@ -1,14 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import io.debezium.engine.format.SerializationFormat;
/**
* A {@link SerializationFormat} defining the undefined serialization type.
*/
class Any implements SerializationFormat<Object> {
}

View File

@ -1,87 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import org.eclipse.microprofile.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
/**
* Basic services provided to all change consumers.
*
* @author Jiri Pechanec
*
*/
public class BaseChangeConsumer {
private static final Logger LOGGER = LoggerFactory.getLogger(BaseChangeConsumer.class);
protected StreamNameMapper streamNameMapper = (x) -> x;
@Inject
Instance<StreamNameMapper> customStreamNameMapper;
@PostConstruct
void init() {
if (customStreamNameMapper.isResolvable()) {
streamNameMapper = customStreamNameMapper.get();
}
LOGGER.info("Using '{}' stream name mapper", streamNameMapper);
}
/**
* Get a subset of the configuration properties that matches the given prefix.
*
* @param config The global configuration object to extract the subset from.
* @param prefix The prefix to filter property names.
*
* @return A subset of the original configuration properties containing property names
* without the prefix.
*/
protected Map<String, Object> getConfigSubset(Config config, String prefix) {
final Map<String, Object> ret = new HashMap<>();
for (String propName : config.getPropertyNames()) {
if (propName.startsWith(prefix)) {
final String newPropName = propName.substring(prefix.length());
ret.put(newPropName, config.getConfigValue(propName).getValue());
}
}
return ret;
}
protected byte[] getBytes(Object object) {
if (object instanceof byte[]) {
return (byte[]) object;
}
else if (object instanceof String) {
return ((String) object).getBytes();
}
throw new DebeziumException(unsupportedTypeMessage(object));
}
protected String getString(Object object) {
if (object instanceof String) {
return (String) object;
}
throw new DebeziumException(unsupportedTypeMessage(object));
}
protected String unsupportedTypeMessage(Object object) {
final String type = (object == null) ? "null" : object.getClass().getName();
return "Unexpected data type '" + type + "'";
}
}
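
For orientation, a minimal sketch of how a sink module typically builds on this class is shown below. Every name in it (the `mysink` sink name, the package, and the class) is hypothetical and not part of this commit; concrete sinks follow the same shape: extend `BaseChangeConsumer`, implement `DebeziumEngine.ChangeConsumer`, and read their settings via `getConfigSubset`.

```java
// Hypothetical sink sketch; "mysink", the package and the class name are illustrative only.
package io.debezium.server.example;

import java.util.List;
import java.util.Map;

import javax.enterprise.context.Dependent;
import javax.inject.Named;

import org.eclipse.microprofile.config.ConfigProvider;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.server.BaseChangeConsumer;

@Named("mysink")
@Dependent
public class MySinkChangeConsumer extends BaseChangeConsumer
        implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {

    @Override
    public void handleBatch(List<ChangeEvent<Object, Object>> records,
                            DebeziumEngine.RecordCommitter<ChangeEvent<Object, Object>> committer)
            throws InterruptedException {
        // e.g. debezium.sink.mysink.endpoint=... is exposed here as endpoint=...
        Map<String, Object> sinkConfig = getConfigSubset(ConfigProvider.getConfig(), "debezium.sink.mysink.");

        for (ChangeEvent<Object, Object> record : records) {
            if (record.value() == null) {
                committer.markProcessed(record);
                continue;
            }
            String stream = streamNameMapper.map(record.destination());
            byte[] payload = getBytes(record.value());
            // deliver 'payload' to 'stream' on the target system described by sinkConfig ...
            committer.markProcessed(record);
        }
        committer.markBatchFinished();
    }
}
```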

View File

@ -1,99 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.event.Event;
import javax.inject.Inject;
import org.eclipse.microprofile.health.HealthCheck;
import org.eclipse.microprofile.health.HealthCheckResponse;
import org.eclipse.microprofile.health.Liveness;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.engine.DebeziumEngine;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.server.events.ConnectorStoppedEvent;
import io.debezium.server.events.TaskStartedEvent;
import io.debezium.server.events.TaskStoppedEvent;
/**
* The server lifecycle listener that publishes CDI events based on lifecycle changes and also provides
* MicroProfile Health information.
*
* @author Jiri Pechanec
*
*/
@Liveness
@ApplicationScoped
public class ConnectorLifecycle implements HealthCheck, DebeziumEngine.ConnectorCallback, DebeziumEngine.CompletionCallback {
private static final Logger LOGGER = LoggerFactory.getLogger(ConnectorLifecycle.class);
private volatile boolean live = false;
@Inject
Event<ConnectorStartedEvent> connectorStartedEvent;
@Inject
Event<ConnectorStoppedEvent> connectorStoppedEvent;
@Inject
Event<TaskStartedEvent> taskStartedEvent;
@Inject
Event<TaskStoppedEvent> taskStoppedEvent;
@Inject
Event<ConnectorCompletedEvent> connectorCompletedEvent;
@Override
public void connectorStarted() {
LOGGER.debug("Connector started");
connectorStartedEvent.fire(new ConnectorStartedEvent());
}
@Override
public void connectorStopped() {
LOGGER.debug("Connector stopped");
connectorStoppedEvent.fire(new ConnectorStoppedEvent());
}
@Override
public void taskStarted() {
LOGGER.debug("Task started");
taskStartedEvent.fire(new TaskStartedEvent());
live = true;
}
@Override
public void taskStopped() {
LOGGER.debug("Task stopped");
taskStoppedEvent.fire(new TaskStoppedEvent());
}
@Override
public void handle(boolean success, String message, Throwable error) {
String logMessage = String.format("Connector completed: success = '%s', message = '%s', error = '%s'", success, message, error);
if (success) {
LOGGER.info(logMessage);
}
else {
LOGGER.error(logMessage, error);
}
connectorCompletedEvent.fire(new ConnectorCompletedEvent(success, message, error));
live = false;
}
@Override
public HealthCheckResponse call() {
LOGGER.trace("Healthcheck called - live = '{}'", live);
return HealthCheckResponse.named("debezium").status(live).build();
}
}
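
The CDI events fired above can be observed by any bean in the application; the integration tests further down in this commit use exactly that mechanism. A small, hypothetical observer (class name and log wording are illustrative) might look like:

```java
// Hypothetical observer sketch; only the event types come from this commit.
package io.debezium.server.example;

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.event.Observes;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;

@ApplicationScoped
public class LifecycleLogger {

    private static final Logger LOGGER = LoggerFactory.getLogger(LifecycleLogger.class);

    void onStarted(@Observes ConnectorStartedEvent event) {
        LOGGER.info("Connector initialization finished, task about to start");
    }

    void onCompleted(@Observes ConnectorCompletedEvent event) {
        if (!event.isSuccess()) {
            LOGGER.error("Connector failed: {}", event.getMessage(), event.getError().orElse(null));
        }
    }
}
```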

View File

@ -1,25 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
@Qualifier
@Target({ TYPE, METHOD, PARAMETER, FIELD })
@Retention(RUNTIME)
@Documented
public @interface CustomConsumerBuilder {
}

View File

@ -1,134 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.lang.management.ManagementFactory;
import java.util.Objects;
import javax.enterprise.context.Dependent;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
/**
* Reads Debezium source pipeline metrics.
* NOTE: calls that read metrics should be made only after the Debezium connector is initialized
* and has registered its metrics; otherwise a `Debezium MBean not found` error is thrown.
*
* @author Ismail Simsek
*/
@Dependent
public class DebeziumMetrics {
protected static final Logger LOGGER = LoggerFactory.getLogger(DebeziumMetrics.class);
public static final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
private ObjectName snapshotMetricsObjectName;
private ObjectName streamingMetricsObjectName;
private static ObjectName getDebeziumMbean(String context) {
ObjectName debeziumMbean = null;
for (ObjectName mbean : mbeanServer.queryNames(null, null)) {
if (mbean.getCanonicalName().contains("debezium.")
&& mbean.getCanonicalName().contains("type=connector-metrics")
&& mbean.getCanonicalName().contains("context=" + context)) {
LOGGER.debug("Using {} MBean to get {} metrics", mbean, context);
debeziumMbean = mbean;
break;
}
}
Objects.requireNonNull(debeziumMbean, "Debezium MBean (context=" + context + ") not found!");
return debeziumMbean;
}
public ObjectName getSnapshotMetricsObjectName() {
if (snapshotMetricsObjectName == null) {
snapshotMetricsObjectName = getDebeziumMbean("snapshot");
}
return snapshotMetricsObjectName;
}
public ObjectName getStreamingMetricsObjectName() {
if (streamingMetricsObjectName == null) {
streamingMetricsObjectName = getDebeziumMbean("streaming");
}
return streamingMetricsObjectName;
}
public int maxQueueSize() {
try {
return (int) mbeanServer.getAttribute(getStreamingMetricsObjectName(), "QueueTotalCapacity");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
public boolean snapshotRunning() {
try {
return (boolean) mbeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotRunning");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
public boolean snapshotCompleted() {
try {
return (boolean) mbeanServer.getAttribute(getSnapshotMetricsObjectName(), "SnapshotCompleted");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
public int streamingQueueRemainingCapacity() {
try {
return (int) mbeanServer.getAttribute(getStreamingMetricsObjectName(), "QueueRemainingCapacity");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
public int streamingQueueCurrentSize() {
return maxQueueSize() - streamingQueueRemainingCapacity();
}
public long streamingMilliSecondsBehindSource() {
try {
return (long) mbeanServer.getAttribute(getStreamingMetricsObjectName(), "MilliSecondsBehindSource");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
public void logMetrics() {
LOGGER.info("Debezium Metrics: snapshotCompleted={} snapshotRunning={} "
+ "streamingQueueCurrentSize={} streamingQueueRemainingCapacity={} maxQueueSize={} streamingMilliSecondsBehindSource={}",
this.snapshotCompleted(),
this.snapshotRunning(),
this.streamingQueueCurrentSize(),
this.streamingQueueRemainingCapacity(),
this.maxQueueSize(),
this.streamingMilliSecondsBehindSource());
}
}
}

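As the `DebeziumServerIT` test later in this commit illustrates, the metrics bean is meant to be injected and polled only once the connector has registered its MBeans. A condensed, hypothetical usage sketch (class name and timeout are illustrative):

```java
// Hypothetical usage sketch; the polling pattern mirrors DebeziumServerIT in this commit.
package io.debezium.server.example;

import java.time.Duration;

import javax.enterprise.context.Dependent;
import javax.inject.Inject;

import org.awaitility.Awaitility;

import io.debezium.server.DebeziumMetrics;

@Dependent
public class MetricsProbe {

    @Inject
    DebeziumMetrics metrics;

    public void waitForSnapshotAndDrainedQueue() {
        // The MBean lookup throws until the connector has registered its metrics,
        // so simply retry until the snapshot is done and the streaming queue is empty.
        Awaitility.await().atMost(Duration.ofSeconds(60)).until(() -> {
            try {
                return metrics.snapshotCompleted() && metrics.streamingQueueCurrentSize() == 0;
            }
            catch (Exception e) {
                return false;
            }
        });
    }
}
```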
View File

@ -1,277 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.nio.file.Paths;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.context.spi.CreationalContext;
import javax.enterprise.event.Observes;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.BeanManager;
import javax.inject.Inject;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.health.Liveness;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.ChangeConsumer;
import io.debezium.engine.format.Avro;
import io.debezium.engine.format.CloudEvents;
import io.debezium.engine.format.Json;
import io.debezium.engine.format.JsonByteArray;
import io.debezium.engine.format.Protobuf;
import io.debezium.relational.history.SchemaHistory;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.Startup;
/**
* <p>The entry point of the Quarkus-based standalone server. The server is configured via Quarkus/MicroProfile Configuration sources
* and provides a few out-of-the-box target implementations.</p>
* <p>The implementation uses CDI to find all classes that implement the {@link DebeziumEngine.ChangeConsumer} interface.
* The candidate classes should be annotated with the {@code @Named} annotation and should be {@code @Dependent}.</p>
* <p>The configuration option {@code debezium.sink.type} provides the name of the consumer that should be used, and the value
* must match exactly one of the implementation classes.</p>
*
* @author Jiri Pechanec
*
*/
@ApplicationScoped
@Startup
public class DebeziumServer {
private static final Logger LOGGER = LoggerFactory.getLogger(DebeziumServer.class);
private static final String PROP_PREFIX = "debezium.";
private static final String PROP_SOURCE_PREFIX = PROP_PREFIX + "source.";
private static final String PROP_SINK_PREFIX = PROP_PREFIX + "sink.";
private static final String PROP_FORMAT_PREFIX = PROP_PREFIX + "format.";
private static final String PROP_PREDICATES_PREFIX = PROP_PREFIX + "predicates.";
private static final String PROP_TRANSFORMS_PREFIX = PROP_PREFIX + "transforms.";
private static final String PROP_HEADER_FORMAT_PREFIX = PROP_FORMAT_PREFIX + "header.";
private static final String PROP_KEY_FORMAT_PREFIX = PROP_FORMAT_PREFIX + "key.";
private static final String PROP_VALUE_FORMAT_PREFIX = PROP_FORMAT_PREFIX + "value.";
private static final String PROP_OFFSET_STORAGE_PREFIX = "offset.storage.";
private static final String PROP_PREDICATES = PROP_PREFIX + "predicates";
private static final String PROP_TRANSFORMS = PROP_PREFIX + "transforms";
private static final String PROP_SINK_TYPE = PROP_SINK_PREFIX + "type";
private static final String PROP_HEADER_FORMAT = PROP_FORMAT_PREFIX + "header";
private static final String PROP_KEY_FORMAT = PROP_FORMAT_PREFIX + "key";
private static final String PROP_VALUE_FORMAT = PROP_FORMAT_PREFIX + "value";
private static final String PROP_TERMINATION_WAIT = PROP_PREFIX + "termination.wait";
private static final String FORMAT_JSON = Json.class.getSimpleName().toLowerCase();
private static final String FORMAT_JSON_BYTE_ARRAY = JsonByteArray.class.getSimpleName().toLowerCase();
private static final String FORMAT_CLOUDEVENT = CloudEvents.class.getSimpleName().toLowerCase();
private static final String FORMAT_AVRO = Avro.class.getSimpleName().toLowerCase();
private static final String FORMAT_PROTOBUF = Protobuf.class.getSimpleName().toLowerCase();
private static final Pattern SHELL_PROPERTY_NAME_PATTERN = Pattern.compile("^[a-zA-Z0-9_]+_+[a-zA-Z0-9_]+$");
private ExecutorService executor = Executors.newSingleThreadExecutor();
private int returnCode = 0;
@Inject
BeanManager beanManager;
@Inject
@Liveness
ConnectorLifecycle health;
private Bean<DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>>> consumerBean;
private CreationalContext<ChangeConsumer<ChangeEvent<Object, Object>>> consumerBeanCreationalContext;
private DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> consumer;
private DebeziumEngine<?> engine;
private final Properties props = new Properties();
@SuppressWarnings("unchecked")
@PostConstruct
public void start() {
final Config config = loadConfigOrDie();
final String name = config.getValue(PROP_SINK_TYPE, String.class);
final Set<Bean<?>> beans = beanManager.getBeans(name).stream()
.filter(x -> DebeziumEngine.ChangeConsumer.class.isAssignableFrom(x.getBeanClass()))
.collect(Collectors.toSet());
LOGGER.debug("Found {} candidate consumer(s)", beans.size());
if (beans.size() == 0) {
throw new DebeziumException("No Debezium consumer named '" + name + "' is available");
}
else if (beans.size() > 1) {
throw new DebeziumException("Multiple Debezium consumers named '" + name + "' were found");
}
consumerBean = (Bean<DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>>>) beans.iterator().next();
consumerBeanCreationalContext = beanManager.createCreationalContext(consumerBean);
consumer = consumerBean.create(consumerBeanCreationalContext);
LOGGER.info("Consumer '{}' instantiated", consumer.getClass().getName());
final Class<Any> keyFormat = (Class<Any>) getFormat(config, PROP_KEY_FORMAT);
final Class<Any> valueFormat = (Class<Any>) getFormat(config, PROP_VALUE_FORMAT);
final Class<Any> headerFormat = (Class<Any>) getHeaderFormat(config);
configToProperties(config, props, PROP_SOURCE_PREFIX, "", true);
configToProperties(config, props, PROP_FORMAT_PREFIX, "key.converter.", true);
configToProperties(config, props, PROP_FORMAT_PREFIX, "value.converter.", true);
configToProperties(config, props, PROP_FORMAT_PREFIX, "header.converter.", true);
configToProperties(config, props, PROP_KEY_FORMAT_PREFIX, "key.converter.", true);
configToProperties(config, props, PROP_VALUE_FORMAT_PREFIX, "value.converter.", true);
configToProperties(config, props, PROP_HEADER_FORMAT_PREFIX, "header.converter.", true);
configToProperties(config, props, PROP_SINK_PREFIX + name + ".", SchemaHistory.CONFIGURATION_FIELD_PREFIX_STRING + name + ".", false);
configToProperties(config, props, PROP_SINK_PREFIX + name + ".", PROP_OFFSET_STORAGE_PREFIX + name + ".", false);
final Optional<String> transforms = config.getOptionalValue(PROP_TRANSFORMS, String.class);
if (transforms.isPresent()) {
props.setProperty("transforms", transforms.get());
configToProperties(config, props, PROP_TRANSFORMS_PREFIX, "transforms.", true);
}
final Optional<String> predicates = config.getOptionalValue(PROP_PREDICATES, String.class);
if (predicates.isPresent()) {
props.setProperty("predicates", predicates.get());
configToProperties(config, props, PROP_PREDICATES_PREFIX, "predicates.", true);
}
props.setProperty("name", name);
LOGGER.debug("Configuration for DebeziumEngine: {}", props);
engine = DebeziumEngine.create(keyFormat, valueFormat, headerFormat)
.using(props)
.using((DebeziumEngine.ConnectorCallback) health)
.using((DebeziumEngine.CompletionCallback) health)
.notifying(consumer)
.build();
executor.execute(() -> {
try {
engine.run();
}
finally {
Quarkus.asyncExit(returnCode);
}
});
LOGGER.info("Engine executor started");
}
private void configToProperties(Config config, Properties props, String oldPrefix, String newPrefix, boolean overwrite) {
for (String name : config.getPropertyNames()) {
String updatedPropertyName = null;
if (SHELL_PROPERTY_NAME_PATTERN.matcher(name).matches()) {
updatedPropertyName = name.replace("_", ".").toLowerCase();
}
if (updatedPropertyName != null && updatedPropertyName.startsWith(oldPrefix)) {
String finalPropertyName = newPrefix + updatedPropertyName.substring(oldPrefix.length());
if (overwrite || !props.containsKey(finalPropertyName)) {
props.setProperty(finalPropertyName, config.getValue(name, String.class));
}
}
else if (name.startsWith(oldPrefix)) {
String finalPropertyName = newPrefix + name.substring(oldPrefix.length());
if (overwrite || !props.containsKey(finalPropertyName)) {
props.setProperty(finalPropertyName, config.getConfigValue(name).getValue());
}
}
}
}
private Class<?> getFormat(Config config, String property) {
final String formatName = config.getOptionalValue(property, String.class).orElse(FORMAT_JSON);
if (FORMAT_JSON.equals(formatName)) {
return Json.class;
}
if (FORMAT_JSON_BYTE_ARRAY.equals(formatName)) {
return JsonByteArray.class;
}
else if (FORMAT_CLOUDEVENT.equals(formatName)) {
return CloudEvents.class;
}
else if (FORMAT_AVRO.equals(formatName)) {
return Avro.class;
}
else if (FORMAT_PROTOBUF.equals(formatName)) {
return Protobuf.class;
}
throw new DebeziumException("Unknown format '" + formatName + "' for option " + "'" + property + "'");
}
private Class<?> getHeaderFormat(Config config) {
final String formatName = config.getOptionalValue(PROP_HEADER_FORMAT, String.class).orElse(FORMAT_JSON);
if (FORMAT_JSON.equals(formatName)) {
return Json.class;
}
else if (FORMAT_JSON_BYTE_ARRAY.equals(formatName)) {
return JsonByteArray.class;
}
throw new DebeziumException("Unknown format '" + formatName + "' for option " + "'" + PROP_HEADER_FORMAT + "'");
}
public void stop(@Observes ShutdownEvent event) {
try {
LOGGER.info("Received request to stop the engine");
final Config config = ConfigProvider.getConfig();
engine.close();
executor.shutdown();
executor.awaitTermination(config.getOptionalValue(PROP_TERMINATION_WAIT, Integer.class).orElse(10), TimeUnit.SECONDS);
}
catch (Exception e) {
LOGGER.error("Exception while shuttting down Debezium", e);
}
consumerBean.destroy(consumer, consumerBeanCreationalContext);
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) {
if (!event.isSuccess()) {
returnCode = 1;
}
}
private Config loadConfigOrDie() {
final Config config = ConfigProvider.getConfig();
// Check config and exit if we cannot load mandatory option.
try {
config.getValue(PROP_SINK_TYPE, String.class);
}
catch (NoSuchElementException e) {
final String configFile = Paths.get(System.getProperty("user.dir"), "conf", "application.properties").toString();
// CHECKSTYLE IGNORE check FOR NEXT 2 LINES
System.err.println(String.format("Failed to load mandatory config value '%s'. Please check you have a correct Debezium server config in %s or required "
+ "properties are defined via system or environment variables.", PROP_SINK_TYPE, configFile));
Quarkus.asyncExit();
}
return config;
}
/**
* For test purposes only
*/
DebeziumEngine.ChangeConsumer<?> getConsumer() {
return consumer;
}
public Properties getProps() {
return props;
}
}

View File

@ -1,18 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.annotations.QuarkusMain;
@QuarkusMain
public class Main {
public static void main(String... args) {
Quarkus.run(args);
}
}

View File

@ -1,16 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
/**
* Transforms the name of the record destination to the target stream name.
*
* @author Jiri Pechanec
*
*/
public interface StreamNameMapper {
String map(String topic);
}
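
Because `BaseChangeConsumer` earlier in this commit injects an `Instance<StreamNameMapper>` and uses it when resolvable, providing a bean of this type is enough to customize destination names. A hypothetical sketch (the `cdc.` prefix and class name are illustrative):

```java
// Hypothetical mapper sketch; the "cdc." prefix is illustrative only.
package io.debezium.server.example;

import javax.enterprise.context.ApplicationScoped;

import io.debezium.server.StreamNameMapper;

@ApplicationScoped
public class PrefixingStreamNameMapper implements StreamNameMapper {

    @Override
    public String map(String topic) {
        // e.g. "testc.inventory.customers" becomes "cdc.testc.inventory.customers"
        return "cdc." + topic;
    }
}
```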

View File

@ -1,57 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.events;
import java.util.Optional;
/**
* Fired when the connector has completed. Provides information about the completion state, a message,
* and an optional stack trace in case of error.
*
* @author Jiri Pechanec
*
*/
public class ConnectorCompletedEvent {
private final boolean success;
private final String message;
private final Throwable error;
public ConnectorCompletedEvent(boolean success, String message, Throwable error) {
this.success = success;
this.message = message;
this.error = error;
}
/**
*
* @return true if the connector completed successfully
*/
public boolean isSuccess() {
return success;
}
/**
*
* @return the message associated with connector completion
*/
public String getMessage() {
return message;
}
/**
*
* @return optional error in case the connector has not started successfully or was terminated with an error
*/
public Optional<Throwable> getError() {
return Optional.ofNullable(error);
}
@Override
public String toString() {
return "ConnectorCompletedEvent [success=" + success + ", message=" + message + ", error=" + error + "]";
}
}

View File

@ -1,16 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.events;
/**
* Fired when the connector is started. The initialization is completed but the execution task
* is not started yet.
*
* @author Jiri Pechanec
*
*/
public class ConnectorStartedEvent {
}

View File

@ -1,15 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.events;
/**
* Fired when the connector is stopped but the final execution completion state is not yet determined.
*
* @author Jiri Pechanec
*
*/
public class ConnectorStoppedEvent {
}

View File

@ -1,15 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.events;
/**
* Fired right after the connector execution code is started.
*
* @author Jiri Pechanec
*
*/
public class TaskStartedEvent {
}

View File

@ -1,15 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.events;
/**
* Fired right after the connector execution code is stopped.
*
* @author Jiri Pechanec
*
*/
public class TaskStoppedEvent {
}

View File

@ -1,7 +0,0 @@
__ __ _
____/ /___ / /_ ___ ____ (_)__ __ ____ ___
/ __ // _ \ / __ \ / _ \/_ / / // / / // __ `__ \
/ /_/ // __// /_/ // __/ / /_ / // /_/ // / / / / /
\__,_/ \___//_.___/ \___/ /___//_/ \__,_//_/ /_/ /_/

View File

@ -1,20 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.util.Collections;
import java.util.List;
import io.debezium.testing.testcontainers.ApicurioTestResourceLifeCycleManager;
import io.quarkus.test.junit.QuarkusTestProfile;
public class DebeziumServerApicurioProfile implements QuarkusTestProfile {
@Override
public List<TestResourceEntry> testResources() {
return Collections.singletonList(new TestResourceEntry(ApicurioTestResourceLifeCycleManager.class));
}
}

View File

@ -1,72 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
/**
* Integration test that verifies basic reading from PostgreSQL database.
*
* @author Oren Elias
*/
@QuarkusTest
@TestProfile(DebeziumServerFileConfigProviderProfile.class)
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@EnabledIfSystemProperty(named = "test.apicurio", matches = "false", disabledReason = "DebeziumServerConfigProvidersIT doesn't run with apicurio profile.")
@DisabledIfSystemProperty(named = "debezium.format.key", matches = "protobuf")
@DisabledIfSystemProperty(named = "debezium.format.value", matches = "protobuf")
public class DebeziumServerConfigProvidersIT {
private static final int MESSAGE_COUNT = 4;
@Inject
DebeziumServer server;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
if (!TestConfigSource.isItTest()) {
return;
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testPostgresWithJson() throws Exception {
Testing.Print.enable();
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains(
"\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}");
}
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import io.quarkus.test.junit.QuarkusTestProfile;
public class DebeziumServerFileConfigProviderProfile implements QuarkusTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<String, String>();
URL secretFile = DebeziumServerFileConfigProviderProfile.class.getClassLoader().getResource("secrets_test.txt");
config.put("debezium.source.database.user", "\\${file:" + secretFile.getPath() + ":user}");
config.put("debezium.source.config.providers", "file");
config.put("debezium.source.config.providers.file.class", "org.apache.kafka.common.config.provider.FileConfigProvider");
return config;
}
}

View File

@ -1,92 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from PostgreSQL database.
*
* @author Jiri Pechanec
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@EnabledIfSystemProperty(named = "test.apicurio", matches = "false", disabledReason = "DebeziumServerIT doesn't run with apicurio profile.")
@DisabledIfSystemProperty(named = "debezium.format.key", matches = "protobuf")
@DisabledIfSystemProperty(named = "debezium.format.value", matches = "protobuf")
public class DebeziumServerIT {
private static final int MESSAGE_COUNT = 4;
@Inject
DebeziumServer server;
@Inject
DebeziumMetrics metrics;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
if (!TestConfigSource.isItTest()) {
return;
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testPostgresWithJson() throws Exception {
Testing.Print.enable();
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains(
"\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}");
}
@Test
public void testDebeziumMetricsWithPostgres() {
Testing.Print.enable();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> {
try {
// snapshot process finished
// and consuming events finished!
return metrics.snapshotCompleted()
&& metrics.streamingQueueCurrentSize() == 0
&& metrics.maxQueueSize() == CommonConnectorConfig.DEFAULT_MAX_QUEUE_SIZE;
}
catch (Exception e) {
return false;
}
});
}
}

View File

@ -1,20 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.util.Collections;
import java.util.List;
import io.debezium.testing.testcontainers.SchemaRegistryTestResourceLifecycleManager;
import io.quarkus.test.junit.QuarkusTestProfile;
public class DebeziumServerSchemaRegistryProfile implements QuarkusTestProfile {
@Override
public List<TestResourceEntry> testResources() {
return Collections.singletonList(new TestResourceEntry(SchemaRegistryTestResourceLifecycleManager.class));
}
}

View File

@ -1,105 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.StandardOpenOption;
import java.time.Duration;
import java.util.Properties;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import io.debezium.DebeziumException;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.util.Collect;
import io.debezium.util.Testing;
import io.quarkus.test.junit.QuarkusTest;
/**
* Smoke test that verifies the basic functionality of Quarkus-based server.
*
* @author Jiri Pechanec
*/
@QuarkusTest
public class DebeziumServerTest {
private static final int MESSAGE_COUNT = 5;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
Testing.Files.delete(TestConfigSource.TEST_FILE_PATH);
Testing.Files.createTestingFile(TestConfigSource.TEST_FILE_PATH);
appendLinesToSource(MESSAGE_COUNT);
Testing.Print.enable();
}
@Inject
DebeziumServer server;
@Test
public void testProps() {
Properties properties = server.getProps();
assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isNotNull();
assertThat(properties.getProperty(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name())).isEqualTo("public.table_name");
assertThat(properties.getProperty("offset.flush.interval.ms.test")).isNotNull();
assertThat(properties.getProperty("offset.flush.interval.ms.test")).isEqualTo("0");
assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isNotNull();
assertThat(properties.getProperty("snapshot.select.statement.overrides.public.table_name")).isEqualTo("SELECT * FROM table_name WHERE 1>2");
assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isNotNull();
assertThat(properties.getProperty("database.allowPublicKeyRetrieval")).isEqualTo("true");
assertThat(properties.getProperty("transforms.hoist.predicate")).isNotNull();
assertThat(properties.getProperty("transforms.hoist.predicate")).isEqualTo("topicNameMatch");
assertThat(properties.getProperty("predicates")).isNotNull();
assertThat(properties.getProperty("predicates")).isEqualTo("topicNameMatch");
assertThat(properties.getProperty("predicates.topicNameMatch.type")).isNotNull();
assertThat(properties.getProperty("predicates.topicNameMatch.type")).isEqualTo("org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
assertThat(properties.getProperty("predicates.topicNameMatch.pattern")).isNotNull();
assertThat(properties.getProperty("predicates.topicNameMatch.pattern")).isEqualTo(".*");
}
@Test
public void testJson() throws Exception {
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(testConsumer.getValues().get(MESSAGE_COUNT - 1)).isEqualTo("{\"line\":\"" + MESSAGE_COUNT + "\"}");
}
static void appendLinesToSource(int numberOfLines) {
CharSequence[] lines = new CharSequence[numberOfLines];
for (int i = 0; i != numberOfLines; ++i) {
lines[i] = generateLine(i + 1);
}
try {
java.nio.file.Files.write(TestConfigSource.TEST_FILE_PATH, Collect.arrayListOf(lines), StandardCharsets.UTF_8, StandardOpenOption.APPEND);
}
catch (IOException e) {
throw new DebeziumException(e);
}
}
static String generateLine(int lineNumber) {
return Integer.toString(lineNumber);
}
}

View File

@@ -1,79 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import io.debezium.DebeziumException;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@TestProfile(DebeziumServerApicurioProfile.class)
@EnabledIfSystemProperty(named = "test.apicurio", matches = "true", disabledReason = "DebeziumServerWithApicurioIT only runs when apicurio profile is enabled.")
public class DebeziumServerWithApicurioIT {
private static final int MESSAGE_COUNT = 4;
@Inject
DebeziumServer server;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
if (!TestConfigSource.isItTest()) {
return;
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw new DebeziumException(event.getError().get());
}
}
@Test
@EnabledIfSystemProperty(named = "test.apicurio.converter.format", matches = "avro")
public void testPostgresWithApicurioAvro() throws Exception {
Testing.Print.enable();
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(testConsumer.getValues().get(0)).isInstanceOf(byte[].class);
assertThat(testConsumer.getValues().get(0)).isNotNull();
assertThat(((byte[]) testConsumer.getValues().get(0))[0]).isEqualTo((byte) 0);
}
@Test
@EnabledIfSystemProperty(named = "test.apicurio.converter.format", matches = "json")
public void testPostgresWithApicurioExtJson() throws Exception {
Testing.Print.enable();
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(testConsumer.getValues().get(0)).isInstanceOf(String.class);
assertThat(((String) testConsumer.getValues().get(MESSAGE_COUNT - 1))).contains(
"\"after\":{\"id\":1004,\"first_name\":\"Anne\",\"last_name\":\"Kretchmar\",\"email\":\"annek@noanswer.org\"}");
}
}

View File

@@ -1,66 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@TestProfile(DebeziumServerSchemaRegistryProfile.class)
@EnabledIfSystemProperty(named = "debezium.format.key", matches = "protobuf")
@EnabledIfSystemProperty(named = "debezium.format.value", matches = "protobuf")
public class DebeziumServerWithSchemaRegistryIT {
private static final int MESSAGE_COUNT = 4;
@Inject
DebeziumServer server;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
if (!TestConfigSource.isItTest()) {
return;
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testPostgresWithProtobuf() throws Exception {
Testing.Print.enable();
final TestConsumer testConsumer = (TestConsumer) server.getConsumer();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> (testConsumer.getValues().size() >= MESSAGE_COUNT));
assertThat(testConsumer.getValues().size()).isEqualTo(MESSAGE_COUNT);
assertThat(testConsumer.getValues().get(0)).isInstanceOf(byte[].class);
assertThat(testConsumer.getValues().get(0)).isNotNull();
assertThat(((byte[]) testConsumer.getValues().get(0))[0]).isEqualTo((byte) 0);
}
}

View File

@@ -1,121 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import org.eclipse.microprofile.config.spi.ConfigSource;
import io.debezium.data.Json;
import io.debezium.util.Testing;
/**
* A config source used during tests. Amended/overridden by values exposed from test lifecycle listeners.
*/
public class TestConfigSource implements ConfigSource {
public static final String OFFSETS_FILE = "file-connector-offsets.txt";
public static final Path OFFSET_STORE_PATH = Testing.Files.createTestingPath(OFFSETS_FILE).toAbsolutePath();
public static final Path TEST_FILE_PATH = Testing.Files.createTestingPath("file-connector-input.txt").toAbsolutePath();
final Map<String, String> integrationTest = new HashMap<>();
final Map<String, String> kinesisTest = new HashMap<>();
final Map<String, String> pubsubTest = new HashMap<>();
final Map<String, String> unitTest = new HashMap<>();
protected Map<String, String> config;
public TestConfigSource() {
integrationTest.put("debezium.sink.type", "test");
integrationTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
integrationTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
integrationTest.put("debezium.source.offset.flush.interval.ms", "0");
integrationTest.put("debezium.source.topic.prefix", "testc");
integrationTest.put("debezium.source.schema.include.list", "inventory");
integrationTest.put("debezium.source.table.include.list", "inventory.customers");
String format = System.getProperty("test.apicurio.converter.format");
String formatKey = System.getProperty("debezium.format.key");
String formatValue = System.getProperty("debezium.format.value");
String formatHeader = System.getProperty("debezium.format.header", "json");
if (format != null && format.length() != 0) {
integrationTest.put("debezium.format.key", format);
integrationTest.put("debezium.format.value", format);
integrationTest.put("debezium.format.header", formatHeader);
}
else {
formatKey = (formatKey != null) ? formatKey : Json.class.getSimpleName().toLowerCase();
formatValue = (formatValue != null) ? formatValue : Json.class.getSimpleName().toLowerCase();
formatHeader = (formatHeader != null) ? formatHeader : Json.class.getSimpleName().toLowerCase();
integrationTest.put("debezium.format.key", formatKey);
integrationTest.put("debezium.format.value", formatValue);
integrationTest.put("debezium.format.header", formatHeader);
}
unitTest.put("debezium.sink.type", "test");
unitTest.put("debezium.source.connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector");
unitTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
unitTest.put("debezium.source.offset.flush.interval.ms", "0");
unitTest.put("debezium.source.file", TEST_FILE_PATH.toAbsolutePath().toString());
unitTest.put("debezium.source.topic", "topicX");
unitTest.put("debezium.format.header", formatHeader);
unitTest.put("debezium.format.schemas.enable", "true");
unitTest.put("debezium.format.header.schemas.enable", "false");
unitTest.put("debezium.format.value.schemas.enable", "false");
unitTest.put("debezium.transforms", "hoist");
unitTest.put("debezium.transforms.hoist.type", "org.apache.kafka.connect.transforms.HoistField$Value");
unitTest.put("debezium.transforms.hoist.field", "line");
unitTest.put("debezium.transforms.hoist.predicate", "topicNameMatch");
unitTest.put("debezium.predicates", "topicNameMatch");
unitTest.put("debezium.predicates.topicNameMatch.type", "org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
unitTest.put("debezium.predicates.topicNameMatch.pattern", ".*");
// DBZ-2622 For testing properties passed via smallrye/microprofile environment variables
unitTest.put("DEBEZIUM_SOURCE_TABLE_INCLUDE_LIST", "public.table_name");
unitTest.put("debezium_source_offset_flush_interval_ms_Test", "0");
unitTest.put("debezium.source.snapshot.select.statement.overrides.public.table_name", "SELECT * FROM table_name WHERE 1>2");
unitTest.put("debezium.source.database.allowPublicKeyRetrieval", "true");
if (isItTest()) {
config = integrationTest;
}
else {
config = unitTest;
}
}
public static boolean isItTest() {
return "IT".equals(System.getProperty("test.type"));
}
@Override
public Map<String, String> getProperties() {
return config;
}
@Override
public String getValue(String propertyName) {
return config.get(propertyName);
}
@Override
public String getName() {
return "test";
}
@Override
public Set<String> getPropertyNames() {
return config.keySet();
}
public static int waitForSeconds() {
return 60;
}
}

View File

@@ -1,57 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.util.Testing;
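/**
 * In-memory sink used by the tests; selected through debezium.sink.type=test, which matches the @Named qualifier below.
 */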
@Dependent
@Named("test")
public class TestConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
final List<Object> values = Collections.synchronizedList(new ArrayList<>());
@PostConstruct
void init() {
Testing.print("Test consumer constructed");
}
@PreDestroy
void close() {
Testing.print("Test consumer destroyed");
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
records.forEach(record -> {
Testing.print(record);
values.add(record.value());
try {
committer.markProcessed(record);
}
catch (InterruptedException e) {
throw new DebeziumException(e);
}
});
}
public List<Object> getValues() {
return values;
}
}

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@@ -1,165 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-dist</artifactId>
<name>Debezium Server Distribution</name>
<packaging>jar</packaging>
<properties>
<assembly.descriptor>server-distribution</assembly.descriptor>
<quarkus.package.type>legacy-jar</quarkus.package.type>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>assembly</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-mysql</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-mongodb</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-sqlserver</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-oracle</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-scripting</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-scripting-languages</artifactId>
<type>pom</type>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-kinesis</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-http</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pubsub</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pulsar</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-eventhubs</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-redis</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-kafka</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-pravega</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-nats-streaming</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-nats-jetstream</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-infinispan</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-rocketmq</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-logging-json</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${version.assembly.plugin}</version>
<executions>
<execution>
<id>default</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<appendAssemblyId>false</appendAssemblyId>
<attach>true</attach> <!-- we want attach & deploy these to Maven -->
<descriptors>
<descriptor>src/main/resources/assemblies/${assembly.descriptor}.xml</descriptor>
</descriptors>
<tarLongFileMode>posix</tarLongFileMode>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -1,73 +0,0 @@
<?xml version="1.0"?>
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
<id>distribution</id>
<formats>
<format>tar.gz</format>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>${project.parent.artifactId}/lib</outputDirectory>
<unpack>false</unpack>
<scope>runtime</scope>
<useProjectArtifact>false</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<excludes>
<exclude>org.apache.kafka:kafka-tools:*</exclude>
<exclude>javax.ws.rs:javax.ws.rs-api:*</exclude>
<exclude>org.apache.kafka:connect-file:*</exclude>
<exclude>org.glassfish.jersey.*:*:*</exclude>
<exclude>org.eclipse.jetty:*:*</exclude>
<exclude>org.apache.maven:*:*</exclude>
<exclude>log4j:log4j:*</exclude>
<exclude>ch.qos.reload4j:reload4j</exclude>
<exclude>io.debezium:debezium-scripting</exclude>
<exclude>io.debezium:debezium-scripting-opt-depts</exclude>
</excludes>
</dependencySet>
<dependencySet>
<outputDirectory>${project.parent.artifactId}/lib_opt</outputDirectory>
<unpack>false</unpack>
<scope>runtime</scope>
<useProjectArtifact>false</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<includes>
<include>io.debezium:debezium-scripting:*</include>
<include>io.debezium:debezium-scripting-languages:*</include>
</includes>
</dependencySet>
</dependencySets>
<fileSets>
<fileSet>
<!-- Get the files from the top-level directory -->
<directory>${project.basedir}/../..</directory>
<outputDirectory>${project.parent.artifactId}</outputDirectory>
<includes>
<include>README*</include>
<include>CHANGELOG*</include>
<include>CONTRIBUTE*</include>
<include>COPYRIGHT*</include>
<include>LICENSE*</include>
</includes>
<useDefaultExcludes>true</useDefaultExcludes>
</fileSet>
<fileSet>
<!-- >directory>../${project.parent.artifactId}-core/target</directory-->
<directory>${project.build.directory}</directory>
<outputDirectory>${project.parent.artifactId}</outputDirectory>
<includes>
<include>*-runner.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>src/main/resources/distro</directory>
<outputDirectory>${project.parent.artifactId}</outputDirectory>
<includes>
<include>**/*</include>
</includes>
</fileSet>
</fileSets>
</assembly>

View File

@@ -1,13 +0,0 @@
debezium.sink.type=kinesis
debezium.sink.kinesis.region=eu-central-1
debezium.source.connector.class=io.debezium.connector.postgresql.PostgresConnector
debezium.source.offset.storage.file.filename=data/offsets.dat
debezium.source.offset.flush.interval.ms=0
debezium.source.database.hostname=localhost
debezium.source.database.port=5432
debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=postgres
debezium.source.topic.prefix=tutorial
debezium.source.schema.include.list=inventory
quarkus.log.console.json=false

View File

@@ -1,9 +0,0 @@
SET PATH_SEP=;
SET JAVA_BINARY=%JAVA_HOME%\bin\java
for %%i in (debezium-server-*runner.jar) do set RUNNER=%%~i
echo %RUNNER%
SET LIB_PATH=lib\*
IF "%ENABLE_DEBEZIUM_SCRIPTING%"=="true" SET LIB_PATH=%LIB_PATH%%PATH_SEP%lib_opt\*
call "%JAVA_BINARY%" %DEBEZIUM_OPTS% %JAVA_OPTS% -cp %RUNNER%%PATH_SEP%conf%PATH_SEP%%LIB_PATH% io.debezium.server.Main

View File

@@ -1,28 +0,0 @@
#!/bin/bash
#
# Copyright Debezium Authors.
#
# Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
#
if [ -z "$JAVA_HOME" ]; then
JAVA_BINARY="java"
else
JAVA_BINARY="$JAVA_HOME/bin/java"
fi
if [ "$OSTYPE" = "msys" ] || [ "$OSTYPE" = "cygwin" ]; then
PATH_SEP=";"
else
PATH_SEP=":"
fi
RUNNER=$(ls debezium-server-*runner.jar)
ENABLE_DEBEZIUM_SCRIPTING=${ENABLE_DEBEZIUM_SCRIPTING:-false}
LIB_PATH="lib/*"
if [[ "${ENABLE_DEBEZIUM_SCRIPTING}" == "true" ]]; then
LIB_PATH=$LIB_PATH$PATH_SEP"lib_opt/*"
fi
exec "$JAVA_BINARY" $DEBEZIUM_OPTS $JAVA_OPTS -cp "$RUNNER"$PATH_SEP"conf"$PATH_SEP$LIB_PATH io.debezium.server.Main

View File

@@ -1,132 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-eventhubs</artifactId>
<name>Debezium Server Azure Event Hubs Adapter</name>
<packaging>jar</packaging>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>com.azure</groupId>
<artifactId>azure-messaging-eventhubs</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope></dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,206 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.eventhubs;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.azure.core.amqp.exception.AmqpException;
import com.azure.messaging.eventhubs.EventData;
import com.azure.messaging.eventhubs.EventDataBatch;
import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubProducerClient;
import com.azure.messaging.eventhubs.models.CreateBatchOptions;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
/**
* This sink adapter delivers change event messages to Azure Event Hubs.
*
* @author Abhishek Gupta
*
*/
@Named("eventhubs")
@Dependent
public class EventHubsChangeConsumer extends BaseChangeConsumer
implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.eventhubs.";
private static final String PROP_CONNECTION_STRING_NAME = PROP_PREFIX + "connectionstring";
private static final String PROP_EVENTHUB_NAME = PROP_PREFIX + "hubname";
private static final String PROP_PARTITION_ID = PROP_PREFIX + "partitionid";
private static final String PROP_PARTITION_KEY = PROP_PREFIX + "partitionkey";
// maximum size for the batch of events (bytes)
private static final String PROP_MAX_BATCH_SIZE = PROP_PREFIX + "maxbatchsize";
private String connectionString;
private String eventHubName;
private String partitionID;
private String partitionKey;
private Integer maxBatchSize;
// connection string format -
// Endpoint=sb://<NAMESPACE>/;SharedAccessKeyName=<KEY_NAME>;SharedAccessKey=<ACCESS_KEY>;EntityPath=<HUB_NAME>
private static final String CONNECTION_STRING_FORMAT = "%s;EntityPath=%s";
private EventHubProducerClient producer = null;
@Inject
@CustomConsumerBuilder
Instance<EventHubProducerClient> customProducer;
@PostConstruct
void connect() {
if (customProducer.isResolvable()) {
producer = customProducer.get();
LOGGER.info("Obtained custom configured Event Hubs client for namespace '{}'",
customProducer.get().getFullyQualifiedNamespace());
return;
}
final Config config = ConfigProvider.getConfig();
connectionString = config.getValue(PROP_CONNECTION_STRING_NAME, String.class);
eventHubName = config.getValue(PROP_EVENTHUB_NAME, String.class);
// optional config
partitionID = config.getOptionalValue(PROP_PARTITION_ID, String.class).orElse("");
partitionKey = config.getOptionalValue(PROP_PARTITION_KEY, String.class).orElse("");
maxBatchSize = config.getOptionalValue(PROP_MAX_BATCH_SIZE, Integer.class).orElse(0);
String finalConnectionString = String.format(CONNECTION_STRING_FORMAT, connectionString, eventHubName);
try {
producer = new EventHubClientBuilder().connectionString(finalConnectionString).buildProducerClient();
}
catch (Exception e) {
throw new DebeziumException(e);
}
LOGGER.info("Using default Event Hubs client for namespace '{}'", producer.getFullyQualifiedNamespace());
}
@PreDestroy
void close() {
try {
producer.close();
LOGGER.info("Closed Event Hubs producer client");
}
catch (Exception e) {
LOGGER.warn("Exception while closing Event Hubs producer", e);
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records,
RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
LOGGER.trace("Event Hubs sink adapter processing change events");
CreateBatchOptions op = new CreateBatchOptions().setPartitionId(partitionID);
if (!partitionKey.isEmpty()) {
op.setPartitionKey(partitionKey);
}
if (maxBatchSize.intValue() != 0) {
op.setMaximumSizeInBytes(maxBatchSize);
}
for (int recordIndex = 0; recordIndex < records.size();) {
int start = recordIndex;
LOGGER.trace("Emitting events starting from index {}", start);
EventDataBatch batch = producer.createBatch(op);
// this loop adds as many records to the batch as possible
for (; recordIndex < records.size(); recordIndex++) {
ChangeEvent<Object, Object> record = records.get(recordIndex);
LOGGER.trace("Received record '{}'", record.value());
if (null == record.value()) {
continue;
}
EventData eventData = null;
if (record.value() instanceof String) {
eventData = new EventData((String) record.value());
}
else if (record.value() instanceof byte[]) {
eventData = new EventData(getBytes(record.value()));
}
try {
if (!batch.tryAdd(eventData)) {
if (batch.getCount() == 0) {
// If even the very first event cannot be added to the batch, its size exceeds the
// configured maximum batch size; we cannot safely recover and dispatch it, so the
// only option is to throw an exception.
throw new DebeziumException("Event data is too large to fit into batch");
}
// reached the maximum allowed size for the batch
LOGGER.trace("Maximum batch reached, dispatching {} events.", batch.getCount());
break;
}
}
catch (IllegalArgumentException e) {
// thrown by tryAdd if event data is null
throw new DebeziumException(e);
}
catch (AmqpException e) {
// tryAdd throws AmqpException if "eventData is larger than the maximum size of
// the EventDataBatch."
throw new DebeziumException("Event data was larger than the maximum size of the batch", e);
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
final int batchEventSize = batch.getCount();
if (batchEventSize > 0) {
try {
LOGGER.trace("Sending batch of {} events to Event Hubs", batchEventSize);
producer.send(batch);
LOGGER.trace("Sent record batch to Event Hubs");
}
catch (Exception e) {
throw new DebeziumException(e);
}
// this loop commits each record submitted in the event hubs batch
LOGGER.trace("Marking records at index {} to {} as processed", start, recordIndex);
for (int j = start; j < recordIndex; ++j) {
ChangeEvent<Object, Object> record = records.get(j);
try {
committer.markProcessed(record);
LOGGER.trace("Record marked processed");
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
}
}
committer.markBatchFinished();
LOGGER.trace("Batch marked finished");
}
}
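
The @CustomConsumerBuilder injection point above lets an application hand the adapter a ready-made EventHubProducerClient instead of having one built from debezium.sink.eventhubs.connectionstring. A minimal sketch of such a CDI producer, assuming bean discovery is in place; the factory class name and the connection values are placeholders:

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;

import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubProducerClient;

import io.debezium.server.CustomConsumerBuilder;

@ApplicationScoped
public class CustomEventHubsClientFactory {

    // Hypothetical producer method; connect() above resolves this bean through the
    // Instance<EventHubProducerClient> injection point and then skips its own client setup.
    @Produces
    @CustomConsumerBuilder
    public EventHubProducerClient customEventHubsProducer() {
        return new EventHubClientBuilder()
                .connectionString("Endpoint=sb://<NAMESPACE>/;SharedAccessKeyName=<KEY_NAME>;SharedAccessKey=<ACCESS_KEY>;EntityPath=<HUB_NAME>")
                .buildProducerClient();
    }
}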

View File

@@ -1,121 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.eventhubs;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.azure.core.util.IterableStream;
import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubConsumerClient;
import com.azure.messaging.eventhubs.EventHubProducerClient;
import com.azure.messaging.eventhubs.models.EventPosition;
import com.azure.messaging.eventhubs.models.PartitionEvent;
import io.debezium.server.DebeziumServer;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from a PostgreSQL database and
* writing to Azure Event Hubs.
*
* @author Abhishek Gupta
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
public class EventHubsIT {
private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsIT.class);
private static final int MESSAGE_COUNT = 4;
private static final String CONSUMER_GROUP = "$Default";
protected static EventHubProducerClient producer = null;
protected static EventHubConsumerClient consumer = null;
{
Testing.Files.delete(EventHubsTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(EventHubsTestConfigSource.OFFSET_STORE_PATH);
}
@AfterAll
static void stop() {
if (producer != null) {
producer.close();
}
if (consumer != null) {
consumer.close();
}
}
@Inject
DebeziumServer server;
void setupDependencies(@Observes ConnectorStartedEvent event) {
String finalConnectionString = String.format("%s;EntityPath=%s",
EventHubsTestConfigSource.getEventHubsConnectionString(), EventHubsTestConfigSource.getEventHubsName());
producer = new EventHubClientBuilder().connectionString(finalConnectionString).buildProducerClient();
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testEventHubs() throws Exception {
Testing.Print.enable();
String finalConnectionString = String.format("%s;EntityPath=%s",
EventHubsTestConfigSource.getEventHubsConnectionString(), EventHubsTestConfigSource.getEventHubsName());
consumer = new EventHubClientBuilder().connectionString(finalConnectionString).consumerGroup(CONSUMER_GROUP)
.buildConsumerClient();
final List<PartitionEvent> expected = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(EventHubsTestConfigSource.waitForSeconds())).until(() -> {
IterableStream<PartitionEvent> events = consumer.receiveFromPartition("0", MESSAGE_COUNT,
EventPosition.latest());
events.forEach(event -> expected.add(event));
return expected.size() >= MESSAGE_COUNT;
});
// check whether the event data contains expected id i.e. 1001, 1002, 1003 and
// 1004
String eventBody = null;
String expectedID = null;
final String idPart = "\"id\":100";
// since all messages go to the same partition, ordering will be maintained
// (assuming no errors)
for (int i = 0; i < MESSAGE_COUNT; i++) {
eventBody = expected.get(i).getData().getBodyAsString();
expectedID = idPart + String.valueOf(i + 1);
assertTrue(eventBody.contains(expectedID), expectedID + " not found in payload");
}
}
}

View File

@@ -1,57 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.eventhubs;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class EventHubsTestConfigSource extends TestConfigSource {
static final String EVENTHUBS_CONNECTION_STRING_SYSTEM_PROPERTY_NAME = "eventhubs.connection.string";
static final String EVENTHUBS_NAME_SYSTEM_PROPERTY_NAME = "eventhubs.hub.name";
static final String CONNECTION_STRING_FORMAT = "%s;EntityPath=%s";
public EventHubsTestConfigSource() {
Map<String, String> eventHubsTest = new HashMap<>();
// event hubs sink config
eventHubsTest.put("debezium.sink.type", "eventhubs");
eventHubsTest.put("debezium.sink.eventhubs.connectionstring", getEventHubsConnectionString());
eventHubsTest.put("debezium.sink.eventhubs.hubname", getEventHubsName());
// postgresql source config
eventHubsTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
eventHubsTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
eventHubsTest.put("debezium.source.offset.flush.interval.ms", "0");
eventHubsTest.put("debezium.source.topic.prefix", "testc");
eventHubsTest.put("debezium.source.schema.include.list", "inventory");
eventHubsTest.put("debezium.source.table.include.list", "inventory.customers");
config = eventHubsTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
public static String getEventHubsConnectionString() {
return System.getProperty(EVENTHUBS_CONNECTION_STRING_SYSTEM_PROPERTY_NAME);
}
public static String getEventHubsName() {
return System.getProperty(EVENTHUBS_NAME_SYSTEM_PROPERTY_NAME);
}
}

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@@ -1,173 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-http</artifactId>
<name>Debezium Server HTTP Webhook Sink Adapter</name>
<packaging>jar</packaging>
<properties>
<wiremock.version>2.32.0</wiremock.version>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.tomakehurst</groupId>
<artifactId>wiremock-jre8</artifactId>
<version>${wiremock.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope></dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<show>private</show>
<nohelp>true</nohelp>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,157 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.http;
import static java.net.HttpURLConnection.HTTP_ACCEPTED;
import static java.net.HttpURLConnection.HTTP_NO_CONTENT;
import static java.net.HttpURLConnection.HTTP_OK;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.util.Clock;
import io.debezium.util.Metronome;
/**
* Implementation of the consumer that delivers the messages to an HTTP Webhook destination.
*
* @author Chris Baumbauer
*/
@Named("http")
@Dependent
public class HttpChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(HttpChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.http.";
private static final String PROP_WEBHOOK_URL = "url";
private static final String PROP_CLIENT_TIMEOUT = "timeout.ms";
private static final String PROP_RETRIES = "retries";
private static final String PROP_RETRY_INTERVAL = "retry.interval.ms";
private static final Long HTTP_TIMEOUT = Integer.toUnsignedLong(60000); // Default to 60s
private static final int DEFAULT_RETRIES = 5;
private static final Long RETRY_INTERVAL = Integer.toUnsignedLong(1_000); // Default to 1s
private static Duration timeoutDuration;
private static int retries;
private static Duration retryInterval;
private HttpClient client;
private HttpRequest.Builder requestBuilder;
// If this is running as a Knative object, then expect the sink URL to be located in `K_SINK`
// as per https://knative.dev/development/eventing/custom-event-source/sinkbinding/
@PostConstruct
void connect() throws URISyntaxException {
String sinkUrl;
String contentType;
client = HttpClient.newHttpClient();
final Config config = ConfigProvider.getConfig();
String sink = System.getenv("K_SINK");
timeoutDuration = Duration.ofMillis(HTTP_TIMEOUT);
retries = DEFAULT_RETRIES;
retryInterval = Duration.ofMillis(RETRY_INTERVAL);
if (sink != null) {
sinkUrl = sink;
}
else {
sinkUrl = config.getValue(PROP_PREFIX + PROP_WEBHOOK_URL, String.class);
}
config.getOptionalValue(PROP_PREFIX + PROP_CLIENT_TIMEOUT, String.class)
.ifPresent(t -> timeoutDuration = Duration.ofMillis(Long.parseLong(t)));
config.getOptionalValue(PROP_PREFIX + PROP_RETRIES, String.class)
.ifPresent(n -> retries = Integer.parseInt(n));
config.getOptionalValue(PROP_PREFIX + PROP_RETRY_INTERVAL, String.class)
.ifPresent(t -> retryInterval = Duration.ofMillis(Long.parseLong(t)));
switch (config.getValue("debezium.format.value", String.class)) {
case "avro":
contentType = "avro/bytes";
break;
case "cloudevents":
contentType = "application/cloudevents+json";
break;
default:
// Note: will default to JSON if it cannot be determined, but should not reach this point
contentType = "application/json";
}
LOGGER.info("Using http content-type type {}", contentType);
LOGGER.info("Using sink URL: {}", sinkUrl);
requestBuilder = HttpRequest.newBuilder(new URI(sinkUrl)).timeout(timeoutDuration);
requestBuilder.setHeader("content-type", contentType);
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, DebeziumEngine.RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
for (ChangeEvent<Object, Object> record : records) {
LOGGER.trace("Received event '{}'", record);
if (record.value() != null) {
int attempts = 0;
while (!recordSent(record)) {
attempts++;
if (attempts >= retries) {
throw new DebeziumException("Exceeded maximum number of attempts to publish event " + record);
}
Metronome.sleeper(retryInterval, Clock.SYSTEM).pause();
}
committer.markProcessed(record);
}
}
committer.markBatchFinished();
}
private boolean recordSent(ChangeEvent<Object, Object> record) throws InterruptedException {
boolean sent = false;
String value = (String) record.value();
HttpResponse r;
HttpRequest request = requestBuilder.POST(HttpRequest.BodyPublishers.ofString(value)).build();
try {
r = client.send(request, HttpResponse.BodyHandlers.ofString());
}
catch (IOException ioe) {
throw new InterruptedException(ioe.toString());
}
if ((r.statusCode() == HTTP_OK) || (r.statusCode() == HTTP_NO_CONTENT) || (r.statusCode() == HTTP_ACCEPTED)) {
sent = true;
}
else {
LOGGER.info("Failed to publish event: " + r.body());
}
return sent;
}
}
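
Because recordSent() accepts 200, 202, and 204 as success, any plain HTTP endpoint can serve as a sink while experimenting locally. A minimal sketch of such a receiver using the JDK's built-in HTTP server; the port and the log output are arbitrary assumptions, and debezium.sink.http.url would point at http://localhost:8085/ in this setup:

import java.io.IOException;
import java.net.InetSocketAddress;

import com.sun.net.httpserver.HttpServer;

public class LocalWebhookSink {

    public static void main(String[] args) throws IOException {
        HttpServer server = HttpServer.create(new InetSocketAddress(8085), 0);
        server.createContext("/", exchange -> {
            // Print the received change event payload for inspection
            String body = new String(exchange.getRequestBody().readAllBytes());
            System.out.println("Received event: " + body);
            // 204 No Content is one of the status codes the consumer treats as success
            exchange.sendResponseHeaders(204, -1);
            exchange.close();
        });
        server.start();
    }
}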

View File

@@ -1,167 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.http;
import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
import static com.github.tomakehurst.wiremock.client.WireMock.configureFor;
import static com.github.tomakehurst.wiremock.client.WireMock.getAllServeEvents;
import static com.github.tomakehurst.wiremock.client.WireMock.post;
import static com.github.tomakehurst.wiremock.client.WireMock.removeServeEvent;
import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import javax.enterprise.event.Observes;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.tomakehurst.wiremock.stubbing.ServeEvent;
import com.github.tomakehurst.wiremock.verification.LoggedRequest;
import javax.inject.Inject;
import io.debezium.DebeziumException;
import io.debezium.doc.FixFor;
import io.debezium.server.DebeziumServer;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from a PostgreSQL database and writing to an HTTP server.
*
* @author Chris Baumbauer
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(HttpTestResourceLifecycleManager.class)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class HttpIT {
@Inject
DebeziumServer server;
private static final Logger LOGGER = LoggerFactory.getLogger(HttpIT.class);
private static final int MESSAGE_COUNT = 4;
private static final int EXPECTED_RETRIES = 5;
private boolean expectServerFail = false;
private String expectedErrorMessage;
{
Testing.Files.delete(HttpTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(HttpTestConfigSource.OFFSET_STORE_PATH);
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
Exception e = (Exception) event.getError().get();
if (e instanceof DebeziumException && expectServerFail && e.getMessage().equals(expectedErrorMessage)) {
LOGGER.info("Expected server failure: {}", e);
return;
}
throw e;
}
}
@BeforeEach
public void resetHttpMock() {
HttpTestResourceLifecycleManager.reset();
}
@Test
@Order(1) // Start streaming, but fail to send anything; just verify that retries were made.
@FixFor("DBZ-5307")
public void testRetryUponError() {
Testing.Print.enable();
// Signal we expect server will fail in this test.
expectServerFail = true;
expectedErrorMessage = "Exceeded maximum number of attempts to publish event EmbeddedEngineChangeEvent";
List<ServeEvent> events = new ArrayList<>();
configureFor(HttpTestResourceLifecycleManager.getHost(), HttpTestResourceLifecycleManager.getPort());
stubFor(post("/").willReturn(aResponse().withStatus(500)));
Awaitility.await().atMost(Duration.ofSeconds(60)).until(() -> {
events.addAll(getAllServeEvents());
// The first event is sent #retries times, then exception is thrown and no other events are sent.
return events.size() == EXPECTED_RETRIES;
});
assertEvents(events, EXPECTED_RETRIES);
}
@Test
@Order(2) // Here we actually stream the events from Postgres to HTTP server.
public void testHttpServer() {
Testing.Print.enable();
expectServerFail = false;
List<ServeEvent> events = new ArrayList<>();
configureFor(HttpTestResourceLifecycleManager.getHost(), HttpTestResourceLifecycleManager.getPort());
stubFor(post("/").willReturn(aResponse().withStatus(200)));
Awaitility.await().atMost(Duration.ofSeconds(60)).until(() -> {
List<ServeEvent> currentEvents = getAllServeEvents();
events.addAll(currentEvents);
// Remove events that were already collected; otherwise, if e.g. 3 out of 4 events were added,
// the next attempt would add all 4 again and the test would fail.
for (ServeEvent e : currentEvents) {
removeServeEvent(e.getId());
}
return events.size() == MESSAGE_COUNT;
});
assertEvents(events, MESSAGE_COUNT);
}
private void assertEvents(List<ServeEvent> events, int expectedSize) {
Assertions.assertEquals(expectedSize, events.size());
for (ServeEvent e : events) {
LoggedRequest request = e.getRequest();
// Assert the content type is set correctly to reflect a cloudevent
Assertions.assertEquals(request.getHeader("content-type"), "application/cloudevents+json");
// deserialize the cloudevent into a HashMap<String, Object> and assert the cloudevent metadata is set properly
try {
ObjectMapper om = new ObjectMapper();
HashMap<String, Object> hm;
TypeReference<HashMap<String, Object>> tref = new TypeReference<>() {
};
hm = om.readValue(request.getBody(), tref);
Assertions.assertEquals("/debezium/postgresql/testc", (String) hm.get("source"));
Assertions.assertEquals("io.debezium.postgresql.datachangeevent", (String) hm.get("type"));
Assertions.assertEquals("1.0", (String) hm.get("specversion"));
Assertions.assertEquals("postgres", (String) hm.get("iodebeziumdb"));
Assertions.assertEquals("inventory", (String) hm.get("iodebeziumschema"));
Assertions.assertEquals("customers", (String) hm.get("iodebeziumtable"));
String eventID = (String) hm.get("id");
Assertions.assertTrue(eventID.length() > 0);
}
catch (IOException ioe) {
Assertions.fail(ioe);
}
}
}
}

View File

@@ -1,39 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.http;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class HttpTestConfigSource extends TestConfigSource {
public HttpTestConfigSource() {
Map<String, String> httpTest = new HashMap<>();
httpTest.put("debezium.sink.type", "http");
httpTest.put("debezium.format.value", "cloudevents"); // Need to explicitly pass in the cloudevents format
httpTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
httpTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
httpTest.put("debezium.source.offset.flush.interval.ms", "0");
httpTest.put("debezium.source.topic.prefix", "testc");
httpTest.put("debezium.source.schema.include.list", "inventory");
httpTest.put("debezium.source.table.include.list", "inventory.customers");
config = httpTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,87 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.http;
import static java.net.HttpURLConnection.HTTP_OK;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.GenericContainer;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class HttpTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
private static final Logger LOGGER = LoggerFactory.getLogger(HttpTestResourceLifecycleManager.class);
public static final String WIREMOCK_IMAGE = "docker.io/wiremock/wiremock:latest";
public static final int PORT = 8080; // Primary port used by wiremock
private static final AtomicBoolean running = new AtomicBoolean(false);
private static final GenericContainer<?> container = new GenericContainer<>(WIREMOCK_IMAGE)
.withExposedPorts(PORT);
private static synchronized void init() {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
init();
return Collections.singletonMap("debezium.sink.http.url", getURL());
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
private String getURL() {
return "http://" + container.getHost() + ":" + container.getMappedPort(PORT);
}
public static String getHost() {
return container.getHost();
}
public static int getPort() {
return container.getMappedPort(PORT);
}
public static void reset() {
try {
HttpClient client = HttpClient.newHttpClient();
String resetURL = "http://" + container.getHost() + ":" + container.getMappedPort(PORT) + "/__admin/reset";
HttpRequest.Builder requestBuilder = HttpRequest.newBuilder(new URI(resetURL)).timeout(Duration.ofMillis(60_000));
HttpRequest request = requestBuilder.POST(HttpRequest.BodyPublishers.ofString("")).build();
HttpResponse<String> r = client.send(request, HttpResponse.BodyHandlers.ofString());
if (r.statusCode() != HTTP_OK) {
throw new IllegalStateException("Received unexpected response while resetting WireMock: " + r.statusCode());
}
LOGGER.info("WireMock reset");
}
catch (Exception e) {
LOGGER.warn("Failed to reset WireMock", e);
}
}
}

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@@ -1,137 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-infinispan</artifactId>
<name>Debezium Server Infinispan Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-client-hotrod</artifactId>
</dependency>
<!-- Test dependencies -->
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-core</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
<tag.infinispan>${version.infinispan}</tag.infinispan>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<show>private</show>
<nohelp>true</nohelp>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,128 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.infinispan;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
import org.infinispan.client.hotrod.impl.ConfigurationProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
/**
* An implementation of the {@link DebeziumEngine.ChangeConsumer} interface that publishes change event messages to a predefined Infinispan cache.
*
* @author vjuranek
*/
@Named("infinispan")
@Dependent
public class InfinispanSinkConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(InfinispanSinkConsumer.class);
private static final String CONF_PREFIX = "debezium.sink.infinispan.";
private static final String SERVER_HOST = CONF_PREFIX + "server.host";
private static final String SERVER_PORT = CONF_PREFIX + "server.port";
private static final String CACHE_NAME = CONF_PREFIX + "cache";
private static final String USER_NAME = CONF_PREFIX + "user";
private static final String PASSWORD = CONF_PREFIX + "password";
private RemoteCacheManager remoteCacheManager;
private RemoteCache cache;
@Inject
@CustomConsumerBuilder
Instance<RemoteCache> customCache;
@PostConstruct
void connect() {
if (customCache.isResolvable()) {
cache = customCache.get();
LOGGER.info("Obtained custom cache with configuration '{}'", cache.getRemoteCacheContainer().getConfiguration());
return;
}
final Config config = ConfigProvider.getConfig();
final String serverHost = config.getValue(SERVER_HOST, String.class);
final String cacheName = config.getValue(CACHE_NAME, String.class);
final Integer serverPort = config.getOptionalValue(SERVER_PORT, Integer.class).orElse(ConfigurationProperties.DEFAULT_HOTROD_PORT);
final Optional<String> user = config.getOptionalValue(USER_NAME, String.class);
final Optional<String> password = config.getOptionalValue(PASSWORD, String.class);
ConfigurationBuilder builder = new ConfigurationBuilder();
String uri;
if (user.isPresent() && password.isPresent()) {
uri = String.format("hotrod://%s:%s@%s:%d", user.get(), password.get(), serverHost, serverPort);
}
else {
uri = String.format("hotrod://%s:%d", serverHost, serverPort);
}
LOGGER.info("Connecting to the Infinispan server using URI '{}'", uri);
builder.uri(uri);
remoteCacheManager = new RemoteCacheManager(builder.build());
cache = remoteCacheManager.getCache(cacheName);
LOGGER.info("Connected to the Infinispan server {}", remoteCacheManager.getServers()[0]);
}
@PreDestroy
void close() {
try {
if (remoteCacheManager != null) {
remoteCacheManager.close();
LOGGER.info("Connection to Infinispan server closed.");
}
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, DebeziumEngine.RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
Map<Object, Object> entries = new HashMap<>(records.size());
for (ChangeEvent<Object, Object> record : records) {
if (record.value() != null) {
LOGGER.trace("Received event {} = '{}'", getString(record.key()), getString(record.value()));
entries.put(record.key(), record.value());
}
}
try {
cache.putAll(entries);
}
catch (Exception e) {
throw new DebeziumException(e);
}
for (ChangeEvent<Object, Object> record : records) {
committer.markProcessed(record);
}
committer.markBatchFinished();
}
}
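
As a usage note, a minimal sketch (not part of the deleted module) of how an application or test could supply its own RemoteCache to this sink through the @CustomConsumerBuilder qualifier, which the consumer above checks before building a connection from the debezium.sink.infinispan.* properties. The package name, Hot Rod URI and cache name are illustrative assumptions.

package io.debezium.server.infinispan.examples; // hypothetical package

import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Produces;

import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;

import io.debezium.server.CustomConsumerBuilder;

@Dependent
public class CustomCacheProducer {

    @Produces
    @CustomConsumerBuilder
    public RemoteCache<Object, Object> changeEventCache() {
        ConfigurationBuilder builder = new ConfigurationBuilder();
        builder.uri("hotrod://admin:secret@localhost:11222"); // assumed local Infinispan server and credentials
        RemoteCacheManager manager = new RemoteCacheManager(builder.build());
        return manager.getCache("debezium_events"); // assumed cache name
    }
}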

View File

@@ -1,73 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.infinispan;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import org.awaitility.Awaitility;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import io.debezium.server.DebeziumServer;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* @author vjuranek
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(InfinispanTestResourceLifecycleManager.class)
public class InfinispanSinkConsumerIT {
private static final Logger LOGGER = LoggerFactory.getLogger(InfinispanSinkConsumerIT.class);
private static final int MESSAGE_COUNT = 4;
static {
Testing.Files.delete(InfinispanTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(InfinispanTestConfigSource.OFFSET_STORE_PATH);
}
@Inject
DebeziumServer server;
private DefaultCacheManager cacheManager;
private Cache<String, String> cache;
@Test
public void testStreaming() throws Exception {
ConfigurationBuilder builder = new ConfigurationBuilder();
String uri = String.format("hotrod://%s:%s@%s:%d",
InfinispanTestConfigSource.USER_NAME,
InfinispanTestConfigSource.PASSWORD,
InfinispanTestResourceLifecycleManager.getHost(),
InfinispanTestResourceLifecycleManager.getPort());
LOGGER.info("Connected to Infinispan server using URI '{}'", uri);
builder.uri(uri);
RemoteCacheManager remoteCacheManager = new RemoteCacheManager(builder.build());
RemoteCache<Object, Object> remoteCache = remoteCacheManager.getCache(InfinispanTestConfigSource.CACHE_NAME);
assertThat(remoteCache.size()).isEqualTo(0);
Awaitility.await().atMost(Duration.ofSeconds(60)).until(() -> {
return remoteCache.size() == MESSAGE_COUNT;
});
assertThat(remoteCache.size()).isEqualTo(MESSAGE_COUNT);
}
}

View File

@@ -1,45 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.infinispan;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class InfinispanTestConfigSource extends TestConfigSource {
public static final String CACHE_NAME = "debezium_test";
public static final String USER_NAME = "admin";
public static final String PASSWORD = "secret";
public static final String CONFIG_FILE = "infinispan-local.xml";
public InfinispanTestConfigSource() {
Map<String, String> infinispanTest = new HashMap<>();
infinispanTest.put("debezium.sink.type", "infinispan");
infinispanTest.put("debezium.sink.infinispan.cache", CACHE_NAME);
infinispanTest.put("debezium.sink.infinispan.user", USER_NAME);
infinispanTest.put("debezium.sink.infinispan.password", PASSWORD);
infinispanTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
infinispanTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
infinispanTest.put("debezium.source.offset.flush.interval.ms", "0");
infinispanTest.put("debezium.source.topic.prefix", "testc");
infinispanTest.put("debezium.source.schema.include.list", "inventory");
infinispanTest.put("debezium.source.table.include.list", "inventory.customers");
config = infinispanTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,70 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.infinispan;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.client.hotrod.impl.ConfigurationProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.BindMode;
import org.testcontainers.containers.GenericContainer;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class InfinispanTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
private static final Logger LOGGER = LoggerFactory.getLogger(InfinispanTestResourceLifecycleManager.class);
public static final String INFINISPAN_IMAGE = "quay.io/infinispan/server:" + System.getProperty("tag.infinispan", "latest");
public static final int PORT = ConfigurationProperties.DEFAULT_HOTROD_PORT;
public static final String CONFIG_PATH = "/etc/infinispan-local.xml";
private static final GenericContainer<?> container = new GenericContainer<>(INFINISPAN_IMAGE)
.withExposedPorts(PORT)
.withClasspathResourceMapping(InfinispanTestConfigSource.CONFIG_FILE, CONFIG_PATH, BindMode.READ_ONLY)
.withCommand("-c", CONFIG_PATH)
.withEnv("USER", InfinispanTestConfigSource.USER_NAME)
.withEnv("PASS", InfinispanTestConfigSource.PASSWORD);
private static final AtomicBoolean running = new AtomicBoolean(false);
private static synchronized void init() {
if (!running.get()) {
container.start();
running.set(true);
}
}
public static String getHost() {
return container.getHost();
}
public static int getPort() {
return container.getMappedPort(PORT);
}
@Override
public Map<String, String> start() {
init();
Map<String, String> params = new ConcurrentHashMap<>();
params.put("debezium.sink.infinispan.server.host", getHost());
params.put("debezium.sink.infinispan.server.port", String.valueOf(getPort()));
return params;
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
}

View File

@@ -1,40 +0,0 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:14.0 https://infinispan.org/schemas/infinispan-config-14.0.xsd
urn:infinispan:server:14.0 https://infinispan.org/schemas/infinispan-server-14.0.xsd"
xmlns="urn:infinispan:config:14.0"
xmlns:server="urn:infinispan:server:14.0">
<cache-container name="default" statistics="true">
<security>
<authorization/>
</security>
<local-cache name="debezium_test"/>
</cache-container>
<server xmlns="urn:infinispan:server:14.0">
<interfaces>
<interface name="public">
<inet-address value="${infinispan.bind.address:127.0.0.1}"/>
</interface>
</interfaces>
<socket-bindings default-interface="public" port-offset="${infinispan.socket.binding.port-offset:0}">
<socket-binding name="default" port="${infinispan.bind.port:11222}"/>
<socket-binding name="memcached" port="11221"/>
</socket-bindings>
<security>
<security-realms>
<security-realm name="default">
<properties-realm groups-attribute="Roles">
<user-properties path="users.properties"/>
<group-properties path="groups.properties"/>
</properties-realm>
</security-realm>
</security-realms>
</security>
<endpoints socket-binding="default" security-realm="default" />
</server>
</infinispan>

View File

@@ -1,128 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-kafka</artifactId>
<name>Debezium Server Kafka Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,120 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kafka;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.engine.Header;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
/**
* An implementation of the {@link DebeziumEngine.ChangeConsumer} interface that publishes change event messages to Kafka.
*/
@Named("kafka")
@Dependent
public class KafkaChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.kafka.";
private static final String PROP_PREFIX_PRODUCER = PROP_PREFIX + "producer.";
private KafkaProducer<Object, Object> producer;
@Inject
@CustomConsumerBuilder
Instance<KafkaProducer<Object, Object>> customKafkaProducer;
@PostConstruct
void start() {
if (customKafkaProducer.isResolvable()) {
producer = customKafkaProducer.get();
LOGGER.info("Obtained custom configured KafkaProducer '{}'", producer);
return;
}
final Config config = ConfigProvider.getConfig();
producer = new KafkaProducer<>(getConfigSubset(config, PROP_PREFIX_PRODUCER));
LOGGER.info("consumer started...");
}
@PreDestroy
void stop() {
LOGGER.info("consumer destroyed...");
if (producer != null) {
try {
producer.close(Duration.ofSeconds(5));
}
catch (Throwable t) {
LOGGER.warn("Could not close producer", t);
}
}
}
@Override
public void handleBatch(final List<ChangeEvent<Object, Object>> records,
final RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(records.size());
for (ChangeEvent<Object, Object> record : records) {
try {
LOGGER.trace("Received event '{}'", record);
Headers headers = convertHeaders(record);
producer.send(new ProducerRecord<>(record.destination(), null, null, record.key(), record.value(), headers), (metadata, exception) -> {
if (exception != null) {
LOGGER.error("Failed to send record to {}:", record.destination(), exception);
throw new DebeziumException(exception);
}
else {
LOGGER.trace("Sent message with offset: {}", metadata.offset());
latch.countDown();
}
});
committer.markProcessed(record);
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
latch.await();
committer.markBatchFinished();
}
private Headers convertHeaders(ChangeEvent<Object, Object> record) {
List<Header<Object>> headers = record.headers();
Headers kafkaHeaders = new RecordHeaders();
for (Header<Object> header : headers) {
kafkaHeaders.add(header.getKey(), getBytes(header.getValue()));
}
return kafkaHeaders;
}
}
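
As a usage note, a minimal sketch (not part of the deleted module) of handing a pre-configured KafkaProducer to this sink through the @CustomConsumerBuilder qualifier; without such a bean the sink builds its producer from the debezium.sink.kafka.producer.* properties. The package name, bootstrap server and serializer choices are illustrative assumptions.

package io.debezium.server.kafka.examples; // hypothetical package

import java.util.Properties;

import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Produces;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

import io.debezium.server.CustomConsumerBuilder;

@Dependent
public class CustomKafkaProducerBuilder {

    @Produces
    @CustomConsumerBuilder
    public KafkaProducer<Object, Object> producer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        return new KafkaProducer<>(props);
    }
}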

View File

@@ -1,43 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kafka;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Headers;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Transformation;
public class AddHeaderTransform implements Transformation<SourceRecord> {
@Override
public SourceRecord apply(SourceRecord record) {
Headers headers = new ConnectHeaders();
headers.addString("headerKey", "headerValue");
record = record.newRecord(
record.topic(), record.kafkaPartition(), record.keySchema(), record.key(), record.valueSchema(), record.value(), record.timestamp(), headers);
return record;
}
@Override
public ConfigDef config() {
return new ConfigDef();
}
@Override
public void close() {
}
@Override
public void configure(Map<String, ?> map) {
}
}
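
A quick stand-alone sketch (not from the original sources) of what the transform above does to a record; in the deleted test setup it is wired in via debezium.transforms=addheader and debezium.transforms.addheader.type=io.debezium.server.kafka.AddHeaderTransform (see KafkaTestConfigSource later in this diff). The topic name and value below are illustrative assumptions.

import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

import io.debezium.server.kafka.AddHeaderTransform;

public class AddHeaderTransformSketch {
    public static void main(String[] args) {
        AddHeaderTransform transform = new AddHeaderTransform();
        transform.configure(Map.of());

        // Assumed topic and value, only for illustration
        SourceRecord in = new SourceRecord(Map.of(), Map.of(), "testc.inventory.customers",
                Schema.STRING_SCHEMA, "{\"id\": 1001}");
        SourceRecord out = transform.apply(in);

        // Every transformed record now carries the fixed header added by the transform
        out.headers().forEach(h -> System.out.println(h.key() + " = " + h.value()));
        transform.close();
    }
}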

View File

@@ -1,107 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import javax.enterprise.event.Observes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test for verifying that the Kafka sink adapter can stream change events from a PostgreSQL database
* to a configured Apache Kafka broker.
*
* @author Alfusainey Jallow
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(KafkaTestResourceLifecycleManager.class)
public class KafkaIT {
private static final String TOPIC_NAME = "testc.inventory.customers";
private static final int MESSAGE_COUNT = 4;
private static KafkaConsumer<String, String> consumer;
{
Testing.Files.delete(KafkaTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(KafkaTestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes final ConnectorStartedEvent event) {
Testing.Print.enable();
final Map<String, Object> configs = new ConcurrentHashMap<>();
configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaTestResourceLifecycleManager.getBootstrapServers());
configs.put(ConsumerConfig.GROUP_ID_CONFIG, "test-" + UUID.randomUUID());
consumer = new KafkaConsumer<>(configs, new StringDeserializer(), new StringDeserializer());
}
void connectorCompleted(@Observes final ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@AfterAll
static void stop() {
if (consumer != null) {
consumer.unsubscribe();
consumer.close();
}
}
@Test
public void testKafka() {
Awaitility.await().atMost(Duration.ofSeconds(KafkaTestConfigSource.waitForSeconds())).until(() -> {
return consumer != null;
});
consumer.subscribe(Arrays.asList(TOPIC_NAME));
final List<ConsumerRecord<String, String>> actual = new ArrayList<>();
Awaitility.await()
.atMost(Duration.ofSeconds(KafkaTestConfigSource.waitForSeconds()))
.until(() -> {
consumer.poll(Duration.ofSeconds(KafkaTestConfigSource.waitForSeconds()))
.iterator()
.forEachRemaining(actual::add);
return actual.size() >= MESSAGE_COUNT;
});
assertThat(actual.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
Headers headers = actual.get(0).headers();
assertThat(headers.headers("headerKey")).isNotEmpty();
assertThat(headers.headers("headerKey"))
.allMatch(h -> h.key().equals("headerKey") && Arrays.equals(h.value(), "\"headerValue\"".getBytes(StandardCharsets.UTF_8)));
}
}

View File

@@ -1,48 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class KafkaTestConfigSource extends TestConfigSource {
public KafkaTestConfigSource() {
final Map<String, String> kafkaConfig = new HashMap<>();
kafkaConfig.put("debezium.sink.type", "kafka");
kafkaConfig.put("debezium.sink.kafka.producer.bootstrap.servers", KafkaTestResourceLifecycleManager.getBootstrapServers());
kafkaConfig.put("debezium.sink.kafka.producer.key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
kafkaConfig.put("debezium.sink.kafka.producer.value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
kafkaConfig.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
kafkaConfig.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
kafkaConfig.put("debezium.source.offset.flush.interval.ms", "0");
kafkaConfig.put("debezium.source.topic.prefix", "testc");
kafkaConfig.put("debezium.source.schema.include.list", "inventory");
kafkaConfig.put("debezium.source.table.include.list", "inventory.customers");
kafkaConfig.put("debezium.format.header.schemas.enable", "false");
// DBZ-5105
kafkaConfig.put("debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm", "");
kafkaConfig.put("debezium.transforms", "addheader");
kafkaConfig.put("debezium.transforms.addheader.type", "io.debezium.server.kafka.AddHeaderTransform");
config = kafkaConfig;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,41 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kafka;
import java.util.HashMap;
import java.util.Map;
import org.testcontainers.containers.KafkaContainer;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
/**
* Manages the lifecycle of a Kafka cluster test resource.
*
* @author Alfusainey Jallow
*/
public class KafkaTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
@SuppressWarnings("deprecation")
private static KafkaContainer kafkaContainer = new KafkaContainer();
@Override
public Map<String, String> start() {
kafkaContainer.start();
return new HashMap<>();
}
@Override
public void stop() {
kafkaContainer.stop();
}
public static String getBootstrapServers() {
// if container is already started, start() will return early
kafkaContainer.start();
return kafkaContainer.getBootstrapServers();
}
}

View File

@@ -1,133 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-kinesis</artifactId>
<name>Debezium Server Amazon Kinesis Sink Adapter</name>
<packaging>jar</packaging>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>kinesis</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,118 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kinesis;
import java.net.URI;
import java.util.List;
import java.util.Optional;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.KinesisClientBuilder;
import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
/**
* Implementation of the consumer that delivers the messages into an Amazon Kinesis destination.
*
* @author Jiri Pechanec
*
*/
@Named("kinesis")
@Dependent
public class KinesisChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(KinesisChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.kinesis.";
private static final String PROP_REGION_NAME = PROP_PREFIX + "region";
private static final String PROP_ENDPOINT_NAME = PROP_PREFIX + "endpoint";
private String region;
private Optional<String> endpointOverride;
@ConfigProperty(name = PROP_PREFIX + "credentials.profile", defaultValue = "default")
String credentialsProfile;
@ConfigProperty(name = PROP_PREFIX + "null.key", defaultValue = "default")
String nullKey;
private KinesisClient client = null;
@Inject
@CustomConsumerBuilder
Instance<KinesisClient> customClient;
@PostConstruct
void connect() {
if (customClient.isResolvable()) {
client = customClient.get();
LOGGER.info("Obtained custom configured KinesisClient '{}'", client);
return;
}
final Config config = ConfigProvider.getConfig();
region = config.getValue(PROP_REGION_NAME, String.class);
endpointOverride = config.getOptionalValue(PROP_ENDPOINT_NAME, String.class);
final KinesisClientBuilder builder = KinesisClient.builder()
.region(Region.of(region))
.credentialsProvider(ProfileCredentialsProvider.create(credentialsProfile));
endpointOverride.ifPresent(endpoint -> builder.endpointOverride(URI.create(endpoint)));
client = builder.build();
LOGGER.info("Using default KinesisClient '{}'", client);
}
@PreDestroy
void close() {
try {
client.close();
}
catch (Exception e) {
LOGGER.warn("Exception while closing Kinesis client: {}", e);
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
for (ChangeEvent<Object, Object> record : records) {
LOGGER.trace("Received event '{}'", record);
Object rv = record.value();
if (rv == null) {
rv = "";
}
final PutRecordRequest putRecord = PutRecordRequest.builder()
.partitionKey((record.key() != null) ? getString(record.key()) : nullKey)
.streamName(streamNameMapper.map(record.destination()))
.data(SdkBytes.fromByteArray(getBytes(rv)))
.build();
client.putRecord(putRecord);
committer.markProcessed(record);
}
committer.markBatchFinished();
}
}
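
As a usage note, a minimal sketch (not part of the deleted module) of providing a custom KinesisClient to this sink through the @CustomConsumerBuilder qualifier, for example to point at a local Kinesis-compatible endpoint. The package name, endpoint URI, region and credentials profile are illustrative assumptions.

package io.debezium.server.kinesis.examples; // hypothetical package

import java.net.URI;

import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Produces;

import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisClient;

import io.debezium.server.CustomConsumerBuilder;

@Dependent
public class CustomKinesisClientProducer {

    @Produces
    @CustomConsumerBuilder
    public KinesisClient localKinesisClient() {
        return KinesisClient.builder()
                .region(Region.EU_CENTRAL_1)
                .credentialsProvider(ProfileCredentialsProvider.create("default"))
                .endpointOverride(URI.create("http://localhost:4566")) // assumed local endpoint
                .build();
    }
}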

View File

@@ -1,90 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kinesis;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import io.debezium.server.DebeziumServer;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse;
import software.amazon.awssdk.services.kinesis.model.Record;
import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
/**
* Integration test that verifies basic reading from a PostgreSQL database and writing to a Kinesis stream.
*
* @author Jiri Pechanec
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
public class KinesisIT {
private static final int MESSAGE_COUNT = 4;
// A Kinesis stream with this name must already exist and be empty
private static final String STREAM_NAME = "testc.inventory.customers";
protected static KinesisClient kinesis = null;
{
Testing.Files.delete(KinesisTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(KinesisTestConfigSource.OFFSET_STORE_PATH);
}
@Inject
DebeziumServer server;
void setupDependencies(@Observes ConnectorStartedEvent event) {
kinesis = KinesisClient.builder()
.region(Region.of(KinesisTestConfigSource.KINESIS_REGION))
.credentialsProvider(ProfileCredentialsProvider.create("default"))
.build();
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testKinesis() throws Exception {
Testing.Print.enable();
final GetShardIteratorResponse iteratorResponse = kinesis.getShardIterator(GetShardIteratorRequest.builder()
.streamName(STREAM_NAME)
.shardIteratorType(ShardIteratorType.TRIM_HORIZON)
.shardId("0")
.build());
final List<Record> records = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(KinesisTestConfigSource.waitForSeconds())).until(() -> {
final GetRecordsResponse recordsResponse = kinesis.getRecords(GetRecordsRequest.builder()
.shardIterator(iteratorResponse.shardIterator())
.limit(MESSAGE_COUNT)
.build());
records.addAll(recordsResponse.records());
return records.size() >= MESSAGE_COUNT;
});
}
}

View File

@@ -1,40 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.kinesis;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class KinesisTestConfigSource extends TestConfigSource {
public static final String KINESIS_REGION = "eu-central-1";
public KinesisTestConfigSource() {
Map<String, String> kinesisTest = new HashMap<>();
kinesisTest.put("debezium.sink.type", "kinesis");
kinesisTest.put("debezium.sink.kinesis.region", KINESIS_REGION);
kinesisTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
kinesisTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
kinesisTest.put("debezium.source.offset.flush.interval.ms", "0");
kinesisTest.put("debezium.source.topic.prefix", "testc");
kinesisTest.put("debezium.source.schema.include.list", "inventory");
kinesisTest.put("debezium.source.table.include.list", "inventory.customers");
config = kinesisTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@@ -1,170 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-nats-jetstream</artifactId>
<name>Debezium Server NATS JetStream Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>io.nats</groupId>
<artifactId>jnats</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,146 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.jetstream;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
import io.nats.client.Connection;
import io.nats.client.JetStream;
import io.nats.client.JetStreamManagement;
import io.nats.client.Nats;
import io.nats.client.api.StorageType;
import io.nats.client.api.StreamConfiguration;
/**
* Implementation of the consumer that delivers the messages into a NATS JetStream stream.
*
* @author Balázs Sipos
*/
@Named("nats-jetstream")
@Dependent
public class NatsJetStreamChangeConsumer extends BaseChangeConsumer
implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(NatsJetStreamChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.nats-jetstream.";
private static final String PROP_URL = PROP_PREFIX + "url";
private static final String PROP_CREATE_STREAM = PROP_PREFIX + "create-stream";
private static final String PROP_SUBJECTS = PROP_PREFIX + "subjects";
private static final String PROP_STORAGE = PROP_PREFIX + "storage";
private Connection nc;
private JetStream js;
@ConfigProperty(name = PROP_CREATE_STREAM, defaultValue = "false")
boolean createStream;
@Inject
@CustomConsumerBuilder
Instance<JetStream> customStreamingConnection;
@PostConstruct
void connect() {
// Read config
final Config config = ConfigProvider.getConfig();
String url = config.getValue(PROP_URL, String.class);
if (customStreamingConnection.isResolvable()) {
js = customStreamingConnection.get();
LOGGER.info("Obtained custom configured JetStream '{}'", js);
return;
}
try {
// Setup NATS connection
io.nats.client.Options natsOptions = new io.nats.client.Options.Builder()
.servers(url.split(","))
.noReconnect()
.build();
nc = Nats.connect(natsOptions);
// Create a basic stream, mostly for testing. If users want to configure their own stream, they can do so
// via the NATS CLI.
if (createStream) {
String subjects = config.getOptionalValue(PROP_SUBJECTS, String.class).orElse("*.*.*");
String storage = config.getOptionalValue(PROP_STORAGE, String.class).orElse("memory");
StorageType storageType = storage.equals("file") ? StorageType.File : StorageType.Memory;
StreamConfiguration streamConfig = StreamConfiguration.builder()
.name("DebeziumStream")
.description("The debezium stream, contains messages which are coming from debezium")
.subjects(subjects.split(","))
.storageType(storageType)
.build();
LOGGER.info("Creating stream with config: {}", streamConfig);
JetStreamManagement jsm = nc.jetStreamManagement();
jsm.addStream(streamConfig);
}
js = nc.jetStream();
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
@PreDestroy
void close() {
try {
if (nc != null) {
nc.close();
LOGGER.info("NATS connection closed.");
}
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records,
RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
for (ChangeEvent<Object, Object> rec : records) {
if (rec.value() != null) {
String subject = streamNameMapper.map(rec.destination());
byte[] recordBytes = getBytes(rec.value());
LOGGER.trace("Received event @ {} = '{}'", subject, getString(rec.value()));
try {
js.publish(subject, recordBytes);
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
committer.markProcessed(rec);
}
committer.markBatchFinished();
}
}
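
As a usage note, a minimal sketch (not part of the deleted module) of handing a pre-built JetStream context to this sink through the @CustomConsumerBuilder qualifier, bypassing the debezium.sink.nats-jetstream.* connection handling. The package name and server URL are illustrative assumptions.

package io.debezium.server.nats.jetstream.examples; // hypothetical package

import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Produces;

import io.debezium.DebeziumException;
import io.debezium.server.CustomConsumerBuilder;
import io.nats.client.Connection;
import io.nats.client.JetStream;
import io.nats.client.Nats;

@Dependent
public class CustomJetStreamProducer {

    @Produces
    @CustomConsumerBuilder
    public JetStream jetStream() {
        try {
            Connection nc = Nats.connect("nats://localhost:4222"); // assumed local NATS server
            return nc.jetStream();
        }
        catch (Exception e) {
            throw new DebeziumException(e);
        }
    }
}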

View File

@@ -1,96 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.jetstream;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.enterprise.event.Observes;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.nats.client.Connection;
import io.nats.client.Dispatcher;
import io.nats.client.JetStream;
import io.nats.client.Message;
import io.nats.client.Nats;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from a PostgreSQL database and writing to a NATS JetStream subject.
*
* @author Thiago Avancini
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(NatsJetStreamTestResourceLifecycleManager.class)
class NatsJetStreamIT {
private static final int MESSAGE_COUNT = 4;
private static final String SUBJECT_NAME = "testc.inventory.customers";
protected static Connection nc;
protected static JetStream js;
protected static Dispatcher d;
static {
Testing.Files.delete(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(NatsJetStreamTestConfigSource.OFFSET_STORE_PATH);
}
private static final List<Message> messages = Collections.synchronizedList(new ArrayList<>());
void setupDependencies(@Observes ConnectorStartedEvent event) {
Testing.Print.enable();
// Set up the NATS JetStream connection
try {
nc = Nats.connect(NatsJetStreamTestResourceLifecycleManager.getNatsContainerUrl());
js = nc.jetStream();
}
catch (Exception e) {
Testing.print("Could not connect to NATS Jetstream");
}
// Setup message handler
try {
d = nc.createDispatcher();
js.subscribe(SUBJECT_NAME, d, messages::add, true);
}
catch (Exception e) {
Testing.print("Could not register message handler: " + e.getMessage());
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@AfterAll
static void stop() throws Exception {
if (d != null) {
d.unsubscribe(SUBJECT_NAME);
}
}
@Test
void testNatsStreaming() throws Exception {
Awaitility.await().atMost(Duration.ofSeconds(NatsJetStreamTestConfigSource.waitForSeconds())).until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
}
}

View File

@@ -1,40 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.jetstream;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class NatsJetStreamTestConfigSource extends TestConfigSource {
public NatsJetStreamTestConfigSource() {
Map<String, String> natsJetStreamTest = new HashMap<>();
natsJetStreamTest.put("debezium.sink.type", "nats-jetstream");
natsJetStreamTest.put("debezium.sink.nats-jetstream.url",
NatsJetStreamTestResourceLifecycleManager.getNatsContainerUrl());
natsJetStreamTest.put("debezium.sink.nats-jetstream.create-stream", "true");
natsJetStreamTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
natsJetStreamTest.put("debezium.source.topic.prefix", "testc");
natsJetStreamTest.put("debezium.source.schema.include.list", "inventory");
natsJetStreamTest.put("debezium.source.table.include.list", "inventory.customers");
natsJetStreamTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
natsJetStreamTest.put("debezium.source.offset.flush.interval.ms", "0");
config = natsJetStreamTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}
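
To make the ordinal comment above concrete: with standard MicroProfile Config resolution, the source with the higher ordinal wins when two sources define the same key. A minimal sketch (the values are hypothetical):

import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;

public class OrdinalSketch {
    public static void main(String[] args) {
        Config config = ConfigProvider.getConfig();
        // If TestConfigSource also defined "debezium.sink.type", the value from
        // NatsJetStreamTestConfigSource would still be returned here, because its
        // ordinal is one higher than that of its parent.
        System.out.println(config.getValue("debezium.sink.type", String.class));
    }
}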

View File

@@ -1,62 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.jetstream;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
/**
* Manages the lifecycle of a NATS JetStream test resource.
*
* @author Thiago Avancini
*/
public class NatsJetStreamTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public static final int NATS_PORT = 4222;
public static final String NATS_IMAGE = "nats:latest";
private static final AtomicBoolean running = new AtomicBoolean(false);
private static final GenericContainer<?> container = new GenericContainer<>(NATS_IMAGE)
.withExposedPorts(NATS_PORT)
.withCommand("-js")
.waitingFor(new LogMessageWaitStrategy().withRegEx(".*Server is ready.*"));
private static synchronized void start(boolean ignored) {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
start(true);
Map<String, String> params = new ConcurrentHashMap<>();
return params;
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
public static String getNatsContainerUrl() {
start(true);
return String.format("nats://%s:%d", container.getHost(), container.getFirstMappedPort());
}
}

View File

@@ -1 +0,0 @@
io.debezium.server.nats.jetstream.NatsJetStreamTestConfigSource

View File

@@ -1,174 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-nats-streaming</artifactId>
<name>Debezium Server NATS Streaming Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>io.nats</groupId>
<artifactId>jnats</artifactId>
</dependency>
<dependency>
<groupId>io.nats</groupId>
<artifactId>java-nats-streaming</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,136 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.streaming;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.streaming.NatsStreaming;
import io.nats.streaming.StreamingConnection;
/**
* Implementation of the consumer that delivers the messages into NATS Streaming subject.
*
* @author Thiago Avancini
*/
@Named("nats-streaming")
@Dependent
public class NatsStreamingChangeConsumer extends BaseChangeConsumer
implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(NatsStreamingChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.nats-streaming.";
private static final String PROP_URL = PROP_PREFIX + "url";
private static final String PROP_CLUSTER_ID = PROP_PREFIX + "cluster.id";
private static final String PROP_CLIENT_ID = PROP_PREFIX + "client.id";
private String url;
private String clusterId;
private String clientId;
private Connection nc;
private StreamingConnection sc;
@Inject
@CustomConsumerBuilder
Instance<StreamingConnection> customStreamingConnection;
@PostConstruct
void connect() {
if (customStreamingConnection.isResolvable()) {
sc = customStreamingConnection.get();
LOGGER.info("Obtained custom configured StreamingConnection '{}'", sc);
return;
}
// Read config
final Config config = ConfigProvider.getConfig();
url = config.getValue(PROP_URL, String.class);
clusterId = config.getValue(PROP_CLUSTER_ID, String.class);
clientId = config.getValue(PROP_CLIENT_ID, String.class);
try {
// Setup NATS connection
io.nats.client.Options natsOptions = new io.nats.client.Options.Builder()
.server(url)
.noReconnect()
.build();
nc = Nats.connect(natsOptions);
// Setup NATS Streaming connection
io.nats.streaming.Options stanOptions = new io.nats.streaming.Options.Builder()
.natsConn(nc)
.build();
sc = NatsStreaming.connect(clusterId, clientId, stanOptions);
}
catch (Exception e) {
throw new DebeziumException(e);
}
LOGGER.info("Using default StreamingConnection '{}'", sc);
}
@PreDestroy
void close() {
try {
if (sc != null) {
sc.close();
LOGGER.info("NATS Streaming connection closed.");
}
if (nc != null) {
nc.close();
LOGGER.info("NATS connection closed.");
}
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records,
RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
for (ChangeEvent<Object, Object> record : records) {
if (record.value() != null) {
String subject = streamNameMapper.map(record.destination());
byte[] recordBytes = getBytes(record.value());
LOGGER.trace("Received event @ {} = '{}'", subject, getString(record.value()));
try {
sc.publish(subject, recordBytes);
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
committer.markProcessed(record);
}
committer.markBatchFinished();
}
}
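
The @CustomConsumerBuilder injection point above lets an application replace the default connection bootstrap. One possible override, assuming a CDI producer method is an acceptable way to satisfy that qualifier (URL, cluster id and client id are placeholders):

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;

import io.debezium.server.CustomConsumerBuilder;
import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.streaming.NatsStreaming;
import io.nats.streaming.StreamingConnection;

@ApplicationScoped
public class CustomStreamingConnectionProducer {

    @Produces
    @CustomConsumerBuilder
    public StreamingConnection streamingConnection() throws Exception {
        // Placeholder connection settings; tune options (TLS, reconnects, ...) as needed
        Connection nc = Nats.connect("nats://localhost:4222");
        return NatsStreaming.connect("debezium-cluster", "debezium-custom-client",
                new io.nats.streaming.Options.Builder().natsConn(nc).build());
    }
}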

View File

@@ -1,108 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.streaming;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.enterprise.event.Observes;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.nats.streaming.Message;
import io.nats.streaming.MessageHandler;
import io.nats.streaming.NatsStreaming;
import io.nats.streaming.Options;
import io.nats.streaming.StreamingConnection;
import io.nats.streaming.Subscription;
import io.nats.streaming.SubscriptionOptions;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from PostgreSQL database and writing to NATS Streaming subject.
*
* @author Thiago Avancini
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(NatsStreamingTestResourceLifecycleManager.class)
public class NatsStreamingIT {
private static final int MESSAGE_COUNT = 4;
private static final String SUBJECT_NAME = "testc.inventory.customers";
private static final String CLUSTER_ID = "debezium";
private static final String CLIENT_ID = "debezium-test";
protected static StreamingConnection sc;
protected static Subscription subscription;
static {
Testing.Files.delete(NatsStreamingTestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(NatsStreamingTestConfigSource.OFFSET_STORE_PATH);
}
private static final List<Message> messages = Collections.synchronizedList(new ArrayList<>());
void setupDependencies(@Observes ConnectorStartedEvent event) {
Testing.Print.enable();
// Setup NATS Streaming connection
Options stanOptions = new Options.Builder()
.natsUrl(NatsStreamingTestResourceLifecycleManager.getNatsStreamingContainerUrl())
.build();
try {
sc = NatsStreaming.connect(CLUSTER_ID, CLIENT_ID, stanOptions);
}
catch (Exception e) {
Testing.print("Could not connect to NATS Streaming");
}
// Setup message handler
try {
subscription = sc.subscribe(SUBJECT_NAME, new MessageHandler() {
public void onMessage(Message m) {
messages.add(m);
}
}, new SubscriptionOptions.Builder().deliverAllAvailable().build());
}
catch (Exception e) {
Testing.print("Could not register message handler");
}
}
void connectorCompleted(@Observes final ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@AfterAll
static void stop() throws Exception {
if (subscription != null) {
subscription.unsubscribe();
}
if (sc != null) {
sc.close();
}
}
@Test
public void testNatsStreaming() {
Awaitility.await()
.atMost(Duration.ofSeconds(NatsStreamingTestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
}
}

View File

@@ -1,42 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.streaming;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class NatsStreamingTestConfigSource extends TestConfigSource {
public NatsStreamingTestConfigSource() {
Map<String, String> natsStreamingTest = new HashMap<>();
natsStreamingTest.put("debezium.sink.type", "nats-streaming");
natsStreamingTest.put("debezium.sink.nats-streaming.url",
NatsStreamingTestResourceLifecycleManager.getNatsStreamingContainerUrl());
natsStreamingTest.put("debezium.sink.nats-streaming.cluster.id", "debezium");
natsStreamingTest.put("debezium.sink.nats-streaming.client.id", "debezium-sink");
natsStreamingTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
natsStreamingTest.put("debezium.source.topic.prefix", "testc");
natsStreamingTest.put("debezium.source.schema.include.list", "inventory");
natsStreamingTest.put("debezium.source.table.include.list", "inventory.customers");
natsStreamingTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
natsStreamingTest.put("debezium.source.offset.flush.interval.ms", "0");
config = natsStreamingTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,62 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.nats.streaming;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
/**
* Manages the lifecycle of a NATS Streaming test resource.
*
* @author Thiago Avancini
*/
public class NatsStreamingTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public static final int NATS_STREAMING_PORT = 4222;
public static final String NATS_STREAMING_IMAGE = "nats-streaming:latest";
private static final AtomicBoolean running = new AtomicBoolean(false);
private static final GenericContainer<?> container = new GenericContainer<>(NATS_STREAMING_IMAGE)
.withExposedPorts(NATS_STREAMING_PORT)
.withCommand("-SD", "-cid", "debezium")
.waitingFor(new LogMessageWaitStrategy().withRegEx(".*Server is ready.*"));
private static synchronized void start(boolean ignored) {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
start(true);
Map<String, String> params = new ConcurrentHashMap<>();
return params;
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
public static String getNatsStreamingContainerUrl() {
start(true);
return String.format("nats://%s:%d", container.getContainerIpAddress(), container.getFirstMappedPort());
}
}

View File

@@ -1 +0,0 @@
io.debezium.server.nats.streaming.NatsStreamingTestConfigSource

View File

@@ -1,219 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>debezium-server-pravega</artifactId>
<name>Debezium Server Pravega Sink Adapter</name>
<properties>
<!-- IMPORTANT: This must be aligned with the netty shipped with Quarkus, specified in debezium-build-parent -->
<netty.version>4.1.82.Final</netty.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-bom</artifactId>
<version>${netty.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>io.pravega</groupId>
<artifactId>pravega-client</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-common</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-http</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-http2</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-socks</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler-proxy</artifactId>
<version>${netty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
<version>${netty.version}</version>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.jsonwebtoken</groupId>
<artifactId>jjwt</artifactId>
<version>0.9.1</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test-pravega</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<debezium.sink.type>pravega</debezium.sink.type>
<debezium.sink.pravega.scope>testc.inventory.customers</debezium.sink.pravega.scope>
<debezium.sink.pravega.transaction>false</debezium.sink.pravega.transaction>
</systemPropertyVariables>
<runOrder>${runOrder}</runOrder>
</configuration>
</execution>
<execution>
<id>integration-test-pravega-txn</id>
<goals>
<goal>integration-test</goal>
</goals>
<configuration>
<systemPropertyVariables>
<debezium.sink.type>pravega</debezium.sink.type>
<debezium.sink.pravega.scope>testc.inventory.customers</debezium.sink.pravega.scope>
<debezium.sink.pravega.transaction>true</debezium.sink.pravega.transaction>
</systemPropertyVariables>
<runOrder>${runOrder}</runOrder>
</configuration>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,169 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pravega;
import java.net.URI;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine.ChangeConsumer;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.Transaction;
import io.pravega.client.stream.TransactionalEventStreamWriter;
import io.pravega.client.stream.TxnFailedException;
import io.pravega.client.stream.impl.ByteArraySerializer;
@Named("pravega")
@Dependent
public class PravegaChangeConsumer extends BaseChangeConsumer implements ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(PravegaChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.pravega.";
private static final String PROP_CONTROLLER = PROP_PREFIX + "controller.uri";
private static final String PROP_SCOPE = PROP_PREFIX + "scope";
private static final String PROP_TXN = PROP_PREFIX + "transaction";
@ConfigProperty(name = PROP_CONTROLLER, defaultValue = "tcp://localhost:9090")
URI controllerUri;
private String scope;
@ConfigProperty(name = PROP_TXN, defaultValue = "false")
boolean txn;
private ClientConfig clientConfig;
private EventStreamClientFactory factory;
private EventWriterConfig writerConfig;
@PostConstruct
void constructor() {
scope = ConfigProvider.getConfig().getValue(PROP_SCOPE, String.class);
clientConfig = ClientConfig.builder()
.controllerURI(controllerUri)
.build();
LOGGER.debug("Creating client factory for scope {} with controller {}", scope, controllerUri);
factory = EventStreamClientFactory.withScope(scope, clientConfig);
writerConfig = EventWriterConfig.builder().build();
}
@PreDestroy
void destructor() {
LOGGER.debug("Closing client factory");
factory.close();
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer) throws InterruptedException {
try (PravegaSink impl = (txn) ? new PravegaTxnSinkImpl() : new PravegaSinkImpl()) {
impl.handleBatch(records, committer);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
class PravegaSinkImpl implements PravegaSink {
private final Map<String, EventStreamWriter<byte[]>> writers = new HashMap<>();
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer) throws InterruptedException {
for (ChangeEvent<Object, Object> changeEvent : records) {
String streamName = streamNameMapper.map(changeEvent.destination());
final EventStreamWriter<byte[]> writer = writers.computeIfAbsent(streamName, (stream) -> createWriter(stream));
if (changeEvent.key() != null) {
writer.writeEvent(getString(changeEvent.key()), getBytes(changeEvent.value()));
}
else {
writer.writeEvent(getBytes(changeEvent.value()));
}
committer.markProcessed(changeEvent);
}
committer.markBatchFinished();
}
private EventStreamWriter<byte[]> createWriter(String stream) {
LOGGER.debug("Creating writer for stream {}", stream);
return factory.createEventWriter(stream, new ByteArraySerializer(), writerConfig);
}
@Override
public void close() throws Exception {
LOGGER.debug("Closing {} writer(s)", writers.size());
writers.values().forEach(EventStreamWriter::close);
}
}
class PravegaTxnSinkImpl implements PravegaSink {
private final Map<String, TransactionalEventStreamWriter<byte[]>> writers = new HashMap<>();
private final Map<String, Transaction<byte[]>> txns = new HashMap<>();
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer) throws InterruptedException {
for (ChangeEvent<Object, Object> changeEvent : records) {
String streamName = streamNameMapper.map(changeEvent.destination());
final Transaction<byte[]> txn = txns.computeIfAbsent(streamName, (stream) -> createTxn(stream));
try {
if (changeEvent.key() != null) {
txn.writeEvent(getString(changeEvent.key()), getBytes(changeEvent.value()));
}
else {
txn.writeEvent(getBytes(changeEvent.value()));
}
}
catch (TxnFailedException e) {
throw new RuntimeException(e);
}
committer.markProcessed(changeEvent);
}
txns.values().forEach(t -> {
try {
t.commit();
}
catch (TxnFailedException e) {
throw new RuntimeException(e);
}
});
committer.markBatchFinished();
txns.clear();
}
private Transaction<byte[]> createTxn(String stream) {
final TransactionalEventStreamWriter<byte[]> writer = writers.computeIfAbsent(stream, (s) -> createWriter(s));
LOGGER.debug("Creating transaction for stream {}", stream);
return writer.beginTxn();
}
private TransactionalEventStreamWriter<byte[]> createWriter(String stream) {
LOGGER.debug("Creating writer for stream {}", stream);
return factory.createTransactionalEventWriter(stream, new ByteArraySerializer(), writerConfig);
}
@Override
public void close() throws Exception {
LOGGER.debug("Closing {} writer(s)", writers.size());
writers.values().forEach(TransactionalEventStreamWriter::close);
}
}
}

View File

@@ -1,13 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pravega;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine.ChangeConsumer;
public interface PravegaSink extends ChangeConsumer<ChangeEvent<Object, Object>>, AutoCloseable {
}

View File

@@ -1,92 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pravega;
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.event.Observes;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;
import io.debezium.server.TestConfigSource;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.UTF8StringSerializer;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(PravegaTestResource.class)
public class PravegaIT {
private static final int MESSAGE_COUNT = 4;
protected static final String STREAM_NAME = "testc.inventory.customers";
static EventStreamReader<String> reader;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(TestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) {
Testing.Print.enable();
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) {
if (!event.isSuccess()) {
throw new RuntimeException(event.getError().get());
}
}
/**
* Creates a reader where scope name, stream name and reader group name are STREAM_NAME.
* Consumes 4 events using the reader.
*/
@Test
public void testPravega() {
URI controllerURI = URI.create(PravegaTestResource.getControllerUri());
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(controllerURI)
.build();
ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
.stream(Stream.of(STREAM_NAME, STREAM_NAME))
.disableAutomaticCheckpoints()
.build();
try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(STREAM_NAME, clientConfig)) {
readerGroupManager.createReaderGroup(STREAM_NAME, readerGroupConfig);
}
ReaderConfig readerConfig = ReaderConfig.builder().build();
reader = EventStreamClientFactory.withScope(STREAM_NAME, clientConfig)
.createReader("0", STREAM_NAME, new UTF8StringSerializer(), readerConfig);
final List<String> records = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> {
String event;
if ((event = reader.readNextEvent(2000).getEvent()) != null) {
records.add(event);
}
return records.size() >= MESSAGE_COUNT;
});
}
}

View File

@@ -1,76 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pravega;
import java.net.URI;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import org.eclipse.microprofile.config.ConfigProvider;
import org.testcontainers.containers.FixedHostPortGenericContainer;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.Wait;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
/**
* Runs a standalone Pravega cluster in-process.
* <p>
* <code>pravega.controller.uri</code> system property will contain the
* Pravega Controller URI.
*/
public class PravegaTestResource implements QuarkusTestResourceLifecycleManager {
private static final String PRAVEGA_VERSION = "0.9.0";
public static final int CONTROLLER_PORT = 9090;
public static final int SEGMENT_STORE_PORT = 12345;
public static final String PRAVEGA_IMAGE = "pravega/pravega:" + PRAVEGA_VERSION;
@SuppressWarnings("deprecation")
private static final GenericContainer<?> container = new FixedHostPortGenericContainer<>(PRAVEGA_IMAGE)
.withFixedExposedPort(CONTROLLER_PORT, CONTROLLER_PORT)
.withFixedExposedPort(SEGMENT_STORE_PORT, SEGMENT_STORE_PORT)
.withStartupTimeout(Duration.ofSeconds(90))
.waitingFor(Wait.forLogMessage(".*Starting gRPC server listening on port: 9090.*", 1))
.withCommand("standalone");
@Override
public Map<String, String> start() {
container.start();
String scope = ConfigProvider.getConfig().getValue("debezium.sink.pravega.scope", String.class);
try (StreamManager streamManager = StreamManager.create(URI.create(getControllerUri()))) {
streamManager.createScope(scope);
StreamConfiguration streamConfig = StreamConfiguration.builder()
.scalingPolicy(ScalingPolicy.fixed(1))
.build();
streamManager.createStream(scope, scope, streamConfig);
}
return Collections.singletonMap("pravega.controller.uri", getControllerUri());
}
@Override
public void stop() {
try {
if (container != null) {
container.stop();
}
}
catch (Exception e) {
// ignored
}
}
public static String getControllerUri() {
return "tcp://" + container.getHost() + ":" + container.getMappedPort(CONTROLLER_PORT);
}
}

View File

@@ -1,156 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-pubsub</artifactId>
<name>Debezium Server Google Cloud Pub/Sub Sink Adapter</name>
<packaging>jar</packaging>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-pubsub</artifactId>
</dependency>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-pubsublite</artifactId>
</dependency>
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-core</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>gcloud</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
</project>

View File

@@ -1,265 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.threeten.bp.Duration;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.gax.batching.BatchingSettings;
import com.google.api.gax.batching.FlowControlSettings;
import com.google.api.gax.batching.FlowController;
import com.google.api.gax.core.CredentialsProvider;
import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.retrying.RetrySettings;
import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.TransportChannelProvider;
import com.google.cloud.ServiceOptions;
import com.google.cloud.pubsub.v1.Publisher;
import com.google.cloud.pubsub.v1.Publisher.Builder;
import com.google.protobuf.ByteString;
import com.google.pubsub.v1.ProjectTopicName;
import com.google.pubsub.v1.PubsubMessage;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
/**
* Implementation of the consumer that delivers the messages into Google Pub/Sub destination.
*
* @author Jiri Pechanec
*
*/
@Named("pubsub")
@Dependent
public class PubSubChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(PubSubChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.pubsub.";
private static final String PROP_PROJECT_ID = PROP_PREFIX + "project.id";
public interface PublisherBuilder {
Publisher get(ProjectTopicName topicName);
}
private String projectId;
private final Map<String, Publisher> publishers = new HashMap<>();
private PublisherBuilder publisherBuilder;
@ConfigProperty(name = PROP_PREFIX + "ordering.enabled", defaultValue = "true")
boolean orderingEnabled;
@ConfigProperty(name = PROP_PREFIX + "null.key", defaultValue = "default")
String nullKey;
@ConfigProperty(name = PROP_PREFIX + "batch.delay.threshold.ms", defaultValue = "100")
Integer maxDelayThresholdMs;
@ConfigProperty(name = PROP_PREFIX + "batch.element.count.threshold", defaultValue = "100")
Long maxBufferSize;
@ConfigProperty(name = PROP_PREFIX + "batch.request.byte.threshold", defaultValue = "10000000")
Long maxBufferBytes;
@ConfigProperty(name = PROP_PREFIX + "flowcontrol.enabled", defaultValue = "false")
boolean flowControlEnabled;
@ConfigProperty(name = PROP_PREFIX + "flowcontrol.max.outstanding.messages", defaultValue = "9223372036854775807")
Long maxOutstandingMessages;
@ConfigProperty(name = PROP_PREFIX + "flowcontrol.max.outstanding.bytes", defaultValue = "9223372036854775807")
Long maxOutstandingRequestBytes;
@ConfigProperty(name = PROP_PREFIX + "retry.total.timeout.ms", defaultValue = "60000")
Integer maxTotalTimeoutMs;
@ConfigProperty(name = PROP_PREFIX + "retry.max.rpc.timeout.ms", defaultValue = "10000")
Integer maxRequestTimeoutMs;
@ConfigProperty(name = PROP_PREFIX + "retry.initial.delay.ms", defaultValue = "5")
Integer initialRetryDelay;
@ConfigProperty(name = PROP_PREFIX + "retry.delay.multiplier", defaultValue = "2.0")
Double retryDelayMultiplier;
@ConfigProperty(name = PROP_PREFIX + "retry.max.delay.ms", defaultValue = "9223372036854775807")
Long maxRetryDelay;
@ConfigProperty(name = PROP_PREFIX + "retry.initial.rpc.timeout.ms", defaultValue = "10000")
Integer initialRpcTimeout;
@ConfigProperty(name = PROP_PREFIX + "retry.rpc.timeout.multiplier", defaultValue = "2.0")
Double rpcTimeoutMultiplier;
@ConfigProperty(name = PROP_PREFIX + "address")
Optional<String> address;
@Inject
@CustomConsumerBuilder
Instance<PublisherBuilder> customPublisherBuilder;
private ManagedChannel channel;
private TransportChannelProvider channelProvider;
private CredentialsProvider credentialsProvider;
@PostConstruct
void connect() {
final Config config = ConfigProvider.getConfig();
projectId = config.getOptionalValue(PROP_PROJECT_ID, String.class).orElse(ServiceOptions.getDefaultProjectId());
if (customPublisherBuilder.isResolvable()) {
publisherBuilder = customPublisherBuilder.get();
LOGGER.info("Obtained custom configured PublisherBuilder '{}'", customPublisherBuilder);
return;
}
BatchingSettings.Builder batchingSettings = BatchingSettings.newBuilder()
.setDelayThreshold(Duration.ofMillis(maxDelayThresholdMs))
.setElementCountThreshold(maxBufferSize)
.setRequestByteThreshold(maxBufferBytes);
if (flowControlEnabled) {
batchingSettings.setFlowControlSettings(FlowControlSettings.newBuilder()
.setMaxOutstandingRequestBytes(maxOutstandingRequestBytes)
.setMaxOutstandingElementCount(maxOutstandingMessages)
.setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block)
.build());
}
if (address.isPresent()) {
String hostport = address.get();
channel = ManagedChannelBuilder
.forTarget(hostport)
.usePlaintext()
.build();
channelProvider = FixedTransportChannelProvider.create(GrpcTransportChannel.create(channel));
credentialsProvider = NoCredentialsProvider.create();
}
publisherBuilder = (t) -> {
try {
Builder builder = Publisher.newBuilder(t)
.setEnableMessageOrdering(orderingEnabled)
.setBatchingSettings(batchingSettings.build())
.setRetrySettings(
RetrySettings.newBuilder()
.setTotalTimeout(Duration.ofMillis(maxTotalTimeoutMs))
.setMaxRpcTimeout(Duration.ofMillis(maxRequestTimeoutMs))
.setInitialRetryDelay(Duration.ofMillis(initialRetryDelay))
.setRetryDelayMultiplier(retryDelayMultiplier)
.setMaxRetryDelay(Duration.ofMillis(maxRetryDelay))
.setInitialRpcTimeout(Duration.ofMillis(initialRpcTimeout))
.setRpcTimeoutMultiplier(rpcTimeoutMultiplier)
.build());
if (address.isPresent()) {
builder.setChannelProvider(channelProvider).setCredentialsProvider(credentialsProvider);
}
return builder.build();
}
catch (IOException e) {
throw new DebeziumException(e);
}
};
LOGGER.info("Using default PublisherBuilder '{}'", publisherBuilder);
}
@PreDestroy
void close() {
publishers.values().forEach(publisher -> {
try {
publisher.shutdown();
}
catch (Exception e) {
LOGGER.warn("Exception while closing publisher: {}", e);
}
});
if (channel != null && !channel.isShutdown()) {
channel.shutdown();
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
final List<ApiFuture<String>> deliveries = new ArrayList<>();
for (ChangeEvent<Object, Object> record : records) {
LOGGER.trace("Received event '{}'", record);
final String topicName = streamNameMapper.map(record.destination());
Publisher publisher = publishers.computeIfAbsent(topicName, (x) -> publisherBuilder.get(ProjectTopicName.of(projectId, x)));
final PubsubMessage.Builder pubsubMessage = PubsubMessage.newBuilder();
if (orderingEnabled) {
if (record.key() == null) {
pubsubMessage.setOrderingKey(nullKey);
}
else if (record.key() instanceof String) {
pubsubMessage.setOrderingKey((String) record.key());
}
else if (record.key() instanceof byte[]) {
pubsubMessage.setOrderingKeyBytes(ByteString.copyFrom((byte[]) record.key()));
}
}
if (record.value() instanceof String) {
pubsubMessage.setData(ByteString.copyFromUtf8((String) record.value()));
}
else if (record.value() instanceof byte[]) {
pubsubMessage.setData(ByteString.copyFrom((byte[]) record.value()));
}
deliveries.add(publisher.publish(pubsubMessage.build()));
committer.markProcessed(record);
}
List<String> messageIds;
try {
messageIds = ApiFutures.allAsList(deliveries).get();
}
catch (ExecutionException e) {
throw new DebeziumException(e);
}
LOGGER.trace("Sent messages with ids: {}", messageIds);
committer.markBatchFinished();
}
@Override
public boolean supportsTombstoneEvents() {
return false;
}
}
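
Similarly, the consumer above resolves a custom PublisherBuilder before constructing its own. A minimal sketch of supplying one through a CDI producer, assuming that injection style is the intended extension point (the ordering setting shown is a placeholder):

import java.io.IOException;

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;

import com.google.cloud.pubsub.v1.Publisher;

import io.debezium.DebeziumException;
import io.debezium.server.CustomConsumerBuilder;
import io.debezium.server.pubsub.PubSubChangeConsumer;

@ApplicationScoped
public class CustomPublisherBuilderProducer {

    @Produces
    @CustomConsumerBuilder
    public PubSubChangeConsumer.PublisherBuilder publisherBuilder() {
        return topicName -> {
            try {
                // Placeholder settings; batching, retry and channel options can be added here
                return Publisher.newBuilder(topicName)
                        .setEnableMessageOrdering(false)
                        .build();
            }
            catch (IOException e) {
                throw new DebeziumException(e);
            }
        };
    }
}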

View File

@@ -1,166 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.cloud.ServiceOptions;
import com.google.cloud.pubsublite.CloudRegionOrZone;
import com.google.cloud.pubsublite.ProjectId;
import com.google.cloud.pubsublite.TopicName;
import com.google.cloud.pubsublite.TopicPath;
import com.google.cloud.pubsublite.cloudpubsub.Publisher;
import com.google.cloud.pubsublite.cloudpubsub.PublisherSettings;
import com.google.protobuf.ByteString;
import com.google.pubsub.v1.PubsubMessage;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
/**
* Implementation of the consumer that delivers the messages into Google Pub/Sub Lite destination.
*/
@Named("pubsublite")
@Dependent
public class PubSubLiteChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(PubSubLiteChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.pubsublite.";
private static final String PROP_PROJECT_ID = PROP_PREFIX + "project.id";
private static final String PROP_REGION = PROP_PREFIX + "region";
public interface PublisherBuilder {
Publisher get(String topicName);
}
private PublisherBuilder publisherBuilder;
private final Map<String, Publisher> publishers = new HashMap<>();
@ConfigProperty(name = PROP_PREFIX + "ordering.enabled", defaultValue = "true")
boolean orderingEnabled;
@ConfigProperty(name = PROP_PREFIX + "null.key", defaultValue = "default")
String nullKey;
@Inject
@CustomConsumerBuilder
Instance<PublisherBuilder> customPublisherBuilder;
@PostConstruct
void connect() {
final Config config = ConfigProvider.getConfig();
String projectId = config.getOptionalValue(PROP_PROJECT_ID, String.class).orElse(ServiceOptions.getDefaultProjectId());
String region = config.getValue(PROP_REGION, String.class);
if (customPublisherBuilder.isResolvable()) {
publisherBuilder = customPublisherBuilder.get();
LOGGER.info("Obtained custom configured PublisherBuilder '{}'", customPublisherBuilder);
return;
}
publisherBuilder = (t) -> {
TopicPath topicPath = TopicPath
.newBuilder()
.setName(TopicName.of(t))
.setProject(ProjectId.of(projectId))
.setLocation(CloudRegionOrZone.parse(region))
.build();
PublisherSettings publisherSettings = PublisherSettings
.newBuilder()
.setTopicPath(topicPath)
.build();
Publisher publisher = Publisher.create(publisherSettings);
publisher.startAsync().awaitRunning();
return publisher;
};
LOGGER.info("Using default PublisherBuilder '{}'", publisherBuilder);
}
@PreDestroy
void close() {
publishers.values().forEach(publisher -> {
try {
publisher.stopAsync().awaitTerminated();
}
catch (Exception e) {
LOGGER.warn("Exception while closing publisher: " + e);
}
});
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer) throws InterruptedException {
final List<ApiFuture<String>> deliveries = new ArrayList<>();
for (ChangeEvent<Object, Object> record : records) {
LOGGER.trace("Received event '{}'", record);
final String topicName = streamNameMapper.map(record.destination());
Publisher publisher = publishers.computeIfAbsent(topicName, (topic) -> publisherBuilder.get(topic));
final PubsubMessage.Builder pubsubMessage = PubsubMessage.newBuilder();
if (orderingEnabled) {
if (record.key() == null) {
pubsubMessage.setOrderingKey(nullKey);
}
else if (record.key() instanceof String) {
pubsubMessage.setOrderingKey((String) record.key());
}
else if (record.key() instanceof byte[]) {
pubsubMessage.setOrderingKeyBytes(ByteString.copyFrom((byte[]) record.key()));
}
}
if (record.value() instanceof String) {
pubsubMessage.setData(ByteString.copyFromUtf8((String) record.value()));
}
else if (record.value() instanceof byte[]) {
pubsubMessage.setData(ByteString.copyFrom((byte[]) record.value()));
}
deliveries.add(publisher.publish(pubsubMessage.build()));
committer.markProcessed(record);
}
List<String> messageIds;
try {
messageIds = ApiFutures.allAsList(deliveries).get();
}
catch (ExecutionException e) {
throw new DebeziumException(e);
}
LOGGER.trace("Sent messages with ids: {}", messageIds);
committer.markBatchFinished();
}
@Override
public boolean supportsTombstoneEvents() {
return false;
}
}

View File

@@ -1,196 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import com.google.api.gax.core.CredentialsProvider;
import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.NotFoundException;
import com.google.api.gax.rpc.TransportChannelProvider;
import com.google.cloud.ServiceOptions;
import com.google.cloud.pubsub.v1.AckReplyConsumer;
import com.google.cloud.pubsub.v1.MessageReceiver;
import com.google.cloud.pubsub.v1.Subscriber;
import com.google.cloud.pubsub.v1.SubscriptionAdminClient;
import com.google.cloud.pubsub.v1.SubscriptionAdminSettings;
import com.google.cloud.pubsub.v1.TopicAdminClient;
import com.google.cloud.pubsub.v1.TopicAdminSettings;
import com.google.pubsub.v1.ProjectSubscriptionName;
import com.google.pubsub.v1.PubsubMessage;
import com.google.pubsub.v1.PushConfig;
import com.google.pubsub.v1.Topic;
import com.google.pubsub.v1.TopicName;
import io.debezium.server.DebeziumServer;
import io.debezium.server.TestConfigSource;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from PostgreSQL database and writing to a Google Cloud PubSub stream running on a Google PubSub Emulator
*
* @author Jiri Pechanec
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(PubSubTestResourceLifecycleManager.class)
public class PubSubIT {
private static final int MESSAGE_COUNT = 4;
// The topic of this name must exist and be empty
private static final String STREAM_NAME = "testc.inventory.customers";
private static final String SUBSCRIPTION_NAME = "testsubs";
protected static Subscriber subscriber;
private static ProjectSubscriptionName subscriptionName = ProjectSubscriptionName.of(ServiceOptions.getDefaultProjectId(), SUBSCRIPTION_NAME);
private static TopicName topicName = TopicName.of(ServiceOptions.getDefaultProjectId(), STREAM_NAME);
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(PubSubTestConfigSource.OFFSET_STORE_PATH);
}
private static ManagedChannel channel;
private static TransportChannelProvider channelProvider;
private static CredentialsProvider credentialsProvider;
@AfterAll
static void stop() throws IOException {
if (subscriber != null) {
subscriber.stopAsync();
subscriber.awaitTerminated();
try (SubscriptionAdminClient subscriptionAdminClient = createSubscriptionAdminClient()) {
subscriptionAdminClient.deleteSubscription(subscriptionName);
}
try (TopicAdminClient topicAdminClient = createTopicAdminClient()) {
topicAdminClient.deleteTopic(topicName);
}
}
if (channel != null && !channel.isShutdown()) {
channel.shutdown();
}
}
@Inject
DebeziumServer server;
private static final List<PubsubMessage> messages = Collections.synchronizedList(new ArrayList<>());
class TestMessageReceiver implements MessageReceiver {
@Override
public void receiveMessage(PubsubMessage message, AckReplyConsumer consumer) {
Testing.print("Message arrived: " + message);
messages.add(message);
consumer.ack();
}
}
void setupDependencies(@Observes ConnectorStartedEvent event) throws IOException {
Testing.Print.enable();
createChannel();
// get into a clean state before running the test
try (SubscriptionAdminClient subscriptionAdminClient = createSubscriptionAdminClient()) {
subscriptionAdminClient.deleteSubscription(subscriptionName);
}
catch (NotFoundException e) {
// the subscription did not exist yet; nothing to delete
}
try (TopicAdminClient topicAdminClient = createTopicAdminClient()) {
topicAdminClient.deleteTopic(topicName);
}
catch (NotFoundException e) {
// the topic did not exist yet; nothing to delete
}
// setup topic and sub
try (TopicAdminClient topicAdminClient = createTopicAdminClient()) {
Topic topic = topicAdminClient.createTopic(topicName);
Testing.print("Created topic: " + topic.getName());
}
try (SubscriptionAdminClient subscriptionAdminClient = createSubscriptionAdminClient()) {
int ackDeadlineSeconds = 0;
subscriptionAdminClient.createSubscription(subscriptionName, topicName,
PushConfig.newBuilder().build(), ackDeadlineSeconds);
}
subscriber = createSubscriber();
subscriber.startAsync().awaitRunning();
}
void createChannel() {
channel = ManagedChannelBuilder.forTarget(PubSubTestResourceLifecycleManager.getEmulatorEndpoint()).usePlaintext().build();
channelProvider = FixedTransportChannelProvider.create(GrpcTransportChannel.create(channel));
credentialsProvider = NoCredentialsProvider.create();
Testing.print("Executing test towards pubsub emulator running at: " + PubSubTestResourceLifecycleManager.getEmulatorEndpoint());
}
Subscriber createSubscriber() {
return Subscriber.newBuilder(subscriptionName, new TestMessageReceiver())
.setChannelProvider(channelProvider)
.setCredentialsProvider(credentialsProvider)
.build();
}
static SubscriptionAdminClient createSubscriptionAdminClient() throws IOException {
return SubscriptionAdminClient.create(
SubscriptionAdminSettings.newBuilder()
.setTransportChannelProvider(channelProvider)
.setCredentialsProvider(credentialsProvider)
.build());
}
static TopicAdminClient createTopicAdminClient() throws IOException {
return TopicAdminClient.create(
TopicAdminSettings.newBuilder()
.setTransportChannelProvider(channelProvider)
.setCredentialsProvider(credentialsProvider)
.build());
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testPubSub() {
Awaitility.await()
.atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
}
}
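For reference, a minimal sketch of the publisher side of the same emulator setup: it reuses the channelProvider and credentialsProvider built by createChannel() above and publishes one message to the test topic. The helper name publishSample and the JSON payload are illustrative assumptions, not part of the test.

import com.google.cloud.pubsub.v1.Publisher;
import com.google.protobuf.ByteString;

// Hypothetical helper, assuming the providers created by createChannel() in PubSubIT.
static String publishSample(TransportChannelProvider channelProvider, CredentialsProvider credentialsProvider) throws Exception {
    TopicName topic = TopicName.of(ServiceOptions.getDefaultProjectId(), STREAM_NAME);
    Publisher publisher = Publisher.newBuilder(topic)
            .setChannelProvider(channelProvider)
            .setCredentialsProvider(credentialsProvider)
            .build();
    try {
        PubsubMessage message = PubsubMessage.newBuilder()
                .setData(ByteString.copyFromUtf8("{\"id\": 1}")) // made-up payload
                .build();
        return publisher.publish(message).get(); // blocks until the emulator assigns a message id
    }
    finally {
        publisher.shutdown();
    }
}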

View File

@@ -1,164 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import com.google.cloud.ServiceOptions;
import com.google.cloud.pubsub.v1.AckReplyConsumer;
import com.google.cloud.pubsub.v1.MessageReceiver;
import com.google.cloud.pubsublite.AdminClient;
import com.google.cloud.pubsublite.AdminClientSettings;
import com.google.cloud.pubsublite.CloudRegion;
import com.google.cloud.pubsublite.CloudZone;
import com.google.cloud.pubsublite.ProjectId;
import com.google.cloud.pubsublite.SubscriptionName;
import com.google.cloud.pubsublite.SubscriptionPath;
import com.google.cloud.pubsublite.TopicName;
import com.google.cloud.pubsublite.TopicPath;
import com.google.cloud.pubsublite.cloudpubsub.FlowControlSettings;
import com.google.cloud.pubsublite.cloudpubsub.Subscriber;
import com.google.cloud.pubsublite.cloudpubsub.SubscriberSettings;
import com.google.cloud.pubsublite.proto.Subscription;
import com.google.pubsub.v1.PubsubMessage;
import io.debezium.server.DebeziumServer;
import io.debezium.server.TestConfigSource;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from PostgreSQL database and writing to a Google Cloud PubSub Lite stream.
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@EnabledIfSystemProperty(named = "debezium.sink.type", matches = "pubsublite")
public class PubSubLiteIT {
private static final int MESSAGE_COUNT = 4;
private static final String STREAM_NAME = "testc.inventory.customers";
private static final String SUBSCRIPTION_NAME = "testsubs";
private static final String cloudRegion = "us-central1";
private static final char zoneId = 'b';
protected static Subscriber subscriber;
private static final String projectId = ServiceOptions.getDefaultProjectId();
private static final SubscriptionPath subscriptionPath = SubscriptionPath.newBuilder()
.setLocation(CloudZone.of(CloudRegion.of(cloudRegion), zoneId))
.setProject(ProjectId.of(projectId))
.setName(SubscriptionName.of(SUBSCRIPTION_NAME))
.build();
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(PubSubLiteTestConfigSource.OFFSET_STORE_PATH);
}
@AfterAll
static void stop() throws IOException {
if (subscriber != null) {
subscriber.stopAsync();
subscriber.awaitTerminated();
AdminClientSettings adminClientSettings = AdminClientSettings.newBuilder().setRegion(CloudRegion.of(cloudRegion)).build();
try (AdminClient adminClient = AdminClient.create(adminClientSettings)) {
adminClient.deleteSubscription(subscriptionPath).get();
}
catch (ExecutionException | InterruptedException e) {
Testing.printError(e);
}
}
}
@Inject
DebeziumServer server;
private static final List<PubsubMessage> messages = Collections.synchronizedList(new ArrayList<>());
class TestMessageReceiver implements MessageReceiver {
@Override
public void receiveMessage(PubsubMessage message, AckReplyConsumer consumer) {
Testing.print("Message arrived: " + message);
messages.add(message);
consumer.ack();
}
}
void setupDependencies(@Observes ConnectorStartedEvent event) throws IOException {
Testing.Print.enable();
TopicPath topicPath = TopicPath.newBuilder()
.setProject(ProjectId.of(projectId))
.setLocation(CloudZone.of(CloudRegion.of(cloudRegion), zoneId))
.setName(TopicName.of(STREAM_NAME))
.build();
Subscription subscription = Subscription.newBuilder()
.setDeliveryConfig(
Subscription.DeliveryConfig.newBuilder()
.setDeliveryRequirement(Subscription.DeliveryConfig.DeliveryRequirement.DELIVER_IMMEDIATELY))
.setName(subscriptionPath.toString())
.setTopic(topicPath.toString())
.build();
AdminClientSettings adminClientSettings = AdminClientSettings.newBuilder().setRegion(CloudRegion.of(cloudRegion)).build();
FlowControlSettings flowControlSettings = FlowControlSettings.builder()
.setBytesOutstanding(10 * 1024 * 1024L)
.setMessagesOutstanding(1000L)
.build();
SubscriberSettings subscriberSettings = SubscriberSettings.newBuilder()
.setSubscriptionPath(subscriptionPath)
.setReceiver(new TestMessageReceiver())
.setPerPartitionFlowControlSettings(flowControlSettings)
.build();
try (AdminClient adminClient = AdminClient.create(adminClientSettings)) {
adminClient.createSubscription(subscription).get();
}
catch (ExecutionException | InterruptedException e) {
Testing.printError(e);
}
// assign the shared field (instead of shadowing it) so that stop() can terminate the subscriber
subscriber = Subscriber.create(subscriberSettings);
subscriber.startAsync().awaitRunning();
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw (Exception) event.getError().get();
}
}
@Test
public void testPubSubLite() {
Awaitility.await()
.atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds()))
.until(() -> messages.size() >= MESSAGE_COUNT);
assertThat(messages.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
}
}

View File

@@ -1,39 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class PubSubLiteTestConfigSource extends TestConfigSource {
public PubSubLiteTestConfigSource() {
Map<String, String> pubsubLiteTest = new HashMap<>();
pubsubLiteTest.put("debezium.sink.type", "pubsublite");
pubsubLiteTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
pubsubLiteTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
pubsubLiteTest.put("debezium.source.offset.flush.interval.ms", "0");
pubsubLiteTest.put("debezium.source.topic.prefix", "testc");
pubsubLiteTest.put("debezium.source.schema.include.list", "inventory");
pubsubLiteTest.put("debezium.source.table.include.list", "inventory.customers");
config = pubsubLiteTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,38 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class PubSubTestConfigSource extends TestConfigSource {
public PubSubTestConfigSource() {
Map<String, String> pubsubTest = new HashMap<>();
pubsubTest.put("debezium.sink.type", "pubsub");
pubsubTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
pubsubTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
pubsubTest.put("debezium.source.offset.flush.interval.ms", "0");
pubsubTest.put("debezium.source.topic.prefix", "testc");
pubsubTest.put("debezium.source.schema.include.list", "inventory");
pubsubTest.put("debezium.source.table.include.list", "inventory.customers");
config = pubsubTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,47 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pubsub;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.testcontainers.containers.PubSubEmulatorContainer;
import org.testcontainers.utility.DockerImageName;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class PubSubTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public PubSubEmulatorContainer emulator = new PubSubEmulatorContainer(
DockerImageName.parse("gcr.io/google.com/cloudsdktool/cloud-sdk:380.0.0-emulators"));
private static String endpoint;
@Override
public Map<String, String> start() {
emulator.start();
Map<String, String> params = new ConcurrentHashMap<>();
endpoint = emulator.getEmulatorEndpoint();
params.put("debezium.sink.pubsub.address", endpoint);
return params;
}
@Override
public void stop() {
try {
if (emulator != null) {
emulator.stop();
}
}
catch (Exception e) {
// ignored
}
}
public static String getEmulatorEndpoint() {
return endpoint;
}
}

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@@ -1,176 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-pulsar</artifactId>
<name>Debezium Server Pulsar Sink Adapter</name>
<packaging>jar</packaging>
<properties>
<pulsar.port.native>6650</pulsar.port.native>
<pulsar.port.http>8080</pulsar.port.http>
</properties>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>org.apache.pulsar</groupId>
<artifactId>pulsar-client</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
<pulsar.port.native>${pulsar.port.native}</pulsar.port.native>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,154 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pulsar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.TypedMessageBuilder;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
/**
* Implementation of the consumer that delivers the messages into a Pulsar destination.
*
* @author Jiri Pechanec
*
*/
@Named("pulsar")
@Dependent
public class PulsarChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(PulsarChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.pulsar.";
private static final String PROP_CLIENT_PREFIX = PROP_PREFIX + "client.";
private static final String PROP_PRODUCER_PREFIX = PROP_PREFIX + "producer.";
public interface ProducerBuilder {
Producer<Object> get(String topicName, Object value);
}
private final Map<String, Producer<?>> producers = new HashMap<>();
private PulsarClient pulsarClient;
private Map<String, Object> producerConfig;
@ConfigProperty(name = PROP_PREFIX + "null.key", defaultValue = "default")
String nullKey;
@ConfigProperty(name = PROP_PREFIX + "tenant", defaultValue = "public")
String pulsarTenant;
@ConfigProperty(name = PROP_PREFIX + "namespace", defaultValue = "default")
String pulsarNamespace;
@PostConstruct
void connect() {
final Config config = ConfigProvider.getConfig();
try {
pulsarClient = PulsarClient.builder()
.loadConf(getConfigSubset(config, PROP_CLIENT_PREFIX))
.build();
}
catch (PulsarClientException e) {
throw new DebeziumException(e);
}
producerConfig = getConfigSubset(config, PROP_PRODUCER_PREFIX);
}
@PreDestroy
void close() {
producers.values().forEach(producer -> {
try {
producer.close();
}
catch (Exception e) {
LOGGER.warn("Exception while closing producer: {}", e);
}
});
try {
pulsarClient.close();
}
catch (Exception e) {
LOGGER.warn("Exception while closing client: {}", e);
}
}
private Producer<?> createProducer(String topicName, Object value) {
final String topicFullName = pulsarTenant + "/" + pulsarNamespace + "/" + topicName;
try {
if (value instanceof String) {
return pulsarClient.newProducer(Schema.STRING)
.loadConf(producerConfig)
.topic(topicFullName)
.create();
}
else {
return pulsarClient.newProducer()
.loadConf(producerConfig)
.topic(topicFullName)
.create();
}
}
catch (PulsarClientException e) {
throw new DebeziumException(e);
}
}
@SuppressWarnings("unchecked")
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
for (ChangeEvent<Object, Object> record : records) {
LOGGER.trace("Received event '{}'", record);
final String topicName = streamNameMapper.map(record.destination());
final Producer<?> producer = producers.computeIfAbsent(topicName, (topic) -> createProducer(topic, record.value()));
final String key = (record.key()) == null ? nullKey : getString(record.key());
@SuppressWarnings("rawtypes")
final TypedMessageBuilder message;
if (record.value() instanceof String) {
message = producer.newMessage(Schema.STRING);
}
else {
message = producer.newMessage();
}
message
.key(key)
.value(record.value());
try {
final MessageId messageId = message.send();
LOGGER.trace("Sent message with id: {}", messageId);
}
catch (PulsarClientException e) {
throw new DebeziumException(e);
}
committer.markProcessed(record);
}
committer.markBatchFinished();
}
}
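As a point of reference, a minimal sketch of the plain Pulsar client calls that PulsarChangeConsumer wraps: build a client, create a String-schema producer for a tenant/namespace-qualified topic, and send one keyed message. The service URL, topic, key and payload values are illustrative assumptions, and the fragment is assumed to run inside a method that declares PulsarClientException.

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

// Hypothetical standalone usage, assuming a local standalone broker on the default port.
try (PulsarClient client = PulsarClient.builder()
        .serviceUrl("pulsar://localhost:6650")
        .build()) {
    Producer<String> producer = client.newProducer(Schema.STRING)
            .topic("public/default/testc.inventory.customers")
            .create();
    producer.newMessage()
            .key("1")
            .value("{\"id\": 1}")
            .send(); // synchronous send, mirroring message.send() in handleBatch()
    producer.close();
}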

View File

@@ -1,124 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pulsar;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import javax.enterprise.event.Observes;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;
import org.awaitility.Awaitility;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.junit.jupiter.api.Test;
import io.debezium.connector.postgresql.connection.PostgresConnection;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.server.TestConfigSource;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.server.events.ConnectorStartedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
/**
* Integration test that verifies basic reading from PostgreSQL database and writing to an Apache Pulsar topic.
*
* @author Jiri Pechanec
*/
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(PulsarTestResourceLifecycleManager.class)
public class PulsarIT {
private static final int MESSAGE_COUNT = 4;
private static final String TOPIC_NAME = "testc.inventory.customers";
private static final String NOKEY_TOPIC_NAME = "testc.inventory.nokey";
@ConfigProperty(name = "debezium.source.database.hostname")
String dbHostname;
@ConfigProperty(name = "debezium.source.database.port")
String dbPort;
@ConfigProperty(name = "debezium.source.database.user")
String dbUser;
@ConfigProperty(name = "debezium.source.database.password")
String dbPassword;
@ConfigProperty(name = "debezium.source.database.dbname")
String dbName;
protected static PulsarClient pulsarClient;
{
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(PulsarTestConfigSource.OFFSET_STORE_PATH);
}
void setupDependencies(@Observes ConnectorStartedEvent event) throws IOException {
Testing.Print.enable();
pulsarClient = PulsarClient.builder()
.serviceUrl(PulsarTestResourceLifecycleManager.getPulsarServiceUrl())
.build();
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw new RuntimeException(event.getError().get());
}
}
@Test
public void testPulsar() throws Exception {
Awaitility.await().atMost(Duration.ofSeconds(PulsarTestConfigSource.waitForSeconds())).until(() -> {
return pulsarClient != null;
});
final Consumer<String> consumer = pulsarClient.newConsumer(Schema.STRING)
.topic(TOPIC_NAME)
.subscriptionName("test-" + UUID.randomUUID())
.subscribe();
final List<Message<String>> records = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(PulsarTestConfigSource.waitForSeconds())).until(() -> {
records.add(consumer.receive());
return records.size() >= MESSAGE_COUNT;
});
final JdbcConfiguration config = JdbcConfiguration.create()
.with("hostname", dbHostname)
.with("port", dbPort)
.with("user", dbUser)
.with("password", dbPassword)
.with("dbname", dbName)
.build();
try (PostgresConnection connection = new PostgresConnection(config, "Debezium Pulsar Test")) {
connection.execute(
"CREATE TABLE inventory.nokey (val INT);",
"INSERT INTO inventory.nokey VALUES (1)",
"INSERT INTO inventory.nokey VALUES (2)",
"INSERT INTO inventory.nokey VALUES (3)",
"INSERT INTO inventory.nokey VALUES (4)");
}
final Consumer<String> nokeyConsumer = pulsarClient.newConsumer(Schema.STRING)
.topic(NOKEY_TOPIC_NAME)
.subscriptionName("test-" + UUID.randomUUID())
.subscribe();
final List<Message<String>> nokeyRecords = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(PulsarTestConfigSource.waitForSeconds())).until(() -> {
nokeyRecords.add(nokeyConsumer.receive());
return nokeyRecords.size() >= MESSAGE_COUNT;
});
}
}

View File

@@ -1,38 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pulsar;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class PulsarTestConfigSource extends TestConfigSource {
public PulsarTestConfigSource() {
Map<String, String> pulsarTest = new HashMap<>();
pulsarTest.put("debezium.sink.type", "pulsar");
pulsarTest.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
pulsarTest.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
pulsarTest.put("debezium.source.offset.flush.interval.ms", "0");
pulsarTest.put("debezium.source.topic.prefix", "testc");
pulsarTest.put("debezium.source.schema.include.list", "inventory");
pulsarTest.put("debezium.source.table.include.list", "inventory.customers,inventory.nokey");
config = pulsarTest;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@@ -1,58 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.pulsar;
import java.time.Duration;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.testcontainers.containers.BindMode;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.Wait;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class PulsarTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
private static final String PULSAR_VERSION = "2.5.2";
public static final int PULSAR_PORT = 6650;
public static final int PULSAR_HTTP_PORT = 8080;
public static final String PULSAR_IMAGE = "apachepulsar/pulsar:" + PULSAR_VERSION;
private static final GenericContainer<?> container = new GenericContainer<>(PULSAR_IMAGE)
.withStartupTimeout(Duration.ofSeconds(90))
.waitingFor(Wait.forLogMessage(".*messaging service is ready.*", 1))
.withCommand("bin/pulsar", "standalone")
.withClasspathResourceMapping("/docker/conf/", "/pulsar/conf", BindMode.READ_ONLY)
.withExposedPorts(PULSAR_PORT, PULSAR_HTTP_PORT);
@Override
public Map<String, String> start() {
container.start();
Map<String, String> params = new ConcurrentHashMap<>();
params.put("debezium.sink.pulsar.client.serviceUrl", getPulsarServiceUrl());
return params;
}
@Override
public void stop() {
try {
if (container != null) {
container.stop();
}
}
catch (Exception e) {
// ignored
}
}
public static String getPulsarServiceUrl() {
return "pulsar://localhost:" + container.getMappedPort(PULSAR_PORT);
}
}

View File

@@ -1,54 +0,0 @@
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Set JAVA_HOME here to override the environment setting
# JAVA_HOME=
# default settings for starting bookkeeper
# Configuration file of settings used in bookie server
BOOKIE_CONF=${BOOKIE_CONF:-"$BK_HOME/conf/bookkeeper.conf"}
# Log4j configuration file
# BOOKIE_LOG_CONF=
# Logs location
# BOOKIE_LOG_DIR=
# Memory size options
BOOKIE_MEM=${BOOKIE_MEM:-${PULSAR_MEM:-"-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g"}}
# Garbage collection options
BOOKIE_GC=${BOOKIE_GC:-"-XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB"}
# Extra options to be passed to the jvm
BOOKIE_EXTRA_OPTS="${BOOKIE_EXTRA_OPTS} ${BOOKIE_MEM} ${BOOKIE_GC} -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.maxCapacity.default=1000 -Dio.netty.recycler.linkCapacity=1024"
# Add extra paths to the bookkeeper classpath
# BOOKIE_EXTRA_CLASSPATH=
#Folder where the Bookie server PID file should be stored
#BOOKIE_PID_DIR=
#Wait time before forcefully killing the Bookie server instance, if the stop is not successful
#BOOKIE_STOP_TIMEOUT=
#Entry formatter class to format entries.
#ENTRY_FORMATTER_CLASS=

View File

@@ -1,663 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
## Bookie settings
#############################################################################
## Server parameters
#############################################################################
# Port that the bookie server listens on
bookiePort=3181
# Directories where BookKeeper outputs its write ahead log.
# Multiple directories can be defined to store write ahead logs, separated by ','.
# For example:
# journalDirectories=/tmp/bk-journal1,/tmp/bk-journal2
# If journalDirectories is set, bookies will skip journalDirectory and use
# the directories in this setting.
# journalDirectories=/tmp/bk-journal
# Directory Bookkeeper outputs its write ahead log
# @deprecated since 4.5.0. journalDirectories is preferred over journalDirectory.
journalDirectory=data/bookkeeper/journal
# Configure the bookie to allow/disallow multiple ledger/index/journal directories
# in the same filesystem disk partition
# allowMultipleDirsUnderSameDiskPartition=false
# Minimum safe usable size (in bytes) that must be available in the index directory for the bookie to create
# an index file while replaying the journal when the bookie starts in read-only mode
minUsableSizeForIndexFileCreation=1073741824
# Set the network interface that the bookie should listen on.
# If not set, the bookie will listen on all interfaces.
# listeningInterface=eth0
# Configure a specific hostname or IP address that the bookie should use to advertise itself to
# clients. If not set, bookie will advertise its own IP address or hostname, depending on the
# listeningInterface and useHostNameAsBookieID settings.
advertisedAddress=
# Whether the bookie is allowed to use a loopback interface as its primary
# interface (i.e. the interface it uses to establish its identity).
# By default, loopback interfaces are not allowed as the primary
# interface.
# Using a loopback interface as the primary interface usually indicates
# a configuration error. For example, its fairly common in some VPS setups
# to not configure a hostname, or to have the hostname resolve to
# 127.0.0.1. If this is the case, then all bookies in the cluster will
# establish their identities as 127.0.0.1:3181, and only one will be able
# to join the cluster. For VPSs configured like this, you should explicitly
# set the listening interface.
allowLoopback=false
# Interval to watch whether bookie is dead or not, in milliseconds
bookieDeathWatchInterval=1000
# When entryLogPerLedgerEnabled is enabled, a checkpoint doesn't happen
# when a new active entrylog is created / previous one is rolled over.
# Instead SyncThread checkpoints periodically with 'flushInterval' delay
# (in milliseconds) in between executions. Checkpoint flushes both ledger
# entryLogs and ledger index pages to disk.
# Flushing entrylog and index files will introduce much random disk I/O.
# If the journal dir and ledger dirs are each on different devices,
# flushing would not affect performance. But if the journal dir
# and ledger dirs are on the same device, performance degrades significantly
# with too frequent flushing. You can consider incrementing the flush interval
# to get better performance, but you will need more time for bookie
# server restart after a failure.
# This config is used only when entryLogPerLedgerEnabled is enabled.
flushInterval=60000
# Allow the expansion of bookie storage capacity. Newly added ledger
# and index dirs must be empty.
# allowStorageExpansion=false
# Whether the bookie should use its hostname to register with the
# co-ordination service(eg: Zookeeper service).
# When false, bookie will use its ipaddress for the registration.
# Defaults to false.
useHostNameAsBookieID=false
# Whether the bookie is allowed to use an ephemeral port (port 0) as its
# server port. By default, an ephemeral port is not allowed.
# Using an ephemeral port as the service port usually indicates a configuration
# error. However, in unit tests, using an ephemeral port will address port
# conflict problems and allow running tests in parallel.
# allowEphemeralPorts=false
# Whether allow the bookie to listen for BookKeeper clients executed on the local JVM.
# enableLocalTransport=false
# Whether allow the bookie to disable bind on network interfaces,
# this bookie will be available only to BookKeeper clients executed on the local JVM.
# disableServerSocketBind=false
# The number of bytes we should use as chunk allocation for
# org.apache.bookkeeper.bookie.SkipListArena
# skipListArenaChunkSize=4194304
# The max size we should allocate from the skiplist arena. Allocations
# larger than this should be allocated directly by the VM to avoid fragmentation.
# skipListArenaMaxAllocSize=131072
# The bookie authentication provider factory class name.
# If this is null, no authentication will take place.
# bookieAuthProviderFactoryClass=null
#############################################################################
## Garbage collection settings
#############################################################################
# Interval to trigger the next garbage collection, in milliseconds
# Since garbage collection runs in the background, too frequent gc
# will hurt performance. It is better to use a higher gc
# interval if there is enough disk capacity.
gcWaitTime=900000
# Interval to trigger the next garbage collection of overreplicated
# ledgers, in milliseconds [Default: 1 day]. This should not be run very frequently
# since we read the metadata for all the ledgers on the bookie from zk
gcOverreplicatedLedgerWaitTime=86400000
# Number of threads that should handle write requests. if zero, the writes would
# be handled by netty threads directly.
numAddWorkerThreads=0
# Number of threads that should handle read requests. if zero, the reads would
# be handled by netty threads directly.
numReadWorkerThreads=8
# Number of threads that should be used for high priority requests
# (i.e. recovery reads and adds, and fencing).
numHighPriorityWorkerThreads=8
# If read worker threads are enabled, limit the number of pending requests to
# avoid the executor queue growing indefinitely
maxPendingReadRequestsPerThread=2500
# If add worker threads are enabled, limit the number of pending requests to
# avoid the executor queue growing indefinitely
maxPendingAddRequestsPerThread=10000
# Whether force compaction is allowed when the disk is full or almost full.
# Forcing GC may get some space back, but may also fill up disk space more quickly.
# This is because new log files are created before GC, while old garbage
# log files are deleted after GC.
# isForceGCAllowWhenNoSpace=false
# True if the bookie should double check readMetadata prior to gc
# verifyMetadataOnGC=false
#############################################################################
## TLS settings
#############################################################################
# TLS Provider (JDK or OpenSSL).
# tlsProvider=OpenSSL
# The path to the class that provides security.
# tlsProviderFactoryClass=org.apache.bookkeeper.security.SSLContextFactory
# Type of security used by server.
# tlsClientAuthentication=true
# Bookie Keystore type.
# tlsKeyStoreType=JKS
# Bookie Keystore location (path).
# tlsKeyStore=null
# Bookie Keystore password path, if the keystore is protected by a password.
# tlsKeyStorePasswordPath=null
# Bookie Truststore type.
# tlsTrustStoreType=null
# Bookie Truststore location (path).
# tlsTrustStore=null
# Bookie Truststore password path, if the trust store is protected by a password.
# tlsTrustStorePasswordPath=null
#############################################################################
## Long poll request parameter settings
#############################################################################
# The number of threads that should handle long poll requests.
# numLongPollWorkerThreads=10
# The tick duration in milliseconds for long poll requests.
# requestTimerTickDurationMs=10
# The number of ticks per wheel for the long poll request timer.
# requestTimerNumTicks=1024
#############################################################################
## AutoRecovery settings
#############################################################################
# The interval between auditor bookie checks.
# The auditor bookie check checks ledger metadata to see which bookies should
# contain entries for each ledger. If a bookie which should contain entries is
# unavailable, then the ledger containing that entry is marked for recovery.
# Setting this to 0 disables the periodic check. Bookie checks will still
# run when a bookie fails.
# The interval is specified in seconds.
auditorPeriodicBookieCheckInterval=86400
# The number of entries that a replication will rereplicate in parallel.
rereplicationEntryBatchSize=100
# Auto-replication
# The grace period, in seconds, that the replication worker waits before fencing and
# replicating a ledger fragment that's still being written to upon bookie failure.
openLedgerRereplicationGracePeriod=30
# Whether the bookie itself can start auto-recovery service also or not
autoRecoveryDaemonEnabled=true
# How long to wait, in seconds, before starting auto recovery of a lost bookie
lostBookieRecoveryDelay=0
#############################################################################
## Netty server settings
#############################################################################
# This setting is used to enable/disable Nagle's algorithm, which is a means of
# improving the efficiency of TCP/IP networks by reducing the number of packets
# that need to be sent over the network.
# If you are sending many small messages, such that more than one can fit in
# a single IP packet, setting server.tcpnodelay to false to enable Nagle algorithm
# can provide better performance.
# Default value is true.
serverTcpNoDelay=true
# This setting is used to send keep-alive messages on connection-oriented sockets.
# serverSockKeepalive=true
# The socket linger timeout on close.
# When enabled, a close or shutdown will not return until all queued messages for
# the socket have been successfully sent or the linger timeout has been reached.
# Otherwise, the call returns immediately and the closing is done in the background.
# serverTcpLinger=0
# The Recv ByteBuf allocator initial buf size.
# byteBufAllocatorSizeInitial=65536
# The Recv ByteBuf allocator min buf size.
# byteBufAllocatorSizeMin=65536
# The Recv ByteBuf allocator max buf size.
# byteBufAllocatorSizeMax=1048576
#############################################################################
## Journal settings
#############################################################################
# The journal format version to write.
# Available formats are 1-6:
# 1: no header
# 2: a header section was added
# 3: ledger key was introduced
# 4: fencing key was introduced
# 5: expanding header to 512 and padding writes to align sector size configured by `journalAlignmentSize`
# 6: persisting explicitLac is introduced
# By default, it is `6`.
# If you'd like to disable persisting ExplicitLac, you can set this config to < `6` and also
# fileInfoFormatVersionToWrite should be set to 0. If there is mismatch then the serverconfig is considered invalid.
# You can disable `padding-writes` by setting journal version back to `4`. This feature is available in 4.5.0
# and onward versions.
journalFormatVersionToWrite=5
# Max file size of journal file, in mega bytes
# A new journal file will be created when the old one reaches the file size limitation
journalMaxSizeMB=2048
# Max number of old journal files to keep
# Keeping a number of old journal files helps data recovery in special cases
journalMaxBackups=5
# How much space should we pre-allocate at a time in the journal.
journalPreAllocSizeMB=16
# Size of the write buffers used for the journal
journalWriteBufferSizeKB=64
# Should we remove pages from page cache after force write
journalRemoveFromPageCache=true
# Should the data be fsynced on journal before acknowledgment.
# By default, data sync is enabled to guarantee durability of writes.
# Beware: while disabling data sync in the Bookie journal might improve the bookie write performance, it will also
# introduce the possibility of data loss. With no sync, the journal entries are written in the OS page cache but
# not flushed to disk. In case of power failure, the affected bookie might lose the unflushed data. If the ledger
# is replicated to multiple bookies, the chances of data loss are reduced though still present.
journalSyncData=true
# Should we group journal force writes, which optimize group commit
# for higher throughput
journalAdaptiveGroupWrites=true
# Maximum latency to impose on a journal write to achieve grouping
journalMaxGroupWaitMSec=1
# Maximum writes to buffer to achieve grouping
journalBufferedWritesThreshold=524288
# The number of threads that should handle journal callbacks
numJournalCallbackThreads=8
# All the journal writes and commits should be aligned to given size.
# If not, zeros will be padded to align to given size.
# It only takes effects when journalFormatVersionToWrite is set to 5
journalAlignmentSize=4096
# Maximum entries to buffer to impose on a journal write to achieve grouping.
# journalBufferedEntriesThreshold=0
# If we should flush the journal when journal queue is empty
journalFlushWhenQueueEmpty=false
#############################################################################
## Ledger storage settings
#############################################################################
# Ledger storage implementation class
ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
# Directory where BookKeeper outputs ledger snapshots
# Multiple directories can be defined to store snapshots, separated by ','
# For example:
# ledgerDirectories=/tmp/bk1-data,/tmp/bk2-data
#
# Ideally ledger dirs and journal dir are each on a different device,
# which reduces the contention between random I/O and sequential writes.
# It is possible to run with a single disk, but performance will be significantly lower.
ledgerDirectories=data/bookkeeper/ledgers
# Directories to store index files. If not specified, will use ledgerDirectories to store.
# indexDirectories=data/bookkeeper/ledgers
# Interval at which the auditor will do a check of all ledgers in the cluster.
# By default this runs once a week. The interval is set in seconds.
# To disable the periodic check completely, set this to 0.
# Note that periodic checking will put extra load on the cluster, so it should
# not be run more frequently than once a day.
auditorPeriodicCheckInterval=604800
# Whether sorted-ledger storage is enabled (default true)
# sortedLedgerStorageEnabled=true
# The skip list data size limitation (default 64MB) in EntryMemTable
# skipListSizeLimit=67108864L
#############################################################################
## Ledger cache settings
#############################################################################
# Max number of ledger index files that can be opened in the bookie server
# If the number of ledger index files reaches this limit, the bookie
# server starts to swap some ledgers from memory to disk.
# Too frequent swapping will affect performance. You can tune this number
# to gain performance according to your requirements.
openFileLimit=0
# The fileinfo format version to write.
# Available formats are 0-1:
# 0: Initial version
# 1: persisting explicitLac is introduced
# By default, it is `1`.
# If you'd like to disable persisting ExplicitLac, you can set this config to 0 and
# also journalFormatVersionToWrite should be set to < 6. If there is mismatch then the
# serverconfig is considered invalid.
fileInfoFormatVersionToWrite=0
# Size of an index page in the ledger cache, in bytes
# A larger index page can improve performance when writing pages to disk,
# which is efficient when you have a small number of ledgers and these
# ledgers have a similar number of entries.
# If you have a large number of ledgers and each ledger has fewer entries,
# a smaller index page will improve memory usage.
# pageSize=8192
# How many index pages are provided in the ledger cache
# If the number of index pages reaches this limit, the bookie server
# starts to swap some ledgers from memory to disk. You can increment
# this value when you find that swapping becomes more frequent. But make sure
# pageLimit*pageSize is not more than the JVM max memory limitation,
# otherwise you would get an OutOfMemoryException.
# In general, incrementing pageLimit and using a smaller index page gives
# better performance when there is a large number of ledgers with fewer entries.
# If pageLimit is -1, the bookie server will use 1/3 of the JVM memory to compute
# the limit on the number of index pages.
pageLimit=0
#############################################################################
## Ledger manager settings
#############################################################################
# Ledger Manager Class
# What kind of ledger manager is used to manage how ledgers are stored, managed
# and garbage collected. See 'BookKeeper Internals' for detailed info.
# ledgerManagerFactoryClass=org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory
# @Deprecated - `ledgerManagerType` is deprecated in favor of using `ledgerManagerFactoryClass`.
# ledgerManagerType=hierarchical
# Root Zookeeper path to store ledger metadata
# This parameter is used by zookeeper-based ledger manager as a root znode to
# store all ledgers.
zkLedgersRootPath=/ledgers
#############################################################################
## Entry log settings
#############################################################################
# Max file size of entry logger, in bytes
# A new entry log file will be created when the old one reaches the file size limitation
logSizeLimit=1073741824
# Enable/Disable entry logger preallocation
entryLogFilePreallocationEnabled=true
# Entry log flush interval in bytes.
# Default is 0. 0 or less disables this feature and effectively flush
# happens on log rotation.
# Flushing in smaller chunks but more frequently reduces spikes in disk
# I/O. Flushing too frequently may also affect performance negatively.
# flushEntrylogBytes=0
# The number of bytes we should use as capacity for BufferedReadChannel. Default is 512 bytes.
readBufferSizeBytes=4096
# The number of bytes used as capacity for the write buffer. Default is 64KB.
writeBufferSizeBytes=65536
# Specifies if entryLog per ledger is enabled/disabled. If it is enabled, then there would be an
# active entrylog for each ledger. It would be ideal to enable this feature if the underlying
# storage device has multiple DiskPartitions or SSD and if in a given moment, entries of fewer
# number of active ledgers are written to a bookie.
# entryLogPerLedgerEnabled=false
#############################################################################
## Entry log compaction settings
#############################################################################
# Set the rate at which compaction will re-add entries. The unit is adds per second.
compactionRate=1000
# If bookie is using hostname for registration and in ledger metadata then
# whether to use short hostname or FQDN hostname. Defaults to false.
# useShortHostName=false
# Threshold of minor compaction
# Entry log files whose remaining size percentage falls below
# this threshold will be compacted in a minor compaction.
# If it is set to less than zero, the minor compaction is disabled.
minorCompactionThreshold=0.2
# Interval to run minor compaction, in seconds
# If it is set to less than zero, the minor compaction is disabled.
# Note: should be greater than gcWaitTime.
minorCompactionInterval=3600
# Set the maximum number of entries which can be compacted without flushing.
# When compacting, the entries are written to the entrylog and the new offsets
# are cached in memory. Once the entrylog is flushed the index is updated with
# the new offsets. This parameter controls the number of entries added to the
# entrylog before a flush is forced. A higher value for this parameter means
# more memory will be used for offsets. Each offset consists of 3 longs.
# This parameter should _not_ be modified unless you know what you're doing.
# The default is 100,000.
compactionMaxOutstandingRequests=100000
# Threshold of major compaction
# Entry log files whose remaining size percentage falls below
# this threshold will be compacted in a major compaction.
# Those entry log files whose remaining size percentage is still
# higher than the threshold will never be compacted.
# If it is set to less than zero, the major compaction is disabled.
majorCompactionThreshold=0.5
# Interval to run major compaction, in seconds
# If it is set to less than zero, the major compaction is disabled.
# Note: should be greater than gcWaitTime.
majorCompactionInterval=86400
# Throttle compaction by bytes or by entries.
isThrottleByBytes=false
# Set the rate at which compaction will re-add entries. The unit is adds per second.
compactionRateByEntries=1000
# Set the rate at which compaction will re-add entries. The unit is bytes added per second.
compactionRateByBytes=1000000
#############################################################################
## Statistics
#############################################################################
# Whether statistics are enabled
# enableStatistics=true
# Stats Provider Class (if statistics are enabled)
statsProviderClass=org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
# Default port for Prometheus metrics exporter
prometheusStatsHttpPort=8000
#############################################################################
## Read-only mode support
#############################################################################
# If all ledger directories configured are full, then support only read requests for clients.
# If "readOnlyModeEnabled=true" then on all ledger disks full, bookie will be converted
# to read-only mode and serve only read requests. Otherwise the bookie will be shutdown.
# By default this will be disabled.
readOnlyModeEnabled=true
# Whether the bookie is force started in read only mode or not
# forceReadOnlyBookie=false
# Persist the bookie status locally on the disks, so the bookies can keep their status upon restarts
# @Since 4.6
# persistBookieStatusEnabled=false
#############################################################################
## Disk utilization
#############################################################################
# For each ledger dir, maximum disk space which can be used.
# Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
# be written to that partition. If all ledger dir partitions are full, then the bookie
# will turn to read-only mode if 'readOnlyModeEnabled=true' is set, else it will
# shut down.
# Valid values should be in between 0 and 1 (exclusive).
diskUsageThreshold=0.95
# The disk free space low water mark threshold.
# Disk is considered full when usage threshold is exceeded.
# Disk returns back to non-full state when usage is below low water mark threshold.
# This prevents it from going back and forth between these states frequently
# when concurrent writes and compaction are happening. This also prevents the bookie from
# switching frequently between read-only and read-write states in the same cases.
# diskUsageWarnThreshold=0.95
# Set the disk free space low water mark threshold. Disk is considered full when
# usage threshold is exceeded. Disk returns back to non-full state when usage is
# below low water mark threshold. This prevents it from going back and forth
# between these states frequently when concurrent writes and compaction are
# happening. This also prevents the bookie from switching frequently between
# read-only and read-write states in the same cases.
# diskUsageLwmThreshold=0.90
# Disk check interval in milliseconds, interval to check the ledger dirs usage.
# Default is 10000
diskCheckInterval=10000
#############################################################################
## ZooKeeper parameters
#############################################################################
# A list of one or more servers on which Zookeeper is running.
# The server list can be comma separated values, for example:
# zkServers=zk1:2181,zk2:2181,zk3:2181
zkServers=localhost:2181
# ZooKeeper client session timeout in milliseconds
# Bookie server will exit if it receives SESSION_EXPIRED because it
# was partitioned off from ZooKeeper for more than the session timeout.
# JVM garbage collection or disk I/O can cause SESSION_EXPIRED.
# Incrementing this value could help avoid this issue.
zkTimeout=30000
# The Zookeeper client backoff retry start time in millis.
# zkRetryBackoffStartMs=1000
# The Zookeeper client backoff retry max time in millis.
# zkRetryBackoffMaxMs=10000
# Set ACLs on every node written on ZooKeeper, this way only allowed users
# will be able to read and write BookKeeper metadata stored on ZooKeeper.
# In order to make ACLs work you need to set up ZooKeeper JAAS authentication;
# all the bookies and clients need to share the same user, and this is usually
# done using Kerberos authentication. See the ZooKeeper documentation.
zkEnableSecurity=false
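# A minimal sketch of what the corresponding ZooKeeper JAAS client section might look like when
# zkEnableSecurity is turned on (the principal, keytab path and login module shown here are
# assumptions for illustration; this goes in the JAAS config, not in this file):
#
# Client {
#   com.sun.security.auth.module.Krb5LoginModule required
#   useKeyTab=true
#   keyTab="/etc/security/keytabs/bookie.keytab"
#   principal="bookkeeper/host.example.com@EXAMPLE.COM";
# };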
#############################################################################
## Server parameters
#############################################################################
# The flag enables/disables starting the admin http server. Default value is 'false'.
httpServerEnabled=false
# The http server port to listen on. Default value is 8080.
# Use `8000` as the port to keep it consistent with the Prometheus stats provider
httpServerPort=8000
# The http server class
httpServerClass=org.apache.bookkeeper.http.vertx.VertxHttpServer
# Configure a list of server components to enable and load on a bookie server.
# This provides a plugin mechanism to run extra services along with a bookie server.
#
# extraServerComponents=
#############################################################################
## DB Ledger storage configuration
#############################################################################
# These configs are used when the selected 'ledgerStorageClass' is
# org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
# Size of Write Cache. Memory is allocated from JVM direct memory.
# The write cache is used to buffer entries before flushing them into the entry log.
# For good performance, it should be big enough to hold a substantial amount
# of entries in the flush interval
# By default it will be allocated to 1/4th of the available direct memory
dbStorage_writeCacheMaxSizeMb=
# Size of Read cache. Memory is allocated from JVM direct memory.
# This read cache is pre-filled by doing read-ahead whenever a cache miss happens
# By default it will be allocated to 1/4th of the available direct memory
dbStorage_readAheadCacheMaxSizeMb=
# How many entries to pre-fill in cache after a read cache miss
dbStorage_readAheadCacheBatchSize=1000
## RocksDB specific configurations
## DbLedgerStorage uses RocksDB to store the indexes from
## (ledgerId, entryId) -> (entryLog, offset)
# Size of RocksDB block-cache. For best performance, this cache
# should be big enough to hold a significant portion of the index
# database which can reach ~2GB in some cases
# Default is to use 10% of the direct memory size
dbStorage_rocksDB_blockCacheSize=
# Other RocksDB specific tunables
dbStorage_rocksDB_writeBufferSizeMB=64
dbStorage_rocksDB_sstSizeInMB=64
dbStorage_rocksDB_blockSize=65536
dbStorage_rocksDB_bloomFilterBitsPerKey=10
dbStorage_rocksDB_numLevels=-1
dbStorage_rocksDB_numFilesInLevel0=4
dbStorage_rocksDB_maxSizeInLevel1MB=256
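# Illustrative sketch only (the sizes below are assumptions for a host with roughly 8 GB of JVM
# direct memory, not recommendations): sizing the DbLedgerStorage caches explicitly instead of
# relying on the fraction-of-direct-memory defaults described above:
# dbStorage_writeCacheMaxSizeMb=2048
# dbStorage_readAheadCacheMaxSizeMb=2048
# dbStorage_rocksDB_blockCacheSize=858993459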

View File

@ -1,936 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
### --- General broker settings --- ###
# Zookeeper quorum connection string
zookeeperServers=
# Configuration Store connection string
configurationStoreServers=
# Broker data port
brokerServicePort=6650
# Broker data port for TLS - By default TLS is disabled
brokerServicePortTls=
# Port to use to serve HTTP requests
webServicePort=8080
# Port to use to serve HTTPS requests - By default TLS is disabled
webServicePortTls=
# Hostname or IP address the service binds on, default is 0.0.0.0.
bindAddress=0.0.0.0
# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
advertisedAddress=
# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numIOThreads=
# Number of threads to use for HTTP requests processing. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numHttpServerThreads=
# Flag to control features that are meant to be used when running in standalone mode
isRunningStandalone=
# Name of the cluster to which this broker belongs
clusterName=
# Enable the cluster's failure domains, which can distribute brokers into logical regions
failureDomainsEnabled=false
# Zookeeper session timeout in milliseconds
zooKeeperSessionTimeoutMillis=30000
# ZooKeeper operation timeout in seconds
zooKeeperOperationTimeoutSeconds=30
# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed
brokerShutdownTimeoutMs=60000
# Enable backlog quota check. Enforces action on topic when the quota is reached
backlogQuotaCheckEnabled=true
# How often to check for topics that have reached the quota
backlogQuotaCheckIntervalInSeconds=60
# Default per-topic backlog quota limit; a value less than 0 means no limitation. Default is -1.
backlogQuotaDefaultLimitGB=-1
# Default backlog quota retention policy. Default is producer_request_hold
# 'producer_request_hold' Policy which holds producer's send request until the resource becomes available (or holding times out)
# 'producer_exception' Policy which throws javax.jms.ResourceAllocationException to the producer
# 'consumer_backlog_eviction' Policy which evicts the oldest message from the slowest consumer's backlog
backlogQuotaDefaultRetentionPolicy=producer_request_hold
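# Illustrative sketch only (the values are assumptions, not recommendations): cap the per-topic
# backlog at 10 GB and evict from the slowest consumer's backlog once the quota is reached:
# backlogQuotaDefaultLimitGB=10
# backlogQuotaDefaultRetentionPolicy=consumer_backlog_eviction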
# Default TTL for namespaces if a TTL is not already configured in the namespace policies. (disable the default TTL with value 0)
ttlDurationDefaultInSeconds=0
# Enable topic auto-creation when a new producer or consumer connects (disable auto-creation with value false)
allowAutoTopicCreation=true
# The type of topic that is allowed to be automatically created. (partitioned/non-partitioned)
allowAutoTopicCreationType=non-partitioned
# The default number of partitions for partitioned topics that are automatically created, if allowAutoTopicCreationType is partitioned.
defaultNumPartitions=1
# Enable the deletion of inactive topics
brokerDeleteInactiveTopicsEnabled=true
# How often to check for inactive topics
brokerDeleteInactiveTopicsFrequencySeconds=60
# Set the inactive topic delete mode. Default is delete_when_no_subscriptions
# 'delete_when_no_subscriptions' mode only deletes topics that have no subscriptions and no active producers
# 'delete_when_subscriptions_caught_up' mode only deletes topics whose subscriptions all have no backlog (caught up)
# and no active producers/consumers
brokerDeleteInactiveTopicsMode=delete_when_no_subscriptions
# Max duration of topic inactivity in seconds, default is not present
# If not present, 'brokerDeleteInactiveTopicsFrequencySeconds' will be used
# Topics that are inactive for longer than this value will be deleted
brokerDeleteInactiveTopicsMaxInactiveDurationSeconds=
# How frequently to proactively check and purge expired messages
messageExpiryCheckIntervalInMinutes=5
# How long to delay rewinding cursor and dispatching messages when active consumer is changed
activeConsumerFailoverDelayTimeMillis=1000
# How long after the last consumption before inactive subscriptions are deleted
# When it is 0, inactive subscriptions are not deleted automatically
subscriptionExpirationTimeMinutes=0
# Enable subscription message redelivery tracker to send redelivery count to consumer (default is enabled)
subscriptionRedeliveryTrackerEnabled=true
# How frequently to proactively check and purge expired subscription
subscriptionExpiryCheckIntervalInMinutes=5
# Enable Key_Shared subscription (default is enabled)
subscriptionKeySharedEnable=true
# Set the default behavior for message deduplication in the broker
# This can be overridden per-namespace. If enabled, broker will reject
# messages that were already stored in the topic
brokerDeduplicationEnabled=false
# Maximum number of producer information entries that will be
# persisted for deduplication purposes
brokerDeduplicationMaxNumberOfProducers=10000
# Number of entries after which a dedup info snapshot is taken.
# A larger interval will lead to fewer snapshots being taken, though it would
# increase the topic recovery time when the entries published after the
# snapshot need to be replayed.
brokerDeduplicationEntriesInterval=1000
# Time of inactivity after which the broker will discard the deduplication information
# relative to a disconnected producer. Default is 6 hours.
brokerDeduplicationProducerInactivityTimeoutMinutes=360
# When a namespace is created without specifying the number of bundles, this
# value will be used as the default
defaultNumberOfNamespaceBundles=4
# Enable check for minimum allowed client library version
clientLibraryVersionCheckEnabled=false
# Path for the file used to determine the rotation status for the broker when responding
# to service discovery health checks
statusFilePath=
# If true, (and ModularLoadManagerImpl is being used), the load manager will attempt to
# use only brokers running the latest software version (to minimize impact to bundles)
preferLaterVersions=false
# Max number of unacknowledged messages allowed for a consumer on a shared subscription. The broker will stop sending
# messages to the consumer once this limit is reached, until the consumer starts acknowledging messages back.
# Using a value of 0 disables the unacked-message limit check and the consumer can receive messages without any restriction
maxUnackedMessagesPerConsumer=50000
# Max number of unacknowledged messages allowed per shared subscription. The broker will stop dispatching messages to
# all consumers of the subscription once this limit is reached, until consumers start acknowledging messages back and
# the unacked count drops to limit/2. Using a value of 0 disables the unacked-message limit
# check and the dispatcher can dispatch messages without any restriction
maxUnackedMessagesPerSubscription=200000
# Max number of unacknowledged messages allowed per broker. Once this limit is reached, the broker will stop dispatching
# messages to all shared subscriptions which have a higher number of unacked messages, until those subscriptions start
# acknowledging messages back and the unacked count drops to limit/2. Using a value of 0 disables
# the unacked-message limit check and the broker doesn't block dispatchers
maxUnackedMessagesPerBroker=0
# Once the broker reaches the maxUnackedMessagesPerBroker limit, it blocks subscriptions which have more unacked messages
# than this percentage limit, and those subscriptions will not receive any new messages until they ack back
# limit/2 messages
maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16
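# Illustrative sketch only (the numbers are assumptions, not recommendations): a consistent set of
# unacked-message limits where the per-subscription cap is a few times the per-consumer cap and the
# broker-wide cap is left disabled:
# maxUnackedMessagesPerConsumer=20000
# maxUnackedMessagesPerSubscription=100000
# maxUnackedMessagesPerBroker=0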
# Tick time for the scheduled task that checks topic publish rate limiting across all topics.
# Reducing it to a lower value gives more accurate throttling of publishes, but
# uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
topicPublisherThrottlingTickTimeMillis=10
# Tick time for the scheduled task that checks broker publish rate limiting across all topics.
# Reducing it to a lower value gives more accurate throttling of publishes, but
# uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
brokerPublisherThrottlingTickTimeMillis=50
# Max rate (per second) of messages allowed to be published on a broker if broker publish rate limiting is enabled
# (Disable the message rate limit with value 0)
brokerPublisherThrottlingMaxMessageRate=0
# Max rate (bytes per second) allowed to be published on a broker if broker publish rate limiting is enabled.
# (Disable the byte rate limit with value 0)
brokerPublisherThrottlingMaxByteRate=0
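# Illustrative sketch only (the values are assumptions, not recommendations): cap broker-wide
# publish throughput at 10000 msg/s or 50 MB/s (52428800 bytes/s), whichever is reached first:
# brokerPublisherThrottlingTickTimeMillis=50
# brokerPublisherThrottlingMaxMessageRate=10000
# brokerPublisherThrottlingMaxByteRate=52428800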
# Too many subscribe requests from a consumer can cause the broker to rewind consumer cursors and load data from bookies,
# hence causing high network bandwidth usage.
# When a positive value is set, the broker will throttle the subscribe requests for one consumer.
# Otherwise, the throttling will be disabled. The default value of this setting is 0 - throttling is disabled.
subscribeThrottlingRatePerConsumer=0
# Rate period for {subscribeThrottlingRatePerConsumer}. Default is 30s.
subscribeRatePeriodPerConsumerInSecond=30
# Default messages-per-second dispatch throttling limit for every topic. Using a value of 0 disables the default
# message dispatch throttling
dispatchThrottlingRatePerTopicInMsg=0
# Default bytes-per-second dispatch throttling limit for every topic. Using a value of 0 disables the
# default message-byte dispatch throttling
dispatchThrottlingRatePerTopicInByte=0
# Default message dispatching throttling limit for a subscription.
# Using a value of 0 disables the default message dispatch throttling.
dispatchThrottlingRatePerSubscriptionInMsg=0
# Default message-bytes dispatching throttling limit for a subscription.
# Using a value of 0 disables the default message-byte dispatch throttling.
dispatchThrottlingRatePerSubscriptionInByte=0
# Default messages-per-second dispatch throttling limit for every replicator in replication.
# Using a value of 0 disables replication message dispatch throttling
dispatchThrottlingRatePerReplicatorInMsg=0
# Default bytes-per-second dispatch throttling limit for every replicator in replication.
# Using a value of 0 disables replication message-byte dispatch throttling
dispatchThrottlingRatePerReplicatorInByte=0
# Dispatch rate-limiting relative to publish rate.
# (Enabling this flag will make the broker dynamically update the dispatch rate relative to the publish rate:
# throttle-dispatch-rate = publish-rate + configured dispatch-rate)
dispatchThrottlingRateRelativeToPublishRate=false
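# Worked example of the formula above (the figures are assumptions for illustration): with a topic
# publishing at 1000 msg/s and a configured dispatch rate of 500 msg/s, the effective dispatch
# throttle becomes 1000 + 500 = 1500 msg/s while this flag is enabled.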
# By default we enable dispatch-throttling for both caught up consumers as well as consumers who have
# backlog.
dispatchThrottlingOnNonBacklogConsumerEnabled=true
# Max number of entries to read from bookkeeper. By default it is 100 entries.
dispatcherMaxReadBatchSize=100
# Min number of entries to read from bookkeeper. By default it is 1 entry.
# When an error occurs while reading entries from bookkeeper, the broker
# will back off the batch size to this minimum number.
dispatcherMinReadBatchSize=1
# Max number of entries to dispatch for a shared subscription. By default it is 20 entries.
dispatcherMaxRoundRobinBatchSize=20
# Max number of concurrent lookup requests the broker allows, to throttle heavy incoming lookup traffic
maxConcurrentLookupRequest=50000
# Max number of concurrent topic loading requests the broker allows, to control the number of zk-operations
maxConcurrentTopicLoadRequest=5000
# Max number of concurrent non-persistent messages that can be processed per connection
maxConcurrentNonPersistentMessagePerConnection=1000
# Number of worker threads to serve non-persistent topic
numWorkerThreadsForNonPersistentTopic=8
# Enable broker to load persistent topics
enablePersistentTopics=true
# Enable broker to load non-persistent topics
enableNonPersistentTopics=true
# Enable to run bookie along with broker
enableRunBookieTogether=false
# Enable to run bookie autorecovery along with broker
enableRunBookieAutoRecoveryTogether=false
# Max number of producers allowed to connect to a topic. Once this limit is reached, the broker will reject new producers
# until the number of connected producers decreases.
# Using a value of 0 disables the maxProducersPerTopic limit check.
maxProducersPerTopic=0
# Max number of consumers allowed to connect to a topic. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerTopic limit check.
maxConsumersPerTopic=0
# Max number of consumers allowed to connect to a subscription. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerSubscription limit check.
maxConsumersPerSubscription=0
# Max size of messages.
maxMessageSize=5242880
# Interval between checks to see if topics with compaction policies need to be compacted
brokerServiceCompactionMonitorIntervalInSeconds=60
# Whether to enable the delayed delivery for messages.
# If disabled, messages will be immediately delivered and there will
# be no tracking overhead.
delayedDeliveryEnabled=true
# Control the tick time for when retrying on delayed delivery,
# affecting the accuracy of the delivery time compared to the scheduled time.
# Default is 1 second.
delayedDeliveryTickTimeMillis=1000
# Enable tracking of replicated subscriptions state across clusters.
enableReplicatedSubscriptions=true
# Frequency of snapshots for replicated subscriptions tracking.
replicatedSubscriptionsSnapshotFrequencyMillis=1000
# Timeout for building a consistent snapshot for tracking replicated subscriptions state.
replicatedSubscriptionsSnapshotTimeoutSeconds=30
# Max number of snapshot to be cached per subscription.
replicatedSubscriptionsSnapshotMaxCachedPerSubscription=10
# Max memory size for the broker to handle messages being sent from producers.
# If the size of messages in processing exceeds this value, the broker will stop reading data
# from the connection. Messages "in processing" are messages that have been sent to the broker
# but for which the broker has not yet sent a response to the client, usually waiting to be written to bookies.
# It's shared across all the topics running in the same broker.
# Use -1 to disable the memory limitation. Default is 1/2 of direct memory.
maxMessagePublishBufferSizeInMB=
# Interval between checks to see if the message publish buffer size exceeds the max message publish buffer size
# Use 0 or negative number to disable the max publish buffer limiting.
messagePublishBufferCheckIntervalInMillis=100
# Interval between checks to see if consumed ledgers need to be trimmed
# Use 0 or negative number to disable the check
retentionCheckIntervalInSeconds=120
### --- Authentication --- ###
# Role names that are treated as "proxy roles". If the broker sees a request with
#role as proxyRoles - it will demand to see a valid original principal.
proxyRoles=
# If this flag is set then the broker authenticates the original Auth data
# else it just accepts the originalPrincipal and authorizes it (if required).
authenticateOriginalAuthData=false
# Deprecated - Use webServicePortTls and brokerServicePortTls instead
tlsEnabled=false
# Tls cert refresh duration in seconds (set 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Path for the trusted TLS certificate file.
# This cert is used to verify that any certs presented by connecting clients
# are signed by a certificate authority. If this verification
# fails, then the certs are untrusted and the connections are dropped.
tlsTrustCertsFilePath=
# Accept untrusted TLS certificate from client.
# If true, a client with a cert which cannot be verified with the
# 'tlsTrustCertsFilePath' cert will be allowed to connect to the server,
# though the cert will not be used for client authentication.
tlsAllowInsecureConnection=false
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# Examples:- [TLSv1.2, TLSv1.1, TLSv1]
tlsProtocols=
# Specify the tls cipher the broker will use to negotiate during TLS Handshake
# (a comma-separated list of ciphers).
# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
tlsCiphers=
# Trusted client certificates are required to connect over TLS.
# Reject the connection if the client certificate is not trusted.
# In effect, this requires that all connecting clients perform TLS client
# authentication.
tlsRequireTrustedClientCertOnConnect=false
### --- KeyStore TLS config variables --- ###
# Enable TLS with KeyStore type configuration in broker.
tlsEnabledWithKeyStore=false
# TLS Provider for KeyStore type
tlsProvider=
# TLS KeyStore type configuration in broker: JKS, PKCS12
tlsKeyStoreType=JKS
# TLS KeyStore path in broker
tlsKeyStore=
# TLS KeyStore password for broker
tlsKeyStorePassword=
# TLS TrustStore type configuration in broker: JKS, PKCS12
tlsTrustStoreType=JKS
# TLS TrustStore path in broker
tlsTrustStore=
# TLS TrustStore password in broker
tlsTrustStorePassword=
# Whether internal client use KeyStore type to authenticate with Pulsar brokers
brokerClientTlsEnabledWithKeyStore=false
# The TLS Provider used by internal client to authenticate with other Pulsar brokers
brokerClientSslProvider=
# TLS TrustStore type configuration for internal client: JKS, PKCS12
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStoreType=JKS
# TLS TrustStore path for internal client
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStore=
# TLS TrustStore password for internal client,
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStorePassword=
# Specify the tls cipher the internal client will use to negotiate during TLS Handshake
# (a comma-separated list of ciphers)
# e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256].
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsCiphers=
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# e.g. [TLSv1.2, TLSv1.1, TLSv1]
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsProtocols=
### --- Authentication --- ###
# Enable authentication
authenticationEnabled=false
# Authentication provider name list, which is a comma-separated list of class names
authenticationProviders=
# Interval of time for checking for expired authentication credentials
authenticationRefreshCheckSeconds=60
# Enforce authorization
authorizationEnabled=false
# Authorization provider fully qualified class-name
authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Allow wildcard matching in authorization
# (wildcard matching is only applicable if the wildcard char
# '*' appears at the first or last position, eg: *.pulsar.service, pulsar.service.*)
authorizationAllowWildcardsMatching=false
# Role names that are treated as "super-user", meaning they will be able to do all admin
# operations and publish/consume from all topics
superUserRoles=
# Authentication settings of the broker itself. Used when the broker connects to other brokers,
# either in same or other clusters
brokerClientTlsEnabled=false
brokerClientAuthenticationPlugin=
brokerClientAuthenticationParameters=
brokerClientTrustCertsFilePath=
# Supported Athenz provider domain names(comma separated) for authentication
athenzDomainNames=
# When this parameter is not empty, unauthenticated users perform as anonymousUserRole
anonymousUserRole=
### --- Token Authentication Provider --- ###
## Symmetric key
# Configure the secret key to be used to validate auth tokens
# The key can be specified like:
# tokenSecretKey=data:base64,xxxxxxxxx
# tokenSecretKey=file:///my/secret.key
tokenSecretKey=
## Asymmetric public/private key pair
# Configure the public key to be used to validate auth tokens
# The key can be specified like:
# tokenPublicKey=data:base64,xxxxxxxxx
# tokenPublicKey=file:///my/public.key
tokenPublicKey=
# The token "claim" that will be interpreted as the authentication "role" or "principal" by AuthenticationProviderToken (defaults to "sub" if blank)
tokenAuthClaim=
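# Illustrative sketch only (the paths and subject name are assumptions): generating a secret key
# and a client token with the Pulsar CLI tooling, then pointing the broker at that key:
# bin/pulsar tokens create-secret-key --output /my/secret.key
# bin/pulsar tokens create --secret-key file:///my/secret.key --subject my-client-role
# tokenSecretKey=file:///my/secret.key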
### --- SASL Authentication Provider --- ###
# This is a regexp, which limits the range of possible ids which can connect to the Broker using SASL.
# Default value: `SaslConstants.JAAS_CLIENT_ALLOWED_IDS_DEFAULT`, which is ".*pulsar.*",
# so only clients whose id contains 'pulsar' are allowed to connect.
saslJaasClientAllowedIds=
# Service Principal, for login context name.
# Default value `SaslConstants.JAAS_DEFAULT_BROKER_SECTION_NAME`, which is "Broker".
saslJaasBrokerSectionName=
### --- BookKeeper Client --- ###
# Authentication plugin to use when connecting to bookies
bookkeeperClientAuthenticationPlugin=
# BookKeeper auth plugin implementation-specific parameter names and values
bookkeeperClientAuthenticationParametersName=
bookkeeperClientAuthenticationParameters=
# Timeout for BK add / read operations
bookkeeperClientTimeoutInSeconds=30
# Speculative reads are initiated if a read request doesn't complete within a certain time.
# Using a value of 0 disables speculative reads
bookkeeperClientSpeculativeReadTimeoutInMillis=0
# Use older Bookkeeper wire protocol with bookie
bookkeeperUseV2WireProtocol=true
# Enable bookies health check. Bookies that have more than the configured number of failures within
# the interval will be quarantined for some time. During this period, new ledgers won't be created
# on these bookies
bookkeeperClientHealthCheckEnabled=true
bookkeeperClientHealthCheckIntervalSeconds=60
bookkeeperClientHealthCheckErrorThresholdPerInterval=5
bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
# Enable rack-aware bookie selection policy. BK will choose bookies from different racks when
# forming a new bookie ensemble
bookkeeperClientRackawarePolicyEnabled=true
# Enable region-aware bookie selection policy. BK will choose bookies from
# different regions and racks when forming a new bookie ensemble
# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored
bookkeeperClientRegionawarePolicyEnabled=false
# Enable/disable reordering read sequence on reading entries.
bookkeeperClientReorderReadSequenceEnabled=false
# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie
# outside the specified groups will not be used by the broker
bookkeeperClientIsolationGroups=
# Enable bookie secondary-isolation groups if bookkeeperClientIsolationGroups doesn't
# have enough bookies available.
bookkeeperClientSecondaryIsolationGroups=
# Minimum number of bookies that should be available as part of bookkeeperClientIsolationGroups,
# otherwise the broker will include bookkeeperClientSecondaryIsolationGroups bookies in the isolated list.
bookkeeperClientMinAvailableBookiesInIsolationGroups=
# Enable/disable having read operations for a ledger to be sticky to a single bookie.
# If this flag is enabled, the client will use one single bookie (by preference) to read
# all entries for a ledger.
#
# Disable Sticky Reads until {@link https://github.com/apache/bookkeeper/issues/1970} is fixed
bookkeeperEnableStickyReads=false
# Set the client security provider factory class name.
# Default: org.apache.bookkeeper.tls.TLSContextFactory
bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory
# Enable tls authentication with bookie
bookkeeperTLSClientAuthentication=false
# Supported type: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSKeyFileType=PEM
#Supported type: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSTrustCertTypes=PEM
# Path to file containing keystore password, if the client keystore is password protected.
bookkeeperTLSKeyStorePasswordPath=
# Path to file containing truststore password, if the client truststore is password protected.
bookkeeperTLSTrustStorePasswordPath=
# Path for the TLS private key file
bookkeeperTLSKeyFilePath=
# Path for the TLS certificate file
bookkeeperTLSCertificateFilePath=
# Path for the trusted TLS certificate file
bookkeeperTLSTrustCertsFilePath=
# Enable/disable disk weight based placement. Default is false
bookkeeperDiskWeightBasedPlacementEnabled=false
# Set the interval to check the need for sending an explicit LAC
# A value of '0' disables sending any explicit LACs. Default is 0.
bookkeeperExplicitLacIntervalInMills=0
### --- Managed Ledger --- ###
# Number of bookies to use when creating a ledger
managedLedgerDefaultEnsembleSize=2
# Number of copies to store for each message
managedLedgerDefaultWriteQuorum=2
# Number of guaranteed copies (acks to wait before write is complete)
managedLedgerDefaultAckQuorum=2
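# Illustrative sketch only (the values are assumptions, not recommendations): a common durable
# layout where each entry is spread over an ensemble of 3 bookies, 3 copies are written, and the
# write completes once 2 bookies have acknowledged it:
# managedLedgerDefaultEnsembleSize=3
# managedLedgerDefaultWriteQuorum=3
# managedLedgerDefaultAckQuorum=2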
# Default type of checksum to use when writing to BookKeeper. Default is "CRC32C"
# Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum).
managedLedgerDigestType=CRC32C
# Number of threads to be used for managed ledger tasks dispatching
managedLedgerNumWorkerThreads=8
# Number of threads to be used for managed ledger scheduled tasks
managedLedgerNumSchedulerThreads=8
# Amount of memory to use for caching data payload in managed ledger. This memory
# is allocated from JVM direct memory and it's shared across all the topics
# running in the same broker. By default, uses 1/5th of available direct memory
managedLedgerCacheSizeMB=
# Whether we should make a copy of the entry payloads when inserting in cache
managedLedgerCacheCopyEntries=false
# Threshold to which the cache level is brought down when eviction is triggered
managedLedgerCacheEvictionWatermark=0.9
# Configure the cache eviction frequency for the managed ledger cache (evictions/sec)
managedLedgerCacheEvictionFrequency=100.0
# All entries that have stayed in cache for more than the configured time, will be evicted
managedLedgerCacheEvictionTimeThresholdMillis=1000
# Configure the threshold (in number of entries) from where a cursor should be considered 'backlogged'
# and thus should be set as inactive.
managedLedgerCursorBackloggedThreshold=1000
# Rate limit the amount of writes per second generated by consumer acking the messages
managedLedgerDefaultMarkDeleteRateLimit=1.0
# Max number of entries to append to a ledger before triggering a rollover
# A ledger rollover is triggered on these conditions
# * Either the max rollover time has been reached
# * or max entries have been written to the ledger and at least min-time
# has passed
managedLedgerMaxEntriesPerLedger=50000
# Minimum time between ledger rollover for a topic
managedLedgerMinLedgerRolloverTimeMinutes=10
# Maximum time before forcing a ledger rollover for a topic
managedLedgerMaxLedgerRolloverTimeMinutes=240
# Delay between a ledger being successfully offloaded to long term storage
# and the ledger being deleted from bookkeeper (default is 4 hours)
managedLedgerOffloadDeletionLagMs=14400000
# The number of bytes before triggering automatic offload to long term storage
# (default is -1, which is disabled)
managedLedgerOffloadAutoTriggerSizeThresholdBytes=-1
# Max number of entries to append to a cursor ledger
managedLedgerCursorMaxEntriesPerLedger=50000
# Max time before triggering a rollover on a cursor ledger
managedLedgerCursorRolloverTimeInSeconds=14400
# Max number of "acknowledgment holes" that are going to be persistently stored.
# When acknowledging out of order, a consumer will leave holes that are supposed
# to be quickly filled by acking all the messages. The information of which
# messages are acknowledged is persisted by compressing in "ranges" of messages
# that were acknowledged. After the max number of ranges is reached, the information
# will only be tracked in memory and messages will be redelivered in case of
# crashes.
managedLedgerMaxUnackedRangesToPersist=10000
# Max number of "acknowledgment holes" that can be stored in Zookeeper. If number of unack message range is higher
# than this limit then broker will persist unacked ranges into bookkeeper to avoid additional data overhead into
# zookeeper.
managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000
# Skip reading non-recoverable/unreadable data ledgers in a managed ledger's list. This helps when data ledgers get
# corrupted in BookKeeper and the managed cursor is stuck at that ledger.
autoSkipNonRecoverableData=false
# operation timeout while updating managed-ledger metadata.
managedLedgerMetadataOperationsTimeoutSeconds=60
# Read entries timeout when broker tries to read messages from bookkeeper.
managedLedgerReadEntryTimeoutSeconds=0
# Add entry timeout when broker tries to publish message to bookkeeper (0 to disable it).
managedLedgerAddEntryTimeoutSeconds=0
### --- Load balancer --- ###
# Enable load balancer
loadBalancerEnabled=true
# Percentage of change to trigger load report update
loadBalancerReportUpdateThresholdPercentage=10
# maximum interval to update load report
loadBalancerReportUpdateMaxIntervalMinutes=15
# Frequency of report to collect
loadBalancerHostUsageCheckIntervalMinutes=1
# Enable/disable automatic bundle unloading for load-shedding
loadBalancerSheddingEnabled=true
# Load shedding interval. The broker periodically checks whether some traffic should be offloaded from
# an over-loaded broker to other under-loaded brokers
loadBalancerSheddingIntervalMinutes=1
# Prevent the same topics from being shed and moved to another broker more than once within this timeframe
loadBalancerSheddingGracePeriodMinutes=30
# Usage threshold to allocate max number of topics to broker
loadBalancerBrokerMaxTopics=50000
# Usage threshold to determine a broker as over-loaded
loadBalancerBrokerOverloadedThresholdPercentage=85
# Interval to flush dynamic resource quota to ZooKeeper
loadBalancerResourceQuotaUpdateIntervalMinutes=15
# enable/disable namespace bundle auto split
loadBalancerAutoBundleSplitEnabled=true
# enable/disable automatic unloading of split bundles
loadBalancerAutoUnloadSplitBundlesEnabled=true
# maximum topics in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxTopics=1000
# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxSessions=1000
# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxMsgRate=30000
# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxBandwidthMbytes=100
# maximum number of bundles in a namespace
loadBalancerNamespaceMaximumBundles=128
# Override the auto-detection of the network interfaces max speed.
# This option is useful in some environments (eg: EC2 VMs) where the max speed
# reported by Linux is not reflecting the real bandwidth available to the broker.
# Since the network usage is employed by the load manager to decide when a broker
# is overloaded, it is important to make sure the info is correct or override it
# with the right value here. The configured value can be a double (eg: 0.8) and that
# can be used to trigger load-shedding even before hitting the NIC limits.
loadBalancerOverrideBrokerNicSpeedGbps=
# Name of load manager to use
loadManagerClassName=org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl
# Supported algorithms name for namespace bundle split.
# "range_equally_divide" divides the bundle into two parts with the same hash range size.
# "topic_count_equally_divide" divides the bundle into two parts with the same topics count.
supportedNamespaceBundleSplitAlgorithms=range_equally_divide,topic_count_equally_divide
# Default algorithm name for namespace bundle split
defaultNamespaceBundleSplitAlgorithm=range_equally_divide
### --- Replication --- ###
# Enable replication metrics
replicationMetricsEnabled=true
# Max number of connections to open for each broker in a remote cluster
# More connections host-to-host lead to better throughput over high-latency
# links.
replicationConnectionsPerBroker=16
# Replicator producer queue size
replicationProducerQueueSize=1000
# Replicator prefix used for replicator producer name and cursor name
replicatorPrefix=pulsar.repl
# Default message retention time
defaultRetentionTimeInMinutes=0
# Default retention size
defaultRetentionSizeInMB=0
# How often to check whether the connections are still alive
keepAliveIntervalSeconds=30
# bootstrap namespaces
bootstrapNamespaces=
### --- WebSocket --- ###
# Enable the WebSocket API service in broker
webSocketServiceEnabled=false
# Number of IO threads in Pulsar Client used in WebSocket proxy
webSocketNumIoThreads=8
# Number of connections per Broker in Pulsar Client used in WebSocket proxy
webSocketConnectionsPerBroker=8
# Time in milliseconds after which an idle WebSocket session times out
webSocketSessionIdleTimeoutMillis=300000
### --- Metrics --- ###
# Enable topic level metrics
exposeTopicLevelMetricsInPrometheus=true
# Enable consumer level metrics. default is false
exposeConsumerLevelMetricsInPrometheus=false
# Classname of Pluggable JVM GC metrics logger that can log GC specific metrics
# jvmGCMetricsLoggerClassName=
### --- Functions --- ###
# Enable Functions Worker Service in Broker
functionsWorkerEnabled=false
### --- Broker Web Stats --- ###
# Enable topic level metrics
exposePublisherStats=true
statsUpdateFrequencyInSecs=60
statsUpdateInitialDelayInSecs=60
# Enable exposing the precise backlog stats.
# Set to false to use the published counter and consumed counter to calculate the backlog; this is more efficient but may be inaccurate.
# Default is false.
exposePreciseBacklogInPrometheus=false
### --- Schema storage --- ###
# The schema storage implementation used by this broker
schemaRegistryStorageClassName=org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorageFactory
# Enforce schema validation in the following cases:
#
# - if a producer without a schema attempts to produce to a topic with a schema, the producer will
#   fail to connect. PLEASE be careful when using this, since non-Java clients don't support schemas;
#   if you enable this setting, non-Java clients will fail to produce.
isSchemaValidationEnforced=false
### --- Ledger Offloading --- ###
# The directory for all the offloader implementations
offloadersDirectory=./offloaders
# Driver to use to offload old data to long term storage (Possible values: S3, aws-s3, google-cloud-storage)
# When using google-cloud-storage, Make sure both Google Cloud Storage and Google Cloud Storage JSON API are enabled for
# the project (check from Developers Console -> Api&auth -> APIs).
managedLedgerOffloadDriver=
# Maximum number of thread pool threads for ledger offloading
managedLedgerOffloadMaxThreads=2
# Maximum prefetch rounds for ledger reading for offloading
managedLedgerOffloadPrefetchRounds=1
# Use Open Range-Set to cache unacked messages
managedLedgerUnackedRangesOpenCacheSetEnabled=true
# For Amazon S3 ledger offload, AWS region
s3ManagedLedgerOffloadRegion=
# For Amazon S3 ledger offload, Bucket to place offloaded ledger into
s3ManagedLedgerOffloadBucket=
# For Amazon S3 ledger offload, Alternative endpoint to connect to (useful for testing)
s3ManagedLedgerOffloadServiceEndpoint=
# For Amazon S3 ledger offload, Max block size in bytes. (64MB by default, 5MB minimum)
s3ManagedLedgerOffloadMaxBlockSizeInBytes=67108864
# For Amazon S3 ledger offload, Read buffer size in bytes (1MB by default)
s3ManagedLedgerOffloadReadBufferSizeInBytes=1048576
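# Illustrative sketch only (the bucket name and region are assumptions): offloading old ledger
# data to S3 by selecting the aws-s3 driver and pointing it at a bucket:
# managedLedgerOffloadDriver=aws-s3
# s3ManagedLedgerOffloadRegion=eu-west-1
# s3ManagedLedgerOffloadBucket=my-pulsar-offload-bucket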
# For Google Cloud Storage ledger offload, region where offload bucket is located.
# reference this page for more details: https://cloud.google.com/storage/docs/bucket-locations
gcsManagedLedgerOffloadRegion=
# For Google Cloud Storage ledger offload, Bucket to place offloaded ledger into
gcsManagedLedgerOffloadBucket=
# For Google Cloud Storage ledger offload, Max block size in bytes. (64MB by default, 5MB minimum)
gcsManagedLedgerOffloadMaxBlockSizeInBytes=67108864
# For Google Cloud Storage ledger offload, Read buffer size in bytes (1MB by default)
gcsManagedLedgerOffloadReadBufferSizeInBytes=1048576
# For Google Cloud Storage, path to json file containing service account credentials.
# For more details, see the "Service Accounts" section of https://support.google.com/googleapi/answer/6158849
gcsManagedLedgerOffloadServiceAccountKeyFile=
#For File System Storage, file system profile path
fileSystemProfilePath=../conf/filesystem_offload_core_site.xml
#For File System Storage, file system uri
fileSystemURI=
### --- Deprecated config variables --- ###
# Deprecated. Use configurationStoreServers
globalZookeeperServers=
# Deprecated - Enable TLS when talking with other clusters to replicate messages
replicationTlsEnabled=false
# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds
brokerServicePurgeInactiveFrequencyInSeconds=60
### --- Transaction config variables --- ###
# Enable transaction coordinator in broker
transactionCoordinatorEnabled=true
transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider

View File

@ -1,69 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Configuration for pulsar-client and pulsar-admin CLI tools
# URL for Pulsar REST API (for admin operations)
# For TLS:
# webServiceUrl=https://localhost:8443/
webServiceUrl=http://localhost:8080/
# URL for Pulsar Binary Protocol (for produce and consume operations)
# For TLS:
# brokerServiceUrl=pulsar+ssl://localhost:6651/
brokerServiceUrl=pulsar://localhost:6650/
# Authentication plugin to authenticate with servers
# e.g. for TLS
# authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls
authPlugin=
# Parameters passed to authentication plugin.
# A comma separated list of key:value pairs.
# Keys depend on the configured authPlugin.
# e.g. for TLS
# authParams=tlsCertFile:/path/to/client-cert.pem,tlsKeyFile:/path/to/client-key.pem
authParams=
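# Illustrative sketch only (the token file path is an assumption): configuring the CLI tools to
# authenticate with a JWT token instead of TLS certificates:
# authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationToken
# authParams=file:///path/to/client-token.txt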
# Allow TLS connections to servers whose certificate cannot be
# verified to have been signed by a trusted certificate
# authority.
tlsAllowInsecureConnection=false
# Whether server hostname must match the common name of the certificate
# the server is using.
tlsEnableHostnameVerification=false
# Path for the trusted TLS certificate file.
# This cert is used to verify that any cert presented by a server
# is signed by a certificate authority. If this verification
# fails, then the cert is untrusted and the connection is dropped.
tlsTrustCertsFilePath=
# Enable TLS with KeyStore type configuration in broker.
useKeyStoreTls=false
# TLS KeyStore type configuration: JKS, PKCS12
tlsTrustStoreType=JKS
# TLS TrustStore path
tlsTrustStorePath=
# TLS TrustStore password
tlsTrustStorePassword=

View File

@ -1,87 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Zookeeper quorum connection string (comma-separated)
zookeeperServers=
# Configuration Store connection string (comma-separated)
configurationStoreServers=
# ZooKeeper session timeout
zookeeperSessionTimeoutMs=30000
# Port to use to serve binary-proto requests
servicePort=6650
# Port to use to serve binary-proto-tls requests
servicePortTls=
# Port that the discovery service listens on
webServicePort=8080
# Port to use to serve HTTPS requests
webServicePortTls=
# Control whether to bind directly on localhost rather than on normal hostname
bindOnLocalhost=false
### --- Authentication --- ###
# Enable authentication
authenticationEnabled=false
# Authentication provider name list, which is a comma-separated list of class names
authenticationProviders=
# Enforce authorization
authorizationEnabled=false
# Authorization provider name list, which is a comma-separated list of class names
authorizationProviders=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Role names that are treated as "super-user", meaning they will be able to do all admin
# operations and publish/consume from all topics (comma-separated)
superUserRoles=
# Allow wildcard matching in authorization
# (wildcard matching is only applicable if the wildcard char
# '*' appears at the first or last position, eg: *.pulsar.service, pulsar.service.*)
authorizationAllowWildcardsMatching=false
##### --- TLS --- #####
# Deprecated - Use servicePortTls and webServicePortTls instead
tlsEnabled=false
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Specify whether client certificates are required for TLS.
# Reject the connection if the client certificate is not trusted.
tlsRequireTrustedClientCertOnConnect=false
# Tls cert refresh duration in seconds (set 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
### --- Deprecated config variables --- ###
# Deprecated. Use configurationStoreServers
globalZookeeperServers=

View File

@ -1,48 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration>
<!--file system uri, necessary-->
<property>
<name>fs.defaultFS</name>
<value></value>
</property>
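<!-- Illustrative sketch only (the hostname is an assumption): for HDFS-backed offload the
     fs.defaultFS value above would typically look like hdfs://namenode.example.com:8020 -->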
<property>
<name>hadoop.tmp.dir</name>
<value>pulsar</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>4096</value>
</property>
<property>
<name>io.seqfile.compress.blocksize</name>
<value>1000000</value>
</property>
<property>
<name>io.seqfile.compression.type</name>
<value>BLOCK</value>
</property>
<property>
<name>io.map.index.interval</name>
<value>128</value>
</property>
</configuration>

View File

@ -1,41 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
[loggers]
keys=root
[handlers]
keys=stream_handler
[formatters]
keys=formatter
[logger_root]
level=INFO
handlers=stream_handler
[handler_stream_handler]
class=StreamHandler
level=INFO
formatter=formatter
args=(sys.stdout,)
[formatter_formatter]
format=[%(asctime)s] [%(levelname)s] %(filename)s: %(message)s
datefmt=%Y-%m-%d %H:%M:%S %z

View File

@ -1,41 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
[loggers]
keys=root
[handlers]
keys=rotating_file_handler
[formatters]
keys=formatter
[logger_root]
level=INFO
handlers=rotating_file_handler
[handler_rotating_file_handler]
class=log.CreatePathRotatingFileHandler
level=INFO
formatter=formatter
args=(os.getenv("LOG_FILE",""), 5, 10 * 1024 * 1024)
[formatter_formatter]
format=[%(asctime)s] [%(levelname)s] %(filename)s: %(message)s
datefmt=%Y-%m-%d %H:%M:%S %z

View File

@ -1,203 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
####################
# Worker Settings
####################
workerId: standalone
workerHostname: localhost
workerPort: 6750
workerPortTls: 6751
# Configuration Store connection string
configurationStoreServers: localhost:2181
# ZooKeeper session timeout in milliseconds
zooKeeperSessionTimeoutMillis: 30000
# ZooKeeper operation timeout in seconds
zooKeeperOperationTimeoutSeconds: 30
################################
# Function package management
################################
numFunctionPackageReplicas: 1
downloadDirectory: download/pulsar_functions
# Classname of Pluggable JVM GC metrics logger that can log GC specific metrics
# jvmGCMetricsLoggerClassName:
#################################################################
# Function metadata management (assignment, scheduling, etc.)
#################################################################
# Configure the pulsar client used by function metadata management
#
# endpoints
pulsarServiceUrl: pulsar://localhost:6650
pulsarWebServiceUrl: http://localhost:8080
# the authentication plugin to be used by the pulsar client used in worker service
# clientAuthenticationPlugin:
# the authentication parameter to be used by the pulsar client used in worker service
# clientAuthenticationParameters:
# Bookie Authentication
#
# Authentication plugin to use when connecting to bookies
# bookkeeperClientAuthenticationPlugin:
# BookKeeper auth plugin implementation-specific parameter names and values
# bookkeeperClientAuthenticationParametersName:
# Parameters for bookkeeper auth plugin
# bookkeeperClientAuthenticationParameters:
# pulsar topics used for function metadata management
pulsarFunctionsNamespace: public/functions
pulsarFunctionsCluster: standalone
functionMetadataTopicName: metadata
clusterCoordinationTopicName: coordinate
# Number of threads to use for HTTP requests processing. Default is set to 8
numHttpServerThreads: 8
# function assignment and scheduler
schedulerClassName: "org.apache.pulsar.functions.worker.scheduler.RoundRobinScheduler"
functionAssignmentTopicName: "assignments"
failureCheckFreqMs: 30000
rescheduleTimeoutMs: 60000
initialBrokerReconnectMaxRetries: 60
assignmentWriteMaxRetries: 60
instanceLivenessCheckFreqMs: 30000
# How frequently the worker performs compaction on function topics
topicCompactionFrequencySec: 1800
###############################
# Function Runtime Management
###############################
#### Process Runtime ####
# Pulsar function instances are launched as processes
functionRuntimeFactoryClassName: org.apache.pulsar.functions.runtime.process.ProcessRuntimeFactory
functionRuntimeFactoryConfigs:
# location of log files for functions
logDirectory: /tmp
# change the jar location only when you put the java instance jar in a different location
javaInstanceJarLocation:
# change the python instance location only when you put the python instance jar in a different location
pythonInstanceLocation:
# change the extra dependencies location:
extraFunctionDependenciesDir:
#### Thread Runtime ####
# Pulsar function instances are run as threads
#functionRuntimeFactoryClassName: org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory
#functionRuntimeFactoryConfigs:
# # thread group name
# threadGroupName: "Thread Function Container Group"
#### Kubernetes Runtime ####
# Pulsar function are deployed to Kubernetes
#functionRuntimeFactoryClassName: org.apache.pulsar.functions.runtime.kubernetes.KubernetesRuntimeFactory
#functionRuntimeFactoryConfigs:
# # uri of the kubernetes cluster; leave it empty and it will use the kubernetes settings of the function worker
# k8Uri:
# # the kubernetes namespace to run the function instances in; it is `default` if this setting is left empty
# jobNamespace:
# # the docker image to run function instances; by default it is `apachepulsar/pulsar`
# pulsarDockerImageName:
# # the root directory of the pulsar home directory in `pulsarDockerImageName`; by default it is `/pulsar`.
# # if you are using your own built image in `pulsarDockerImageName`, you need to set this setting accordingly
# pulsarRootDir:
# # this setting only takes effect if `k8Uri` is set to null. if your function worker is running as a k8s pod,
# # setting this to true lets the function worker submit functions to the same k8s cluster the function worker
# # is running in. set this to false if your function worker is not running as a k8s pod.
# submittingInsidePod: false
# # setting the pulsar service url that pulsar function should use to connect to pulsar
# # if it is not set, it will use the pulsar service url configured in worker service
# pulsarServiceUrl:
# # setting the pulsar admin url that pulsar function should use to connect to pulsar
# # if it is not set, it will use the pulsar admin url configured in worker service
# pulsarAdminUrl:
# # the custom labels that function worker uses to select the nodes for pods
# customLabels:
# # the directory for dropping extra function dependencies
# # if it is not an absolute path, it is relative to `pulsarRootDir`
# extraFunctionDependenciesDir:
# # Additional memory padding added on top of the memory requested by the function, on a per-instance basis
# percentMemoryPadding: 10
# # The ratio between the cpu request and the cpu limit to be set for a function/source/sink.
# # The formula for the cpu request is cpuRequest = userRequestCpu / cpuOverCommitRatio
# cpuOverCommitRatio: 1.0
# # The ratio between the memory request and the memory limit to be set for a function/source/sink.
# # The formula for the memory request is memoryRequest = userRequestMemory / memoryOverCommitRatio
# memoryOverCommitRatio: 1.0
## A set of the minimum amount of resources functions must request.
## Support for this depends on function runtime.
## Only kubernetes runtime currently supports this.
# functionInstanceMinResources:
# cpu: 1
# ram: 1073741824
# disk: 10737418240
############################################
# security settings for worker service
############################################
# Enforce authentication
authenticationEnabled: false
# Enforce authorization on accessing functions api
authorizationEnabled: false
# Set of authentication provider name list, which is a list of class names
authenticationProviders:
# Authorization provider fully qualified class-name
authorizationProvider: org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Set of role names that are treated as "super-user", meaning they will be able to access any admin-api
superUserRoles:
#### tls configuration for worker service
# Enable TLS
tlsEnabled: false
# Path for the TLS certificate file
tlsCertificateFilePath:
# Path for the TLS private key file
tlsKeyFilePath:
# Path for the trusted TLS certificate file
tlsTrustCertsFilePath:
# Accept untrusted TLS certificate from client
tlsAllowInsecureConnection: false
# Tls cert refresh duration in seconds (set 0 to check on every new connection)
tlsCertRefreshCheckDurationSec: 300
########################
# State Management
########################
# the service url that points to the bookkeeper table service
# stateStorageServiceUrl: bk://localhost:4181
########################
# Connectors
########################
connectorsDirectory: ./connectors
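
As a quick aid to the over-commit formulas noted in the Kubernetes runtime section of this file, here is a minimal, self-contained Java sketch of the same arithmetic. The class and method names are illustrative only and not part of any Pulsar API; the values mirror the defaults above.

public final class ResourceOverCommitExample {

    // cpuRequest = userRequestCpu / cpuOverCommitRatio; the limit stays at userRequestCpu
    static double cpuRequest(double userRequestCpu, double cpuOverCommitRatio) {
        return userRequestCpu / cpuOverCommitRatio;
    }

    // memoryRequest = userRequestMemory / memoryOverCommitRatio
    static long memoryRequest(long userRequestMemoryBytes, double memoryOverCommitRatio) {
        return (long) (userRequestMemoryBytes / memoryOverCommitRatio);
    }

    public static void main(String[] args) {
        // With cpuOverCommitRatio=2.0, a function asking for 1 CPU is scheduled with a 0.5 CPU request.
        System.out.println(cpuRequest(1.0, 2.0));                 // 0.5
        // With memoryOverCommitRatio=1.0 (the default above), request == limit.
        System.out.println(memoryRequest(1_073_741_824L, 1.0));   // 1073741824
    }
}

Over-committing only lowers the Kubernetes request, so pods can be packed more densely while the limit still caps actual usage.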

View File

@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=data/global-zookeeper
# the port at which the clients will connect
clientPort=2184
# the port at which the admin will listen
admin.enableServer=true
admin.serverPort=9991
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
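
For orientation, the tick-based settings in this file translate into absolute timeouts as tickTime multiplied by the respective limit. A minimal Java sketch of that arithmetic, using the values above (helper names are illustrative only):

public class ZookeeperTimeoutsExample {
    public static void main(String[] args) {
        int tickTimeMs = 2000; // tickTime above
        int initLimit = 10;    // ticks allowed for the initial synchronization phase
        int syncLimit = 5;     // ticks allowed between a request and its acknowledgement

        System.out.println("initial sync timeout: " + (initLimit * tickTimeMs) + " ms"); // 20000 ms
        System.out.println("request/ack timeout:  " + (syncLimit * tickTimeMs) + " ms"); // 10000 ms
    }
}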

View File

@ -1,33 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// This is a sample script that log4j2 can use to log only DEBUG statements containing the text: pulsar-topic-name
var result = false;
var topicName = "pulsar-topic-name";
/*
* Find more logEvent attributes at :
* https://github.com/apache/logging-log4j2/blob/dbd2d252a1b4139a9bd9eb213c89f28498db6dcf/log4j-core/src/main/java/org/apache/logging/log4j/core/LogEvent.java
*/
if (logEvent.getLevel() == "DEBUG") {
    if (logEvent.getMessage().getFormattedMessage().indexOf(topicName) != -1) {
        result = true;
    }
} else {
    result = true;
}
result;

View File

@ -1,164 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
Configuration:
status: INFO
monitorInterval: 30
name: pulsar
packages: io.prometheus.client.log4j2
Properties:
Property:
- name: "pulsar.log.dir"
value: "logs"
- name: "pulsar.log.file"
value: "pulsar.log"
- name: "pulsar.log.appender"
value: "RoutingAppender"
- name: "pulsar.log.root.level"
value: "info"
- name: "pulsar.log.level"
value: "info"
- name: "pulsar.routing.appender.default"
value: "Console"
# Example: logger-filter script
Scripts:
ScriptFile:
name: filter.js
language: JavaScript
path: ./conf/log4j2-scripts/filter.js
charset: UTF-8
Appenders:
# Console
Console:
name: Console
target: SYSTEM_OUT
PatternLayout:
Pattern: "%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"
# Rolling file appender configuration
RollingFile:
name: RollingFile
fileName: "${sys:pulsar.log.dir}/${sys:pulsar.log.file}"
filePattern: "${sys:pulsar.log.dir}/${sys:pulsar.log.file}-%d{MM-dd-yyyy}-%i.log.gz"
immediateFlush: false
PatternLayout:
Pattern: "%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"
Policies:
TimeBasedTriggeringPolicy:
interval: 1
modulate: true
SizeBasedTriggeringPolicy:
size: 1 GB
# Delete files older than 30 days
DefaultRolloverStrategy:
Delete:
basePath: ${sys:pulsar.log.dir}
maxDepth: 2
IfFileName:
glob: "*/${sys:pulsar.log.file}*log.gz"
IfLastModified:
age: 30d
Prometheus:
name: Prometheus
# Routing
Routing:
name: RoutingAppender
Routes:
pattern: "$${ctx:function}"
Route:
-
Routing:
name: InstanceRoutingAppender
Routes:
pattern: "$${ctx:instance}"
Route:
-
RollingFile:
name: "Rolling-${ctx:function}"
fileName : "${sys:pulsar.log.dir}/functions/${ctx:function}/${ctx:functionname}-${ctx:instance}.log"
filePattern : "${sys:pulsar.log.dir}/functions/${sys:pulsar.log.file}-${ctx:instance}-%d{MM-dd-yyyy}-%i.log.gz"
PatternLayout:
Pattern: "%d{ABSOLUTE} %level{length=5} [%thread] [instance: %X{instance}] %logger{1} - %msg%n"
Policies:
TimeBasedTriggeringPolicy:
interval: 1
modulate: true
SizeBasedTriggeringPolicy:
size: "20MB"
# Trigger a rollover every day at midnight, which also runs the
# roll-over strategy that deletes older files
CronTriggeringPolicy:
schedule: "0 0 0 * * ?"
# Delete files older than 30 days
DefaultRolloverStrategy:
Delete:
basePath: ${sys:pulsar.log.dir}
maxDepth: 2
IfFileName:
glob: "*/${sys:pulsar.log.file}*log.gz"
IfLastModified:
age: 30d
- ref: "${sys:pulsar.routing.appender.default}"
key: "${ctx:function}"
- ref: "${sys:pulsar.routing.appender.default}"
key: "${ctx:function}"
Loggers:
# Default root logger configuration
Root:
level: "${sys:pulsar.log.root.level}"
additivity: true
AppenderRef:
- ref: "${sys:pulsar.log.appender}"
level: "${sys:pulsar.log.level}"
- ref: Prometheus
level: info
Logger:
- name: org.apache.bookkeeper.bookie.BookieShell
level: info
additivity: false
AppenderRef:
- ref: Console
- name: verbose
level: info
additivity: false
AppenderRef:
- ref: Console
# Logger to inject filter script
# - name: org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl
# level: debug
# additivity: false
# AppenderRef:
# ref: "${sys:pulsar.log.appender}"
# ScriptFilter:
# onMatch: ACCEPT
# onMisMatch: DENY
# ScriptRef:
# ref: filter.js
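
The RoutingAppender in this file picks a route from the Log4j ThreadContext keys function, functionname, and instance (the $${ctx:...} lookups above). Below is a minimal sketch of code that populates those keys before logging; the key names mirror this file, while the class name and values are placeholders.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.ThreadContext;

public class FunctionLoggingExample {
    private static final Logger LOG = LogManager.getLogger(FunctionLoggingExample.class);

    public static void main(String[] args) {
        // These context keys drive the $${ctx:function}/$${ctx:instance} routing patterns above.
        ThreadContext.put("function", "tenant-namespace-myfunction");
        ThreadContext.put("functionname", "myfunction");
        ThreadContext.put("instance", "0");
        try {
            LOG.info("routed to logs/functions/tenant-namespace-myfunction/myfunction-0.log");
        } finally {
            ThreadContext.clearMap();
        }
    }
}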

View File

@ -1,103 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# name of the connector to be displayed in the catalog
connector.name=pulsar
# the url of Pulsar broker service
pulsar.broker-service-url=http://localhost:8080
# URI of Zookeeper cluster
pulsar.zookeeper-uri=127.0.0.1:2181
# minimum number of entries to read at a single time
pulsar.max-entry-read-batch-size=100
# default number of splits to use per query
pulsar.target-num-splits=2
# max message queue size
pulsar.max-split-message-queue-size=10000
# max entry queue size
pulsar.max-split-entry-queue-size=1000
# Rewrite namespace delimiter
# Warning: avoid using symbols allowed in namespace names (a-zA-Z_0-9 -=:%)
# to prevent erroneous rewriting
pulsar.namespace-delimiter-rewrite-enable=false
pulsar.rewrite-namespace-delimiter=/
####### TIERED STORAGE OFFLOADER CONFIGS #######
## Driver to use to offload old data to long term storage
#pulsar.managed-ledger-offload-driver = aws-s3
## The directory to locate offloaders
#pulsar.offloaders-directory = /pulsar/offloaders
## Maximum number of thread pool threads for ledger offloading
#pulsar.managed-ledger-offload-max-threads = 2
## Properties and configurations related to specific offloader implementation
#pulsar.offloader-properties = \
# {"s3ManagedLedgerOffloadBucket": "offload-bucket", \
# "s3ManagedLedgerOffloadRegion": "us-west-2", \
# "s3ManagedLedgerOffloadServiceEndpoint": "http://s3.amazonaws.com"}
####### AUTHENTICATION CONFIGS #######
## the authentication plugin to be used to authenticate to Pulsar cluster
#pulsar.auth-plugin =
## the authentication parameter to be used to authenticate to Pulsar cluster
#pulsar.auth-params =
## Accept untrusted TLS certificate
#pulsar.tls-allow-insecure-connection =
## Whether to enable hostname verification on TLS connections
#pulsar.tls-hostname-verification-enable =
## Path for the trusted TLS certificate file
#pulsar.tls-trust-cert-file-path =
####### BOOKKEEPER CONFIGS #######
# Entry read count throttling limit per second; 0 disables the throttle (default is 0).
pulsar.bookkeeper-throttle-value = 0
# The number of threads used by Netty to handle TCP connections,
# default is 2 * Runtime.getRuntime().availableProcessors().
# pulsar.bookkeeper-num-io-threads =
# The number of worker threads used by bookkeeper client to submit operations,
# default is Runtime.getRuntime().availableProcessors().
# pulsar.bookkeeper-num-worker-threads =
####### MANAGED LEDGER CONFIGS #######
# Amount of memory to use for caching data payload in managed ledger. This memory
# is allocated from JVM direct memory and it's shared across all the managed ledgers
# running in the same sql worker. 0 disables the cache (default is 0).
pulsar.managed-ledger-cache-size-MB = 0
# Number of threads to be used for managed ledger tasks dispatching,
# default is Runtime.getRuntime().availableProcessors().
# pulsar.managed-ledger-num-worker-threads =
# Number of threads to be used for managed ledger scheduled tasks,
# default is Runtime.getRuntime().availableProcessors().
# pulsar.managed-ledger-num-scheduler-threads =

View File

@ -1,42 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
node.id=ffffffff-ffff-ffff-ffff-ffffffffffff
node.environment=test
http-server.http.port=8081
discovery-server.enabled=true
discovery.uri=http://localhost:8081
exchange.http-client.max-connections=1000
exchange.http-client.max-connections-per-server=1000
exchange.http-client.connect-timeout=1m
exchange.http-client.idle-timeout=1m
scheduler.http-client.max-connections=1000
scheduler.http-client.max-connections-per-server=1000
scheduler.http-client.connect-timeout=1m
scheduler.http-client.idle-timeout=1m
query.client.timeout=5m
query.min-expire-age=30m
presto.version=testversion
distributed-joins-enabled=true
node-scheduler.include-coordinator=true
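
To show how the two Presto files above fit together, here is a hedged Java sketch that queries the pulsar catalog (connector.name=pulsar from the preceding catalog file) through the coordinator configured at discovery.uri=http://localhost:8081. It assumes the Presto JDBC driver is on the classpath; tenant/namespace, topic, and user names are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PulsarSqlJdbcExample {
    public static void main(String[] args) throws Exception {
        // URL layout: jdbc:presto://<coordinator>/<catalog>/<schema>, where the schema is a tenant/namespace.
        String url = "jdbc:presto://localhost:8081/pulsar/public%2Fdefault";
        try (Connection conn = DriverManager.getConnection(url, "test-user", null);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM \"my-topic\" LIMIT 10")) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // first column of each row
            }
        }
    }
}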

View File

@ -1,27 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
-server
-Xmx16G
-XX:+UseG1GC
-XX:G1HeapRegionSize=32M
-XX:+UseGCOverheadLimit
-XX:+ExplicitGCInvokesConcurrent
-XX:+HeapDumpOnOutOfMemoryError
-XX:+ExitOnOutOfMemoryError

View File

@ -1,23 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
com.facebook.presto=INFO
com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory=WARN
com.ning.http.client=WARN
com.facebook.presto.server.PluginManager=DEBUG

View File

@ -1,195 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
### --- Broker Discovery --- ###
# The ZooKeeper quorum connection string (as a comma-separated list)
zookeeperServers=
# Configuration store connection string (as a comma-separated list)
configurationStoreServers=
# if service discovery is disabled, this url should point to the discovery service provider.
brokerServiceURL=
brokerServiceURLTLS=
# These settings are unnecessary if `zookeeperServers` is specified
brokerWebServiceURL=
brokerWebServiceURLTLS=
# If function workers are setup in a separate cluster, configure the following 2 settings
# to point to the function workers cluster
functionWorkerWebServiceURL=
functionWorkerWebServiceURLTLS=
# ZooKeeper session timeout (in milliseconds)
zookeeperSessionTimeoutMs=30000
### --- Server --- ###
# The port to use to serve binary Protobuf requests
servicePort=6650
# The port to use to serve binary Protobuf TLS requests
servicePortTls=
# Port that the discovery service listens on
webServicePort=8080
# Port to use to serve HTTPS requests
webServicePortTls=
# Path for the file used to determine the rotation status for the proxy instance when responding
# to service discovery health checks
statusFilePath=
### ---Authorization --- ###
# Role names that are treated as "super-users," meaning that they will be able to perform all admin
# operations and publish/consume to/from all topics (as a comma-separated list)
superUserRoles=
# Whether authorization is enforced by the Pulsar proxy
authorizationEnabled=false
# Authorization provider as a fully qualified class name
authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Whether client authorization credentials are forwarded to the broker for re-authorization.
# Authentication must be enabled via authenticationEnabled=true for this to take effect.
forwardAuthorizationCredentials=false
### --- Authentication --- ###
# Whether authentication is enabled for the Pulsar proxy
authenticationEnabled=false
# Authentication provider name list (a comma-separated list of class names)
authenticationProviders=
# When this parameter is not empty, unauthenticated users perform as anonymousUserRole
anonymousUserRole=
### --- Client Authentication --- ###
# The three brokerClient* authentication settings below are for the proxy itself and determine how it
# authenticates with Pulsar brokers
# The authentication plugin used by the Pulsar proxy to authenticate with Pulsar brokers
brokerClientAuthenticationPlugin=
# The authentication parameters used by the Pulsar proxy to authenticate with Pulsar brokers
brokerClientAuthenticationParameters=
# The path to trusted certificates used by the Pulsar proxy to authenticate with Pulsar brokers
brokerClientTrustCertsFilePath=
# Whether TLS is enabled when communicating with Pulsar brokers
tlsEnabledWithBroker=false
# Tls cert refresh duration in seconds (set 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
##### --- Rate Limiting --- #####
# Max concurrent inbound connections. The proxy will reject requests beyond that.
maxConcurrentInboundConnections=10000
# Max concurrent outbound connections. The proxy will error out requests beyond that.
maxConcurrentLookupRequests=50000
##### --- TLS --- #####
# Deprecated - use servicePortTls and webServicePortTls instead
tlsEnabledInProxy=false
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Path for the trusted TLS certificate file.
# This cert is used to verify that any certs presented by connecting clients
# are signed by a certificate authority. If this verification
# fails, then the certs are untrusted and the connections are dropped.
tlsTrustCertsFilePath=
# Accept untrusted TLS certificate from client.
# If true, a client with a cert which cannot be verified with the
# 'tlsTrustCertsFilePath' cert will be allowed to connect to the server,
# though the cert will not be used for client authentication.
tlsAllowInsecureConnection=false
# Whether the hostname is validated when the proxy creates a TLS connection with brokers
tlsHostnameVerificationEnabled=false
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# Examples:- [TLSv1.2, TLSv1.1, TLSv1]
tlsProtocols=
# Specify the tls cipher the broker will use to negotiate during TLS Handshake
# (a comma-separated list of ciphers).
# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
tlsCiphers=
# Whether client certificates are required for TLS. Connections are rejected if the client
# certificate isn't trusted.
tlsRequireTrustedClientCertOnConnect=false
##### --- HTTP --- #####
# Http paths to redirect to non-pulsar services.
httpReverseProxyConfigs=
# Http output buffer size. The amount of data that will be buffered for http requests
# before it is flushed to the channel. A larger buffer size may result in higher http throughput
# though it may take longer for the client to see data.
# If using HTTP streaming via the reverse proxy, this should be set to the minimum value, 1,
# so that clients see the data as soon as possible.
httpOutputBufferSize=32768
# Number of threads to use for HTTP requests processing. Default is
# 2 * Runtime.getRuntime().availableProcessors()
httpNumThreads=
### --- Token Authentication Provider --- ###
## Symmetric key
# Configure the secret key to be used to validate auth tokens
# The key can be specified like:
# tokenSecretKey=data:base64,xxxxxxxxx
# tokenSecretKey=file:///my/secret.key
tokenSecretKey=
## Asymmetric public/private key pair
# Configure the public key to be used to validate auth tokens
# The key can be specified like:
# tokenPublicKey=data:base64,xxxxxxxxx
# tokenPublicKey=file:///my/public.key
tokenPublicKey=
# The token "claim" that will be interpreted as the authentication "role" or "principal" by AuthenticationProviderToken (defaults to "sub" if blank)
tokenAuthClaim=
### --- Deprecated config variables --- ###
# Deprecated. Use configurationStoreServers
globalZookeeperServers=
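
The tokenSecretKey setting above accepts either a file:// path or an inline data:base64,... value. A minimal sketch, using only the JDK, for generating a random 256-bit secret in that inline form (Pulsar also ships its own token tooling; this is just to illustrate the expected format):

import java.security.SecureRandom;
import java.util.Base64;

public class TokenSecretKeyExample {
    public static void main(String[] args) {
        byte[] secret = new byte[32];                 // 256-bit symmetric secret
        new SecureRandom().nextBytes(secret);
        String encoded = Base64.getEncoder().encodeToString(secret);
        // Paste the printed value into the tokenSecretKey setting above.
        System.out.println("tokenSecretKey=data:base64," + encoded);
    }
}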

View File

@ -1,61 +0,0 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Set JAVA_HOME here to override the environment setting
# JAVA_HOME=
# default settings for starting pulsar broker
# Log4j configuration file
# PULSAR_LOG_CONF=
# Logs location
# PULSAR_LOG_DIR=
# Configuration file of settings used in broker server
# PULSAR_BROKER_CONF=
# Configuration file of settings used in bookie server
# PULSAR_BOOKKEEPER_CONF=
# Configuration file of settings used in zookeeper server
# PULSAR_ZK_CONF=
# Configuration file of settings used in global zookeeper server
# PULSAR_GLOBAL_ZK_CONF=
# Extra options to be passed to the jvm
PULSAR_MEM=${PULSAR_MEM:-"-Xms2g -Xmx2g -XX:MaxDirectMemorySize=4g"}
# Garbage collection options
PULSAR_GC=${PULSAR_GC:-"-XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB"}
# Extra options to be passed to the jvm
PULSAR_EXTRA_OPTS=${PULSAR_EXTRA_OPTS:-" -Dpulsar.allocator.exit_on_oom=true -Dio.netty.recycler.maxCapacity.default=1000 -Dio.netty.recycler.linkCapacity=1024"}
# Add extra paths to the bookkeeper classpath
# PULSAR_EXTRA_CLASSPATH=
#Folder where the Bookie server PID file should be stored
#PULSAR_PID_DIR=
#Wait time before forcefully killing the pulsar server instance, if the stop is not successful
#PULSAR_STOP_TIMEOUT=

View File

@ -1,60 +0,0 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Set JAVA_HOME here to override the environment setting
# JAVA_HOME=
# default settings for starting pulsar broker
# Log4j configuration file
# PULSAR_LOG_CONF=
# Logs location
# PULSAR_LOG_DIR=
# Configuration file of settings used in broker server
# PULSAR_BROKER_CONF=
# Configuration file of settings used in bookie server
# PULSAR_BOOKKEEPER_CONF=
# Configuration file of settings used in zookeeper server
# PULSAR_ZK_CONF=
# Configuration file of settings used in global zookeeper server
# PULSAR_GLOBAL_ZK_CONF=
# Extra options to be passed to the jvm
PULSAR_MEM="-Xmx128m -XX:MaxDirectMemorySize=128m"
# Garbage collection options
PULSAR_GC=" -client "
# Extra options to be passed to the jvm
PULSAR_EXTRA_OPTS="${PULSAR_EXTRA_OPTS} ${PULSAR_MEM} ${PULSAR_GC} -Dio.netty.leakDetectionLevel=disabled"
# Add extra paths to the bookkeeper classpath
# PULSAR_EXTRA_CLASSPATH=
#Folder where the Bookie server PID file should be stored
#PULSAR_PID_DIR=
#Wait time before forcefully killing the pulsar server instance, if the stop is not successful
#PULSAR_STOP_TIMEOUT=

View File

@ -1,7 +0,0 @@
{
"type": "STRING",
"schema": "",
"properties": {
"key1" : "value1"
}
}

View File

@ -1,735 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
### --- General broker settings --- ###
# Zookeeper quorum connection string
zookeeperServers=
# Configuration Store connection string
configurationStoreServers=
brokerServicePort=6650
# Port to use to serve HTTP requests
webServicePort=8080
# Hostname or IP address the service binds on, default is 0.0.0.0.
bindAddress=0.0.0.0
# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
advertisedAddress=
# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numIOThreads=
# Number of threads to use for HTTP requests processing. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numHttpServerThreads=
# Name of the cluster to which this broker belongs to
clusterName=standalone
# Enable cluster's failure-domain which can distribute brokers into logical region
failureDomainsEnabled=false
# Zookeeper session timeout in milliseconds
zooKeeperSessionTimeoutMillis=60000
# ZooKeeper operation timeout in seconds
zooKeeperOperationTimeoutSeconds=60
# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed
brokerShutdownTimeoutMs=60000
# Enable backlog quota check. Enforces action on topic when the quota is reached
backlogQuotaCheckEnabled=true
# How often to check for topics that have reached the quota
backlogQuotaCheckIntervalInSeconds=60
# Default per-topic backlog quota limit
backlogQuotaDefaultLimitGB=10
# Default ttl for namespaces if ttl is not already configured at namespace policies. (disable default-ttl with value 0)
ttlDurationDefaultInSeconds=0
# Enable the deletion of inactive topics
brokerDeleteInactiveTopicsEnabled=true
# How often to check for inactive topics
brokerDeleteInactiveTopicsFrequencySeconds=60
# How frequently to proactively check and purge expired messages
messageExpiryCheckIntervalInMinutes=5
# How long to delay rewinding cursor and dispatching messages when active consumer is changed
activeConsumerFailoverDelayTimeMillis=1000
# How long to delete inactive subscriptions from last consuming
# When it is 0, inactive subscriptions are not deleted automatically
subscriptionExpirationTimeMinutes=0
# Enable subscription message redelivery tracker to send redelivery count to consumer (default is enabled)
subscriptionRedeliveryTrackerEnabled=true
# How frequently to proactively check and purge expired subscription
subscriptionExpiryCheckIntervalInMinutes=5
# Set the default behavior for message deduplication in the broker
# This can be overridden per-namespace. If enabled, broker will reject
# messages that were already stored in the topic
brokerDeduplicationEnabled=false
# Maximum number of producers whose information will be
# persisted for deduplication purposes
brokerDeduplicationMaxNumberOfProducers=10000
# Number of entries after which a dedup info snapshot is taken.
# A bigger interval will lead to fewer snapshots being taken, though it would
# increase the topic recovery time, when the entries published after the
# snapshot need to be replayed
brokerDeduplicationEntriesInterval=1000
# Time of inactivity after which the broker will discard the deduplication information
# relative to a disconnected producer. Default is 6 hours.
brokerDeduplicationProducerInactivityTimeoutMinutes=360
# When a namespace is created without specifying the number of bundle, this
# value will be used as the default
defaultNumberOfNamespaceBundles=4
# Enable check for minimum allowed client library version
clientLibraryVersionCheckEnabled=false
# Path for the file used to determine the rotation status for the broker when responding
# to service discovery health checks
statusFilePath=/usr/local/apache/htdocs
# Max number of unacknowledged messages allowed for a consumer on a shared subscription. The broker will stop sending
# messages to the consumer once this limit is reached, until the consumer starts acknowledging messages back.
# Using a value of 0 disables the unackedMessage limit check and the consumer can receive messages without any restriction
maxUnackedMessagesPerConsumer=50000
# Max number of unacknowledged messages allowed per shared subscription. The broker will stop dispatching messages to
# all consumers of the subscription once this limit is reached, until consumers start acknowledging messages back and
# the unack count drops to limit/2. Using a value of 0 disables the unackedMessage-limit
# check and the dispatcher can dispatch messages without any restriction
maxUnackedMessagesPerSubscription=200000
# Max number of unacknowledged messages allowed per broker. Once this limit is reached, the broker will stop dispatching
# messages to all shared subscriptions which have a higher number of unacked messages, until subscriptions start
# acknowledging messages back and the unack count drops to limit/2. Using a value of 0 disables the
# unackedMessage-limit check and the broker doesn't block dispatchers
maxUnackedMessagesPerBroker=0
# Once the broker reaches the maxUnackedMessagesPerBroker limit, it blocks subscriptions which have more unacked messages
# than this percentage limit, and those subscriptions will not receive any new messages until they ack back
# limit/2 messages
maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16
# Tick time to schedule task that checks topic publish rate limiting across all topics
# Reducing this to a lower value gives more accurate publish throttling but
# uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
topicPublisherThrottlingTickTimeMillis=2
# Tick time to schedule task that checks broker publish rate limiting across all topics
# Reducing this to a lower value gives more accurate publish throttling but
# uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
brokerPublisherThrottlingTickTimeMillis=50
# Max rate (messages per second) allowed to be published per broker if broker publish rate limiting is enabled
# (Disable message rate limit with value 0)
brokerPublisherThrottlingMaxMessageRate=0
# Max rate (bytes per second) allowed to be published per broker if broker publish rate limiting is enabled
# (Disable byte rate limit with value 0)
brokerPublisherThrottlingMaxByteRate=0
# Default messages-per-second dispatch throttling limit for every topic. Using a value of 0 disables default
# message dispatch throttling
dispatchThrottlingRatePerTopicInMsg=0
# Default bytes-per-second dispatch throttling limit for every topic. Using a value of 0 disables
# default message-byte dispatch throttling
dispatchThrottlingRatePerTopicInByte=0
# Dispatch rate-limiting relative to publish rate.
# (Enabling this flag will make the broker dynamically update the dispatch-rate relative to the publish-rate:
# throttle-dispatch-rate = publish-rate + configured dispatch-rate.)
dispatchThrottlingRateRelativeToPublishRate=false
# By default we enable dispatch-throttling for both caught up consumers as well as consumers who have
# backlog.
dispatchThrottlingOnNonBacklogConsumerEnabled=true
# Max number of concurrent lookup requests the broker allows, to throttle heavy incoming lookup traffic
maxConcurrentLookupRequest=50000
# Max number of concurrent topic loading requests the broker allows, to control the number of zk-operations
maxConcurrentTopicLoadRequest=5000
# Max number of concurrent non-persistent messages that can be processed per connection
maxConcurrentNonPersistentMessagePerConnection=1000
# Number of worker threads to serve non-persistent topic
numWorkerThreadsForNonPersistentTopic=8
# Enable broker to load persistent topics
enablePersistentTopics=true
# Enable broker to load non-persistent topics
enableNonPersistentTopics=true
# Max number of producers allowed to connect to a topic. Once this limit is reached, the broker will reject new producers
# until the number of connected producers decreases.
# Using a value of 0 disables the maxProducersPerTopic-limit check.
maxProducersPerTopic=0
# Max number of consumers allowed to connect to a topic. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerTopic-limit check.
maxConsumersPerTopic=0
# Max number of consumers allowed to connect to a subscription. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerSubscription-limit check.
maxConsumersPerSubscription=0
### --- TLS --- ###
# Deprecated - Use webServicePortTls and brokerServicePortTls instead
tlsEnabled=false
# Tls cert refresh duration in seconds (set 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Path for the trusted TLS certificate file.
# This cert is used to verify that any certs presented by connecting clients
# are signed by a certificate authority. If this verification
# fails, then the certs are untrusted and the connections are dropped.
tlsTrustCertsFilePath=
# Accept untrusted TLS certificate from client.
# If true, a client with a cert which cannot be verified with the
# 'tlsTrustCertsFilePath' cert will be allowed to connect to the server,
# though the cert will not be used for client authentication.
tlsAllowInsecureConnection=false
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# Examples:- [TLSv1.2, TLSv1.1, TLSv1]
tlsProtocols=
# Specify the tls cipher the broker will use to negotiate during TLS Handshake
# (a comma-separated list of ciphers).
# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
tlsCiphers=
# Trusted client certificates are required to connect over TLS.
# Reject the connection if the client certificate is not trusted.
# In effect, this requires that all connecting clients perform TLS client
# authentication.
tlsRequireTrustedClientCertOnConnect=false
### --- KeyStore TLS config variables --- ###
# Enable TLS with KeyStore type configuration in broker.
tlsEnabledWithKeyStore=false
# TLS Provider for KeyStore type
tlsProvider=
# TLS KeyStore type configuration in broker: JKS, PKCS12
tlsKeyStoreType=JKS
# TLS KeyStore path in broker
tlsKeyStore=
# TLS KeyStore password for broker
tlsKeyStorePassword=
# TLS TrustStore type configuration in broker: JKS, PKCS12
tlsTrustStoreType=JKS
# TLS TrustStore path in broker
tlsTrustStore=
# TLS TrustStore password for broker
tlsTrustStorePassword=
# Whether internal client use KeyStore type to authenticate with Pulsar brokers
brokerClientTlsEnabledWithKeyStore=false
# The TLS Provider used by internal client to authenticate with other Pulsar brokers
brokerClientSslProvider=
# TLS TrustStore type configuration for internal client: JKS, PKCS12
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStoreType=JKS
# TLS TrustStore path for internal client
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStore=
# TLS TrustStore password for internal client,
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStorePassword=
# Specify the tls cipher the internal client will use to negotiate during TLS Handshake
# (a comma-separated list of ciphers)
# e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256].
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsCiphers=
# Specify the tls protocols the broker will use to negotiate during TLS handshake
# (a comma-separated list of protocol names).
# e.g. [TLSv1.2, TLSv1.1, TLSv1]
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsProtocols=
### --- Authentication --- ###
# Role names that are treated as "proxy roles". If the broker sees a request with
#role as proxyRoles - it will demand to see a valid original principal.
proxyRoles=
# If this flag is set then the broker authenticates the original Auth data
# else it just accepts the originalPrincipal and authorizes it (if required).
authenticateOriginalAuthData=false
# Enable authentication
authenticationEnabled=false
# Authentication provider name list, which is a comma-separated list of class names
authenticationProviders=
# Enforce authorization
authorizationEnabled=false
# Authorization provider fully qualified class-name
authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Allow wildcard matching in authorization
# (wildcard matching only applicable if wildcard-char:
# * presents at first or last position eg: *.pulsar.service, pulsar.service.*)
authorizationAllowWildcardsMatching=false
# Role names that are treated as "super-user", meaning they will be able to do all admin
# operations and publish/consume from all topics
superUserRoles=
# Authentication settings of the broker itself. Used when the broker connects to other brokers,
# either in same or other clusters
brokerClientAuthenticationPlugin=
brokerClientAuthenticationParameters=
# Supported Athenz provider domain names(comma separated) for authentication
athenzDomainNames=
# When this parameter is not empty, unauthenticated users perform as anonymousUserRole
anonymousUserRole=
# The token "claim" that will be interpreted as the authentication "role" or "principal" by AuthenticationProviderToken (defaults to "sub" if blank)
tokenAuthClaim=
### --- BookKeeper Client --- ###
# Authentication plugin to use when connecting to bookies
bookkeeperClientAuthenticationPlugin=
# BookKeeper auth plugin implementation-specific parameter names and values
bookkeeperClientAuthenticationParametersName=
bookkeeperClientAuthenticationParameters=
# Timeout for BK add / read operations
bookkeeperClientTimeoutInSeconds=60
# Speculative reads are initiated if a read request doesn't complete within a certain time
# Using a value of 0 disables speculative reads
bookkeeperClientSpeculativeReadTimeoutInMillis=0
# Enable bookies health check. Bookies that have more than the configured number of failure within
# the interval will be quarantined for some time. During this period, new ledgers won't be created
# on these bookies
bookkeeperClientHealthCheckEnabled=true
bookkeeperClientHealthCheckIntervalSeconds=60
bookkeeperClientHealthCheckErrorThresholdPerInterval=5
bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
# Enable rack-aware bookie selection policy. BK will choose bookies from different racks when
# forming a new bookie ensemble
bookkeeperClientRackawarePolicyEnabled=true
# Enable region-aware bookie selection policy. BK will choose bookies from
# different regions and racks when forming a new bookie ensemble.
# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored
bookkeeperClientRegionawarePolicyEnabled=false
# Enable/disable reordering read sequence on reading entries.
bookkeeperClientReorderReadSequenceEnabled=false
# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie
# outside the specified groups will not be used by the broker
bookkeeperClientIsolationGroups=
# Enable bookie secondary-isolation group if bookkeeperClientIsolationGroups doesn't
# have enough bookies available.
bookkeeperClientSecondaryIsolationGroups=
# Minimum number of bookies that should be available as part of bookkeeperClientIsolationGroups,
# otherwise the broker will include bookkeeperClientSecondaryIsolationGroups bookies in the isolation list.
bookkeeperClientMinAvailableBookiesInIsolationGroups=
# Set the client security provider factory class name.
# Default: org.apache.bookkeeper.tls.TLSContextFactory
bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory
# Enable tls authentication with bookie
bookkeeperTLSClientAuthentication=false
# Supported type: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSKeyFileType=PEM
#Supported type: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSTrustCertTypes=PEM
# Path to file containing keystore password, if the client keystore is password protected.
bookkeeperTLSKeyStorePasswordPath=
# Path to file containing truststore password, if the client truststore is password protected.
bookkeeperTLSTrustStorePasswordPath=
# Path for the TLS private key file
bookkeeperTLSKeyFilePath=
# Path for the TLS certificate file
bookkeeperTLSCertificateFilePath=
# Path for the trusted TLS certificate file
bookkeeperTLSTrustCertsFilePath=
# Enable/disable disk weight based placement. Default is false
bookkeeperDiskWeightBasedPlacementEnabled=false
# Set the interval to check the need for sending an explicit LAC
# A value of '0' disables sending any explicit LACs. Default is 0.
bookkeeperExplicitLacIntervalInMills=0
### --- Managed Ledger --- ###
# Number of bookies to use when creating a ledger
managedLedgerDefaultEnsembleSize=1
# Number of copies to store for each message
managedLedgerDefaultWriteQuorum=1
# Number of guaranteed copies (acks to wait before write is complete)
managedLedgerDefaultAckQuorum=1
# Default type of checksum to use when writing to BookKeeper. Default is "CRC32C"
# Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum).
managedLedgerDigestType=CRC32C
# Number of threads to be used for managed ledger tasks dispatching
managedLedgerNumWorkerThreads=4
# Number of threads to be used for managed ledger scheduled tasks
managedLedgerNumSchedulerThreads=4
# Amount of memory to use for caching data payload in managed ledger. This memory
# is allocated from JVM direct memory and it's shared across all the topics
# running in the same broker. By default, uses 1/5th of available direct memory
managedLedgerCacheSizeMB=
# Whether we should make a copy of the entry payloads when inserting in cache
managedLedgerCacheCopyEntries=false
# Threshold to which bring down the cache level when eviction is triggered
managedLedgerCacheEvictionWatermark=0.9
# Configure the cache eviction frequency for the managed ledger cache (evictions/sec)
managedLedgerCacheEvictionFrequency=100.0
# All entries that have stayed in cache for more than the configured time, will be evicted
managedLedgerCacheEvictionTimeThresholdMillis=1000
# Configure the threshold (in number of entries) from where a cursor should be considered 'backlogged'
# and thus should be set as inactive.
managedLedgerCursorBackloggedThreshold=1000
# Rate limit the amount of writes generated by consumer acking the messages
managedLedgerDefaultMarkDeleteRateLimit=0.1
# Max number of entries to append to a ledger before triggering a rollover
# A ledger rollover is triggered on these conditions
# * Either the max rollover time has been reached
# * or max entries have been written to the ledger and at least min-time
# has passed
managedLedgerMaxEntriesPerLedger=50000
# Minimum time between ledger rollover for a topic
managedLedgerMinLedgerRolloverTimeMinutes=10
# Maximum time before forcing a ledger rollover for a topic
managedLedgerMaxLedgerRolloverTimeMinutes=240
# Max number of entries to append to a cursor ledger
managedLedgerCursorMaxEntriesPerLedger=50000
# Max time before triggering a rollover on a cursor ledger
managedLedgerCursorRolloverTimeInSeconds=14400
# Max number of "acknowledgment holes" that are going to be persistently stored.
# When acknowledging out of order, a consumer will leave holes that are supposed
# to be quickly filled by acking all the messages. The information of which
# messages are acknowledged is persisted by compressing in "ranges" of messages
# that were acknowledged. After the max number of ranges is reached, the information
# will only be tracked in memory and messages will be redelivered in case of
# crashes.
managedLedgerMaxUnackedRangesToPersist=10000
# Max number of "acknowledgment holes" that can be stored in Zookeeper. If number of unack message range is higher
# than this limit then broker will persist unacked ranges into bookkeeper to avoid additional data overhead into
# zookeeper.
managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000
# Skip reading non-recoverable/unreadable data-ledgers under the managed-ledger's list. It helps when data-ledgers get
# corrupted at bookkeeper and the managed-cursor is stuck at that ledger.
autoSkipNonRecoverableData=false
# operation timeout while updating managed-ledger metadata.
managedLedgerMetadataOperationsTimeoutSeconds=60
# Read entries timeout when broker tries to read messages from bookkeeper.
managedLedgerReadEntryTimeoutSeconds=0
# Add entry timeout when broker tries to publish message to bookkeeper (0 to disable it).
managedLedgerAddEntryTimeoutSeconds=0
# Use Open Range-Set to cache unacked messages
managedLedgerUnackedRangesOpenCacheSetEnabled=true
### --- Load balancer --- ###
loadManagerClassName=org.apache.pulsar.broker.loadbalance.NoopLoadManager
# Enable load balancer
loadBalancerEnabled=false
# Percentage of change to trigger load report update
loadBalancerReportUpdateThresholdPercentage=10
# maximum interval to update load report
loadBalancerReportUpdateMaxIntervalMinutes=15
# Frequency of report to collect
loadBalancerHostUsageCheckIntervalMinutes=1
# Load shedding interval. The broker periodically checks whether some traffic should be offloaded from
# over-loaded brokers to other under-loaded brokers
loadBalancerSheddingIntervalMinutes=1
# Prevent the same topics from being shed and moved to another broker more than once within this timeframe
loadBalancerSheddingGracePeriodMinutes=30
# Usage threshold to allocate max number of topics to broker
loadBalancerBrokerMaxTopics=50000
# Interval to flush dynamic resource quota to ZooKeeper
loadBalancerResourceQuotaUpdateIntervalMinutes=15
# enable/disable namespace bundle auto split
loadBalancerAutoBundleSplitEnabled=true
# enable/disable automatic unloading of split bundles
loadBalancerAutoUnloadSplitBundlesEnabled=true
# maximum topics in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxTopics=1000
# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxSessions=1000
# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxMsgRate=30000
# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered
loadBalancerNamespaceBundleMaxBandwidthMbytes=100
# maximum number of bundles in a namespace
loadBalancerNamespaceMaximumBundles=128
### --- Replication --- ###
# Enable replication metrics
replicationMetricsEnabled=true
# Max number of connections to open for each broker in a remote cluster
# More connections host-to-host lead to better throughput over high-latency
# links.
replicationConnectionsPerBroker=16
# Replicator producer queue size
replicationProducerQueueSize=1000
# Default message retention time
defaultRetentionTimeInMinutes=0
# Default retention size
defaultRetentionSizeInMB=0
# How often to check whether the connections are still alive
keepAliveIntervalSeconds=30
### --- WebSocket --- ###
# Enable the WebSocket API service in broker
webSocketServiceEnabled=true
# Number of IO threads in Pulsar Client used in WebSocket proxy
webSocketNumIoThreads=8
# Number of connections per Broker in Pulsar Client used in WebSocket proxy
webSocketConnectionsPerBroker=8
# Time in milliseconds that idle WebSocket session times out
webSocketSessionIdleTimeoutMillis=300000
### --- Metrics --- ###
# Enable topic level metrics
exposeTopicLevelMetricsInPrometheus=true
# Classname of Pluggable JVM GC metrics logger that can log GC specific metrics
# jvmGCMetricsLoggerClassName=
### --- Broker Web Stats --- ###
# Enable topic level metrics
exposePublisherStats=true
# Enable exposing the precise backlog stats.
# Set to false to use the published counter and consumed counter to calculate the backlog; this is more efficient but may be inaccurate.
# Default is false.
exposePreciseBacklogInPrometheus=false
### --- Deprecated config variables --- ###
# Deprecated. Use configurationStoreServers
globalZookeeperServers=
# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds
brokerServicePurgeInactiveFrequencyInSeconds=60
### --- BookKeeper Configuration --- #####
ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
# Size of Write Cache. Memory is allocated from JVM direct memory.
# Write cache is used to buffer entries before flushing into the entry log
# For good performance, it should be big enough to hold a substantial amount
# of entries in the flush interval
# By default it will be allocated to 1/4th of the available direct memory
dbStorage_writeCacheMaxSizeMb=
# Size of Read cache. Memory is allocated from JVM direct memory.
# This read cache is pre-filled doing read-ahead whenever a cache miss happens
# By default it will be allocated to 1/4th of the available direct memory
dbStorage_readAheadCacheMaxSizeMb=
# How many entries to pre-fill in cache after a read cache miss
dbStorage_readAheadCacheBatchSize=1000
flushInterval=60000
## RocksDB specific configurations
## DbLedgerStorage uses RocksDB to store the indexes from
## (ledgerId, entryId) -> (entryLog, offset)
# Size of RocksDB block-cache. For best performance, this cache
# should be big enough to hold a significant portion of the index
# database which can reach ~2GB in some cases
# Default is to use 10% of the direct memory size
dbStorage_rocksDB_blockCacheSize=
# Other RocksDB specific tunables
dbStorage_rocksDB_writeBufferSizeMB=4
dbStorage_rocksDB_sstSizeInMB=4
dbStorage_rocksDB_blockSize=4096
dbStorage_rocksDB_bloomFilterBitsPerKey=10
dbStorage_rocksDB_numLevels=-1
dbStorage_rocksDB_numFilesInLevel0=4
dbStorage_rocksDB_maxSizeInLevel1MB=256
# Maximum latency to impose on a journal write to achieve grouping
journalMaxGroupWaitMSec=1
# Should the data be fsynced on journal before acknowledgment.
journalSyncData=false
# For each ledger dir, maximum disk space which can be used.
# Default is 0.95f, i.e. at most 95% of the disk can be used, after which nothing will
# be written to that partition. If all ledger dir partitions are full, then the bookie
# will switch to read-only mode if 'readOnlyModeEnabled=true' is set, else it will
# shut down.
# Valid values should be in between 0 and 1 (exclusive).
diskUsageThreshold=0.99
# The disk free space low water mark threshold.
# Disk is considered full when usage threshold is exceeded.
# Disk returns back to non-full state when usage is below low water mark threshold.
# This prevents it from going back and forth between these states frequently
# when concurrent writes and compaction are happening. This also prevents the bookie from
# switching frequently between read-only and read-write states in the same cases.
diskUsageWarnThreshold=0.99
# Whether the bookie is allowed to use a loopback interface as its primary
# interface (i.e. the interface it uses to establish its identity).
# By default, loopback interfaces are not allowed as the primary
# interface.
# Using a loopback interface as the primary interface usually indicates
# a configuration error. For example, it's fairly common in some VPS setups
# to not configure a hostname, or to have the hostname resolve to
# 127.0.0.1. If this is the case, then all bookies in the cluster will
# establish their identities as 127.0.0.1:3181, and only one will be able
# to join the cluster. For VPSs configured like this, you should explicitly
# set the listening interface.
allowLoopback=true
# The interval before triggering the next garbage collection, in milliseconds
# Since garbage collection runs in the background, too-frequent gc
# will hurt performance. It is better to use a longer gc
# interval if there is enough disk capacity.
gcWaitTime=300000
# Enable topic auto creation if a new producer or consumer connects (disable auto creation with value false)
allowAutoTopicCreation=true
# The type of topic that is allowed to be automatically created (partitioned/non-partitioned)
allowAutoTopicCreationType=non-partitioned
# The default number of partitions for topics that are automatically created if allowAutoTopicCreationType is partitioned.
defaultNumPartitions=1
### --- Transaction config variables --- ###
transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider
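
As a companion to the brokerDeduplication* settings above: deduplication keys on the producer name plus a monotonically increasing sequence id, so replayed messages with an already-persisted sequence id are dropped. Below is a hedged Pulsar Java client sketch; the service URL matches brokerServicePort above, while the topic and producer names are placeholders.

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

public class DedupProducerExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")     // brokerServicePort above
                .build();

        // A stable producer name lets the broker track the highest sequence id it has stored.
        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("persistent://public/default/dedup-demo")
                .producerName("dedup-demo-producer")
                .create();

        for (long seq = 0; seq < 3; seq++) {
            // Messages replayed with an already-persisted sequence id are dropped
            // when brokerDeduplicationEnabled=true (or the namespace policy enables it).
            producer.newMessage().sequenceId(seq).value("payload-" + seq).send();
        }

        producer.close();
        client.close();
    }
}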

View File

@ -1,120 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
### --- Web Socket proxy settings --- ###
# Configuration Store connection string
configurationStoreServers=
# Zookeeper session timeout in milliseconds
zooKeeperSessionTimeoutMillis=30000
# Pulsar cluster url to connect to broker (optional if configurationStoreServers present)
serviceUrl=
serviceUrlTls=
brokerServiceUrl=
brokerServiceUrlTls=
# Port to use to serve HTTP requests
webServicePort=8080
# Port to use to serve HTTPS requests
webServicePortTls=
# Path for the file used to determine the rotation status for the proxy-instance when responding
# to service discovery health checks
statusFilePath=
# Hostname or IP address the service binds on, default is 0.0.0.0.
bindAddress=0.0.0.0
# Name of the pulsar cluster to connect to
clusterName=
# Number of IO threads in Pulsar Client used in WebSocket proxy
webSocketNumIoThreads=8
# Number of threads to use in HTTP server. Default is Runtime.getRuntime().availableProcessors()
numHttpServerThreads=
# Number of connections per Broker in Pulsar Client used in WebSocket proxy
webSocketConnectionsPerBroker=8
# Time in milliseconds that idle WebSocket session times out
webSocketSessionIdleTimeoutMillis=300000
### --- Authentication --- ###
# Enable authentication
authenticationEnabled=false
# Authentication provider name list, which is a comma-separated list of class names
authenticationProviders=
# Enforce authorization
authorizationEnabled=false
# Authorization provider fully qualified class-name
authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Allow wildcard matching in authorization
# (wildcard matching is only applicable if the wildcard character '*'
# appears at the first or last position, e.g. *.pulsar.service, pulsar.service.*)
authorizationAllowWildcardsMatching=false
# Role names that are treated as "super-user", meaning they will be able to do all admin
# operations and publish/consume from all topics
superUserRoles=
# Authentication settings of the proxy itself. Used to connect to brokers
brokerClientTlsEnabled=false
brokerClientAuthenticationPlugin=
brokerClientAuthenticationParameters=
brokerClientTrustCertsFilePath=
# When this parameter is not empty, unauthenticated users act as the anonymousUserRole
anonymousUserRole=
### --- TLS --- ###
# Deprecated - use webServicePortTls and brokerClientTlsEnabled instead
tlsEnabled=false
# Accept untrusted TLS certificate from client
tlsAllowInsecureConnection=false
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Path for the trusted TLS certificate file
tlsTrustCertsFilePath=
# Specify whether client certificates are required for TLS.
# Reject the connection if the client certificate is not trusted.
tlsRequireTrustedClientCertOnConnect=false
# TLS cert refresh duration in seconds (set to 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
### --- Deprecated config variables --- ###
# Deprecated. Use configurationStoreServers
globalZookeeperServers=

View File

@@ -1,56 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=data/zookeeper
# the port at which the clients will connect
clientPort=2181
# the port at which the admin will listen
admin.enableServer=true
admin.serverPort=9990
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
# Requires updates to be synced to media of the transaction log before finishing
# processing the update. If this option is set to 'no', ZooKeeper will not require
# updates to be synced to the media.
# WARNING: it's not recommended to run a production ZK cluster with forceSync disabled.
forceSync=yes
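
initLimit and syncLimit above are expressed in ticks, so the effective time windows follow from multiplying them by tickTime. A minimal sketch (not part of this commit) of that arithmetic for the sample values:

// Minimal sketch: how the sample ZooKeeper values above combine into time windows.
public class ZkTimingSketch {
    public static void main(String[] args) {
        int tickTimeMs = 2000;
        int initLimitTicks = 10;
        int syncLimitTicks = 5;
        // Initial synchronization phase: 10 ticks * 2000 ms = 20000 ms
        System.out.println("initial sync window = " + (initLimitTicks * tickTimeMs) + " ms");
        // Request/acknowledgement window: 5 ticks * 2000 ms = 10000 ms
        System.out.println("follower sync window = " + (syncLimitTicks * tickTimeMs) + " ms");
    }
}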

View File

@@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>
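
The pattern above renders the MDC keys dbz.connectorType, dbz.connectorName and dbz.connectorContext via %X{...}. A minimal sketch (not part of this commit) of populating those keys through SLF4J's MDC so the pattern can print them; the sample values are invented.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

// Minimal sketch: set the MDC keys referenced by the logback pattern above.
public class MdcSketch {
    private static final Logger LOGGER = LoggerFactory.getLogger(MdcSketch.class);

    public static void main(String[] args) {
        MDC.put("dbz.connectorType", "postgres");   // invented sample value
        MDC.put("dbz.connectorName", "testc");      // invented sample value
        MDC.put("dbz.connectorContext", "streaming"); // invented sample value
        try {
            // Rendered by the pattern as "... postgres|testc|streaming Connected ..."
            LOGGER.info("Connected to the database");
        }
        finally {
            MDC.clear();
        }
    }
}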

View File

@@ -1,186 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-redis</artifactId>
<name>Debezium Server Redis Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-storage-redis</artifactId>
</dependency>
<!-- Target systems -->
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-mysql</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-mysql</artifactId>
<scope>runtime</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<reuseForks>false</reuseForks>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do not perform any Docker-related functionality
To use, specify "-DskipITs" on the Maven command line.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,131 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.storage.redis.RedisClient;
import io.debezium.util.IoUtil;
import io.smallrye.mutiny.tuples.Tuple2;
public class RedisMemoryThreshold {
private static final Logger LOGGER = LoggerFactory.getLogger(RedisMemoryThreshold.class);
private static final String INFO_MEMORY = "memory";
private static final String INFO_MEMORY_SECTION_MAXMEMORY = "maxmemory";
private static final String INFO_MEMORY_SECTION_USEDMEMORY = "used_memory";
private static final Supplier<Boolean> MEMORY_OK = () -> true;
private RedisClient client;
private int memoryThreshold;
private long memoryLimit;
private Supplier<Boolean> isMemoryOk;
public RedisMemoryThreshold(RedisClient client, RedisStreamChangeConsumerConfig config) {
this.client = client;
this.memoryThreshold = config.getMemoryThreshold();
this.memoryLimit = 1024L * 1024 * config.getMemoryLimitMb();
if (memoryThreshold == 0 || memoryTuple(memoryLimit) == null) {
disable();
}
else {
this.isMemoryOk = () -> isMemoryOk();
}
}
public boolean check() {
return isMemoryOk.get();
}
private boolean isMemoryOk() {
Tuple2<Long, Long> memoryTuple = memoryTuple(memoryLimit);
if (memoryTuple == null) {
disable();
return true;
}
long maxMemory = memoryTuple.getItem2();
if (maxMemory > 0) {
long usedMemory = memoryTuple.getItem1();
long percentage = usedMemory * 100 / maxMemory;
if (percentage >= memoryThreshold) {
LOGGER.warn("Memory threshold percentage was reached (current: {}%, configured: {}%, used_memory: {}, maxmemory: {}).", percentage, memoryThreshold,
usedMemory, maxMemory);
return false;
}
}
return true;
}
private Tuple2<Long, Long> memoryTuple(long defaultMaxMemory) {
String memory = client.info(INFO_MEMORY);
Map<String, String> infoMemory = new HashMap<>();
try {
IoUtil.readLines(new ByteArrayInputStream(memory.getBytes(StandardCharsets.UTF_8)), line -> {
String[] pair = line.split(":");
if (pair.length == 2) {
infoMemory.put(pair[0], pair[1]);
}
});
}
catch (IOException e) {
LOGGER.error("Cannot parse Redis 'info memory' result '{}'.", memory, e);
return null;
}
Long usedMemory = parseLong(INFO_MEMORY_SECTION_USEDMEMORY, infoMemory.get(INFO_MEMORY_SECTION_USEDMEMORY));
if (usedMemory == null) {
return null;
}
Long maxMemory = parseLong(INFO_MEMORY_SECTION_MAXMEMORY, infoMemory.get(INFO_MEMORY_SECTION_MAXMEMORY));
if (maxMemory == null) {
if (defaultMaxMemory == 0) {
LOGGER.warn("Memory limit is disabled '{}'.", defaultMaxMemory);
return null;
}
LOGGER.debug("Using memory limit with value '{}'.", defaultMaxMemory);
maxMemory = defaultMaxMemory;
}
else if (maxMemory == 0) {
LOGGER.debug("Redis 'info memory' field '{}' is {}. Consider configuring it.", INFO_MEMORY_SECTION_MAXMEMORY, maxMemory);
if (defaultMaxMemory > 0) {
maxMemory = defaultMaxMemory;
LOGGER.debug("Using memory limit with value '{}'.", defaultMaxMemory);
}
}
return Tuple2.of(usedMemory, maxMemory);
}
private void disable() {
isMemoryOk = MEMORY_OK;
LOGGER.warn("Memory threshold percentage check is disabled!");
}
private Long parseLong(String name, String value) {
try {
return Long.valueOf(value);
}
catch (NumberFormatException e) {
LOGGER.debug("Cannot parse Redis 'info memory' field '{}' with value '{}'.", name, value);
}
return null;
}
}
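
For reference, a minimal standalone sketch (not part of this commit) of the percentage check the class above applies to the used_memory and maxmemory values taken from Redis INFO; the sample figures are invented.

// Minimal sketch: the threshold arithmetic applied by RedisMemoryThreshold, in isolation.
public class MemoryThresholdSketch {
    static boolean isMemoryOk(long usedMemoryBytes, long maxMemoryBytes, int thresholdPercentage) {
        if (maxMemoryBytes <= 0) {
            return true; // no limit reported and no fallback configured
        }
        long percentage = usedMemoryBytes * 100 / maxMemoryBytes;
        return percentage < thresholdPercentage;
    }

    public static void main(String[] args) {
        long used = 90L * 1024 * 1024;  // 90 MB used
        long max = 100L * 1024 * 1024;  // 100 MB limit
        System.out.println(isMemoryOk(used, max, 85)); // false: 90% >= 85%
    }
}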

View File

@@ -1,26 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Deprecated and replaced with {@link io.debezium.storage.redis.offset.RedisOffsetBackingStore}
*
*/
@Deprecated
public class RedisOffsetBackingStore extends io.debezium.storage.redis.offset.RedisOffsetBackingStore {
private static final Logger LOGGER = LoggerFactory.getLogger(RedisOffsetBackingStore.class);
public RedisOffsetBackingStore() {
LOGGER.warn("Class '{}' is deprecated and scheduled for removal, please use '{}'",
RedisOffsetBackingStore.class.getName(),
io.debezium.storage.redis.offset.RedisOffsetBackingStore.class.getName());
}
}

View File

@@ -1,26 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Deprecated and replaced with {@link io.debezium.storage.redis.history.RedisSchemaHistory}
*
*/
@Deprecated
public final class RedisSchemaHistory extends io.debezium.storage.redis.history.RedisSchemaHistory {
private static final Logger LOGGER = LoggerFactory.getLogger(RedisSchemaHistory.class);
public RedisSchemaHistory() {
LOGGER.warn("Class '{}' is deprecated and scheduled for removal, please use '{}'",
RedisSchemaHistory.class.getName(),
io.debezium.storage.redis.history.RedisSchemaHistory.class.getName());
}
}

View File

@@ -1,217 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static io.debezium.server.redis.RedisStreamChangeConsumerConfig.MESSAGE_FORMAT_COMPACT;
import static io.debezium.server.redis.RedisStreamChangeConsumerConfig.MESSAGE_FORMAT_EXTENDED;
import java.time.Duration;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.config.Configuration;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.storage.redis.RedisClient;
import io.debezium.storage.redis.RedisClientConnectionException;
import io.debezium.storage.redis.RedisConnection;
import io.debezium.util.DelayStrategy;
/**
* Implementation of the consumer that delivers messages into a Redis stream destination.
*
* @author M Sazzadul Hoque
* @author Yossi Shirizli
*/
@Named("redis")
@Dependent
public class RedisStreamChangeConsumer extends BaseChangeConsumer
implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(RedisStreamChangeConsumer.class);
private static final String DEBEZIUM_REDIS_SINK_CLIENT_NAME = "debezium:redis:sink";
private static final String EXTENDED_MESSAGE_KEY_KEY = "key";
private static final String EXTENDED_MESSAGE_VALUE_KEY = "value";
private RedisClient client;
private BiFunction<String, String, Map<String, String>> recordMapFunction;
private RedisMemoryThreshold isMemoryOk;
private RedisStreamChangeConsumerConfig config;
@PostConstruct
void connect() {
Configuration configuration = Configuration.from(getConfigSubset(ConfigProvider.getConfig(), ""));
config = new RedisStreamChangeConsumerConfig(configuration);
String messageFormat = config.getMessageFormat();
if (MESSAGE_FORMAT_EXTENDED.equals(messageFormat)) {
recordMapFunction = (key, value) -> {
Map<String, String> recordMap = new LinkedHashMap<>(2);
recordMap.put(EXTENDED_MESSAGE_KEY_KEY, key);
recordMap.put(EXTENDED_MESSAGE_VALUE_KEY, value);
return recordMap;
};
}
else if (MESSAGE_FORMAT_COMPACT.equals(messageFormat)) {
recordMapFunction = Collections::singletonMap;
}
RedisConnection redisConnection = new RedisConnection(config.getAddress(), config.getUser(), config.getPassword(), config.getConnectionTimeout(),
config.getSocketTimeout(), config.isSslEnabled());
client = redisConnection.getRedisClient(DEBEZIUM_REDIS_SINK_CLIENT_NAME, config.isWaitEnabled(), config.getWaitTimeout(), config.isWaitRetryEnabled(),
config.getWaitRetryDelay());
isMemoryOk = new RedisMemoryThreshold(client, config);
}
@PreDestroy
void close() {
try {
if (client != null) {
client.close();
}
}
catch (Exception e) {
LOGGER.warn("Exception while closing Jedis: {}", client, e);
}
finally {
client = null;
}
}
/**
* Split collection to batches by batch size using a stream
*/
private <T> Stream<List<T>> batches(List<T> source, int length) {
if (source.isEmpty()) {
return Stream.empty();
}
int size = source.size();
int fullChunks = (size - 1) / length;
return IntStream.range(0, fullChunks + 1).mapToObj(
n -> source.subList(n * length, n == fullChunks ? size : (n + 1) * length));
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records,
RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
DelayStrategy delayStrategy = DelayStrategy.exponential(Duration.ofMillis(config.getInitialRetryDelay()), Duration.ofMillis(config.getMaxRetryDelay()));
LOGGER.trace("Handling a batch of {} records", records.size());
batches(records, config.getBatchSize()).forEach(batch -> {
boolean completedSuccessfully = false;
// Clone the batch and remove the records that have been successfully processed.
// Move to the next batch once this list is empty.
List<ChangeEvent<Object, Object>> clonedBatch = batch.stream().collect(Collectors.toList());
// As long as we fail to write the current batch to the stream, we retry if the reason was either a connection error or OOM in Redis.
while (!completedSuccessfully) {
if (client == null) {
// Try to reconnect
try {
connect();
continue; // Managed to establish a new connection to Redis, avoid a redundant retry
}
catch (Exception e) {
close();
LOGGER.error("Can't connect to Redis", e);
}
}
else if (isMemoryOk.check()) {
try {
LOGGER.trace("Preparing a Redis Pipeline of {} records", clonedBatch.size());
List<SimpleEntry<String, Map<String, String>>> recordsMap = new ArrayList<>(clonedBatch.size());
for (ChangeEvent<Object, Object> record : clonedBatch) {
String destination = streamNameMapper.map(record.destination());
String key = (record.key() != null) ? getString(record.key()) : config.getNullKey();
String value = (record.value() != null) ? getString(record.value()) : config.getNullValue();
Map<String, String> recordMap = recordMapFunction.apply(key, value);
recordsMap.add(new SimpleEntry<>(destination, recordMap));
}
List<String> responses = client.xadd(recordsMap);
List<ChangeEvent<Object, Object>> processedRecords = new ArrayList<ChangeEvent<Object, Object>>();
int index = 0;
int totalOOMResponses = 0;
for (String message : responses) {
// When Redis reaches its max memory limitation, an OOM error message will be retrieved.
// In this case, we will retry executing the failed commands, assuming some memory will eventually be freed as a result
// of elements being evicted from the stream by the target DB.
if (message.contains("OOM command not allowed when used memory > 'maxmemory'")) {
totalOOMResponses++;
}
else {
// Mark the record as processed
ChangeEvent<Object, Object> currentRecord = clonedBatch.get(index);
committer.markProcessed(currentRecord);
processedRecords.add(currentRecord);
}
index++;
}
clonedBatch.removeAll(processedRecords);
if (totalOOMResponses > 0) {
LOGGER.warn("Redis runs OOM, {} command(s) failed", totalOOMResponses);
}
if (clonedBatch.size() == 0) {
completedSuccessfully = true;
}
}
catch (RedisClientConnectionException jce) {
LOGGER.error("Connection error", jce);
close();
}
catch (Exception e) {
LOGGER.error("Unexpected Exception", e);
throw new DebeziumException(e);
}
}
else {
LOGGER.warn("Stopped consuming records!");
}
// Failed to execute the transaction, retry...
delayStrategy.sleepWhen(!completedSuccessfully);
}
});
// Mark the whole batch as finished once the sub batches completed
committer.markBatchFinished();
}
}
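
The batches helper above partitions the record list into sub-lists of the configured batch size. A minimal self-contained sketch (not part of this commit) of the same partitioning applied to a plain list:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

// Minimal sketch: the sub-list partitioning used by RedisStreamChangeConsumer#batches.
public class BatchSketch {
    static <T> Stream<List<T>> batches(List<T> source, int length) {
        if (source.isEmpty()) {
            return Stream.empty();
        }
        int size = source.size();
        int fullChunks = (size - 1) / length;
        return IntStream.range(0, fullChunks + 1)
                .mapToObj(n -> source.subList(n * length, n == fullChunks ? size : (n + 1) * length));
    }

    public static void main(String[] args) {
        List<Integer> records = IntStream.rangeClosed(1, 7).boxed().collect(Collectors.toList());
        batches(records, 3).forEach(System.out::println); // [1, 2, 3] [4, 5, 6] [7]
    }
}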

View File

@@ -1,102 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.List;
import java.util.Set;
import io.debezium.config.Configuration;
import io.debezium.config.Field;
import io.debezium.config.Field.RangeValidator;
import io.debezium.storage.redis.RedisCommonConfig;
import io.debezium.util.Collect;
public class RedisStreamChangeConsumerConfig extends RedisCommonConfig {
private static final String PROP_PREFIX = "debezium.sink.";
private static final int DEFAULT_BATCH_SIZE = 500;
private static final Field PROP_BATCH_SIZE = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "batch.size")
.withDefault(DEFAULT_BATCH_SIZE);
private static final String DEFAULT_NULL_KEY = "default";
private static final Field PROP_NULL_KEY = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "null.key")
.withDefault(DEFAULT_NULL_KEY);
private static final String DEFAULT_NULL_VALUE = "default";
private static final Field PROP_NULL_VALUE = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "null.value")
.withDefault(DEFAULT_NULL_VALUE);
static final String MESSAGE_FORMAT_COMPACT = "compact";
static final String MESSAGE_FORMAT_EXTENDED = "extended";
private static final Field PROP_MESSAGE_FORMAT = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "message.format")
.withAllowedValues(Set.of(MESSAGE_FORMAT_COMPACT, MESSAGE_FORMAT_EXTENDED))
.withDefault(MESSAGE_FORMAT_COMPACT);
private static final int DEFAULT_MEMORY_THRESHOLD_PERCENTAGE = 85;
private static final Field PROP_MEMORY_THRESHOLD_PERCENTAGE = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "memory.threshold.percentage")
.withDefault(DEFAULT_MEMORY_THRESHOLD_PERCENTAGE)
.withValidation(RangeValidator.between(0, 100));
private static final int DEFAULT_MEMORY_LIMIT_MB = 0;
private static final Field PROP_MEMORY_LIMIT_MB = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "memory.limit.mb")
.withDefault(DEFAULT_MEMORY_LIMIT_MB)
.withValidation(RangeValidator.atLeast(0));
private int batchSize;
private String nullKey;
private String nullValue;
private String messageFormat;
private int memoryThreshold;
private int memoryLimitMb;
public RedisStreamChangeConsumerConfig(Configuration config) {
super(config, PROP_PREFIX);
}
@Override
protected void init(Configuration config) {
super.init(config);
batchSize = config.getInteger(PROP_BATCH_SIZE);
nullKey = config.getString(PROP_NULL_KEY);
nullValue = config.getString(PROP_NULL_VALUE);
messageFormat = config.getString(PROP_MESSAGE_FORMAT);
memoryThreshold = config.getInteger(PROP_MEMORY_THRESHOLD_PERCENTAGE);
memoryLimitMb = config.getInteger(PROP_MEMORY_LIMIT_MB);
}
@Override
protected List<Field> getAllConfigurationFields() {
List<Field> fields = Collect.arrayListOf(PROP_BATCH_SIZE, PROP_NULL_KEY, PROP_NULL_VALUE, PROP_MESSAGE_FORMAT, PROP_MEMORY_THRESHOLD_PERCENTAGE);
fields.addAll(super.getAllConfigurationFields());
return fields;
}
public int getBatchSize() {
return batchSize;
}
public String getNullKey() {
return nullKey;
}
public String getNullValue() {
return nullValue;
}
public String getMessageFormat() {
return messageFormat;
}
public int getMemoryThreshold() {
return memoryThreshold;
}
public int getMemoryLimitMb() {
return memoryLimitMb;
}
}
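
The message.format property above selects between the compact and extended layouts that RedisStreamChangeConsumer writes to the stream. A minimal sketch (not part of this commit) of the two resulting field maps for one record; the key and value payloads are placeholders.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch: the two field layouts selected by debezium.sink.redis.message.format.
public class MessageFormatSketch {
    public static void main(String[] args) {
        String key = "{\"schema\":...,\"payload\":{\"id\":1}}";          // placeholder
        String value = "{\"schema\":...,\"payload\":{\"after\":{\"id\":1}}}"; // placeholder

        // "compact" (default): a single field whose name is the record key
        Map<String, String> compact = Collections.singletonMap(key, value);

        // "extended": two fixed fields named "key" and "value"
        Map<String, String> extended = new LinkedHashMap<>(2);
        extended.put("key", key);
        extended.put("value", value);

        System.out.println(compact);
        System.out.println(extended);
    }
}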

View File

@@ -1,182 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.AbstractMap.SimpleEntry;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.jupiter.api.Test;
import io.debezium.config.Configuration;
import io.debezium.storage.redis.RedisClient;
import io.debezium.util.Collect;
public class RedisMemoryThresholdTest {
private static final String _1MB = String.valueOf(1 * 1024 * 1024);
private static final String _2MB = String.valueOf(2 * 1024 * 1024);
private static final String _3MB = String.valueOf(3 * 1024 * 1024);
private static final String _4MB = String.valueOf(4 * 1024 * 1024);
@Test
public void testThresholdPercentageDisabled() {
int[] thresholdList = { 0 };
int[] limitMbList = { 0, 1, 2, 3, 4 };
String[] usedMemoryList = { "asd3f", "2048L", null, _1MB, _2MB, _3MB, _4MB };
String[] maxMemoryList = { "asd3f", "2048L", null, "0", _1MB, _2MB, _3MB, _4MB };
for (int threshold : thresholdList) {
for (int limit : limitMbList) {
for (String used : usedMemoryList) {
for (String max : maxMemoryList) {
isMemoryOk(threshold, limit, used, max, true);
}
}
}
}
}
@Test
public void testUsedMemoryBad() {
int[] thresholdList = { 1, 24, 25, 26, 49, 50, 52, 74, 75, 76, 99, 100 };
int[] limitMbList = { 0, 1, 2, 3, 4 };
String[] usedMemoryList = { "asd3f", "2048L" };
String[] maxMemoryList = { "asd3f", "2048L", null, "0", _1MB, _2MB, _3MB, _4MB };
for (int threshold : thresholdList) {
for (int limit : limitMbList) {
for (String used : usedMemoryList) {
for (String max : maxMemoryList) {
isMemoryOk(threshold, limit, used, max, true);
}
}
}
}
}
@Test
public void testUsedMemoryNotReported() {
int[] thresholdList = { 1, 24, 25, 26, 49, 50, 52, 74, 75, 76, 99, 100 };
int[] limitMbList = { 0, 1, 2, 3, 4 };
String[] usedMemoryList = { null };
String[] maxMemoryList = { "asd3f", "2048L", null, "0", _1MB, _2MB, _3MB, _4MB };
for (int threshold : thresholdList) {
for (int limit : limitMbList) {
for (String used : usedMemoryList) {
for (String max : maxMemoryList) {
isMemoryOk(threshold, limit, used, max, true);
}
}
}
}
}
@Test
public void testMemoryLimit() {
int[] thresholdList = { 1, 24, 25, 26, 49, 50, 52, 74, 75, 76, 99, 100 };
int[] limitMbList = { 0, 1, 2, 3, 4 };
String[] usedMemoryList = { _1MB, _2MB, _3MB, _4MB };
String[] maxMemoryList = { "asd3f", "2048L", null, "0" };
for (int threshold : thresholdList) {
for (int limit : limitMbList) {
for (String used : usedMemoryList) {
for (String max : maxMemoryList) {
isMemoryOk(threshold, limit, used, max, 0 == limit ? true : Long.parseLong(used) * 100 / (limit * 1024 * 1024) < threshold);
}
}
}
}
}
@Test
public void testMaxMemory() {
int[] thresholdList = { 1, 24, 25, 26, 49, 50, 52, 74, 75, 76, 99, 100 };
int[] limitMbList = { 0, 1, 2, 3, 4 };
String[] usedMemoryList = { _1MB, _2MB, _3MB, _4MB };
String[] maxMemoryList = { _1MB, _2MB, _3MB, _4MB };
for (int threshold : thresholdList) {
for (int limit : limitMbList) {
for (String used : usedMemoryList) {
for (String max : maxMemoryList) {
isMemoryOk(threshold, limit, used, max, Long.parseLong(used) * 100 / Long.parseLong(max) < threshold);
}
}
}
}
}
private void isMemoryOk(int threshold, int memoryLimitMb, String usedMemoryBytes, String maxMemoryBytes, boolean expectedResult) {
Configuration config = Configuration.from(Collect.hashMapOf("debezium.sink.redis.address", "localhost", "debezium.sink.redis.memory.threshold.percentage",
threshold, "debezium.sink.redis.memory.limit.mb", memoryLimitMb));
RedisMemoryThreshold isMemoryOk = new RedisMemoryThreshold(new RedisClientImpl(usedMemoryBytes, maxMemoryBytes), new RedisStreamChangeConsumerConfig(config));
Assert.assertEquals(String.format("isMemoryOk failed for threshold %s, limit %s, used %s, max %s)", threshold, memoryLimitMb, usedMemoryBytes, maxMemoryBytes),
expectedResult, isMemoryOk.check());
}
private static class RedisClientImpl implements RedisClient {
private String infoMemory;
private RedisClientImpl(String usedMemoryBytes, String maxMemoryBytes) {
this.infoMemory = (usedMemoryBytes == null ? "" : "used_memory:" + usedMemoryBytes + "\n") + (maxMemoryBytes == null ? "" : "maxmemory:" + maxMemoryBytes);
}
@Override
public String info(String section) {
return infoMemory;
}
@Override
public void disconnect() {
}
@Override
public void close() {
}
@Override
public String xadd(String key, Map<String, String> hash) {
return null;
}
@Override
public List<String> xadd(List<SimpleEntry<String, Map<String, String>>> hashes) {
return null;
}
@Override
public List<Map<String, String>> xrange(String key) {
return null;
}
@Override
public long xlen(String key) {
return 0;
}
@Override
public Map<String, String> hgetAll(String key) {
return null;
}
@Override
public long hset(byte[] key, byte[] field, byte[] value) {
return 0;
}
@Override
public long waitReplicas(int replicas, long timeout) {
return 0;
}
}
}

View File

@@ -1,98 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Map;
import org.junit.jupiter.api.Test;
import io.debezium.connector.postgresql.connection.PostgresConnection;
import io.debezium.doc.FixFor;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
/**
* Integration test that verifies reading and writing offsets from Redis key value store
*
* @author Oren Elias
*/
@QuarkusIntegrationTest
@TestProfile(RedisOffsetTestProfile.class)
@QuarkusTestResource(RedisTestResourceLifecycleManager.class)
public class RedisOffsetIT {
private static final int MESSAGE_COUNT = 4;
private static final String STREAM_NAME = "testc.inventory.customers";
private static final String OFFSETS_HASH_NAME = "metadata:debezium:offsets";
protected static Jedis jedis;
@Test
@FixFor("DBZ-4509")
public void testRedisStream() throws Exception {
jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT);
Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME);
assertThat(redisOffsets.size() > 0).isTrue();
}
/**
* Test retry mechanism when encountering Redis connectivity issues:
* 1. Make Redis unavailable while the server is up
* 2. Create a new table named redis_test in PostgreSQL and insert 5 records to it
* 3. Bring Redis up again and make sure the offsets have been written successfully
*/
@Test
@FixFor("DBZ-4509")
public void testRedisConnectionRetry() throws Exception {
Testing.Print.enable();
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
// wait until the offsets are written for the first time
TestUtils.awaitHashSizeGte(jedis, OFFSETS_HASH_NAME, 1);
// clear the offsets key
jedis.del(OFFSETS_HASH_NAME);
// pause container
Testing.print("Pausing container");
RedisTestResourceLifecycleManager.pause();
final PostgresConnection connection = TestUtils.getPostgresConnection();
Testing.print("Creating new redis_test table and inserting 5 records to it");
connection.execute(
"CREATE TABLE inventory.redis_test (id INT PRIMARY KEY)",
"INSERT INTO inventory.redis_test VALUES (1)",
"INSERT INTO inventory.redis_test VALUES (2)",
"INSERT INTO inventory.redis_test VALUES (3)",
"INSERT INTO inventory.redis_test VALUES (4)",
"INSERT INTO inventory.redis_test VALUES (5)");
connection.close();
Testing.print("Sleeping for 2 seconds to flush records");
Thread.sleep(2000);
Testing.print("Unpausing container");
RedisTestResourceLifecycleManager.unpause();
Testing.print("Sleeping for 2 seconds to reconnect to redis and write offset");
// wait until the offsets are re-written
TestUtils.awaitHashSizeGte(jedis, OFFSETS_HASH_NAME, 1);
Map<String, String> redisOffsets = jedis.hgetAll(OFFSETS_HASH_NAME);
jedis.close();
assertThat(redisOffsets.size() > 0).isTrue();
}
}

View File

@@ -1,31 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.quarkus.test.junit.QuarkusTestProfile;
public class RedisOffsetTestProfile implements QuarkusTestProfile {
@Override
public List<TestResourceEntry> testResources() {
return Arrays.asList(new TestResourceEntry(PostgresTestResourceLifecycleManager.class));
}
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<String, String>();
config.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
config.put("debezium.source.offset.storage", "io.debezium.server.redis.RedisOffsetBackingStore");
return config;
}
}

View File

@@ -1,53 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.junit.Assert.assertTrue;
import java.util.Map;
import org.junit.jupiter.api.Test;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
/**
* Integration tests for secured Redis
*
* @author Oren Elias
*/
@QuarkusIntegrationTest
@TestProfile(RedisSSLStreamTestProfile.class)
public class RedisSSLStreamIT {
/**
* Verifies that all the records of a PostgreSQL table are streamed to Redis
*/
@Test
public void testRedisStream() throws Exception {
HostAndPort address = HostAndPort.from(RedisSSLTestResourceLifecycleManager.getRedisContainerAddress());
Jedis jedis = new Jedis(address.getHost(), address.getPort(), true);
final int MESSAGE_COUNT = 4;
final String STREAM_NAME = "testc.inventory.customers";
final String HASH_NAME = "metadata:debezium:offsets";
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT);
Long streamLength = jedis.xlen(STREAM_NAME);
assertTrue("Redis Basic Stream Test Failed", streamLength == MESSAGE_COUNT);
// wait until the offsets are re-written
TestUtils.awaitHashSizeGte(jedis, HASH_NAME, 1);
Map<String, String> redisOffsets = jedis.hgetAll(HASH_NAME);
assertTrue(redisOffsets.size() > 0);
jedis.close();
}
}

View File

@@ -1,41 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.net.URL;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.quarkus.test.junit.QuarkusTestProfile;
public class RedisSSLStreamTestProfile implements QuarkusTestProfile {
@Override
public List<TestResourceEntry> testResources() {
return Arrays.asList(
new TestResourceEntry(PostgresTestResourceLifecycleManager.class),
new TestResourceEntry(RedisSSLTestResourceLifecycleManager.class));
}
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<String, String>();
URL keyStoreFile = RedisSSLStreamTestProfile.class.getClassLoader().getResource("ssl/client-keystore.p12");
URL trustStoreFile = RedisSSLStreamTestProfile.class.getClassLoader().getResource("ssl/client-truststore.p12");
config.put("javax.net.ssl.keyStore", keyStoreFile.getPath());
config.put("javax.net.ssl.trustStore", trustStoreFile.getPath());
config.put("javax.net.ssl.keyStorePassword", "secret");
config.put("javax.net.ssl.trustStorePassword", "secret");
config.put("debezium.source.offset.storage", "io.debezium.server.redis.RedisOffsetBackingStore");
config.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
return config;
}
}
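
The profile above supplies the client key store and trust store through javax.net.ssl system properties. A minimal sketch (not part of this commit) of connecting a plain Jedis client over TLS in the same way RedisSSLStreamIT does; paths, passwords, and the port are placeholders.

import redis.clients.jedis.Jedis;

// Minimal sketch: TLS connection to Redis with key/trust stores supplied via system properties.
public class SslJedisSketch {
    public static void main(String[] args) {
        System.setProperty("javax.net.ssl.keyStore", "ssl/client-keystore.p12");     // placeholder path
        System.setProperty("javax.net.ssl.keyStorePassword", "secret");              // placeholder
        System.setProperty("javax.net.ssl.trustStore", "ssl/client-truststore.p12"); // placeholder path
        System.setProperty("javax.net.ssl.trustStorePassword", "secret");            // placeholder

        try (Jedis jedis = new Jedis("localhost", 6379, true)) { // third argument enables SSL
            System.out.println(jedis.ping());
        }
    }
}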

View File

@@ -1,83 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.testcontainers.containers.BindMode;
import org.testcontainers.containers.GenericContainer;
import io.debezium.server.TestConfigSource;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class RedisSSLTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public static final int REDIS_PORT = 6379;
public static final String REDIS_IMAGE = "redis";
private static final AtomicBoolean running = new AtomicBoolean(false);
private static final GenericContainer<?> container = new GenericContainer<>(REDIS_IMAGE)
.withClasspathResourceMapping("ssl", "/etc/certificates", BindMode.READ_ONLY)
.withCommand(
"redis-server --tls-port 6379 --port 0 --tls-cert-file /etc/certificates/redis.crt --tls-key-file /etc/certificates/redis.key --tls-ca-cert-file /etc/certificates/ca.crt")
.withExposedPorts(REDIS_PORT);
private static synchronized void start(boolean ignored) {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
start(true);
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(TestConfigSource.OFFSET_STORE_PATH);
Map<String, String> params = new ConcurrentHashMap<>();
params.put("debezium.sink.type", "redis");
params.put("debezium.source.offset.storage.redis.address", RedisSSLTestResourceLifecycleManager.getRedisContainerAddress());
params.put("debezium.source.offset.storage.redis.ssl.enabled", "true");
params.put("debezium.sink.redis.address", RedisSSLTestResourceLifecycleManager.getRedisContainerAddress());
params.put("debezium.sink.redis.ssl.enabled", "true");
params.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
params.put("debezium.source.offset.flush.interval.ms", "0");
params.put("debezium.source.topic.prefix", "testc");
params.put("debezium.source.schema.include.list", "inventory");
params.put("debezium.source.table.include.list", "inventory.customers,inventory.redis_test,inventory.redis_test2");
return params;
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
public static void pause() {
container.getDockerClient().pauseContainerCmd(container.getContainerId()).exec();
}
public static void unpause() {
container.getDockerClient().unpauseContainerCmd(container.getContainerId()).exec();
}
public static String getRedisContainerAddress() {
start(true);
return String.format("%s:%d", container.getContainerIpAddress(), container.getFirstMappedPort());
}
}

View File

@@ -1,131 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import io.debezium.config.Configuration;
import io.debezium.connector.mysql.MySqlConnection;
import io.debezium.connector.mysql.MySqlConnection.MySqlConnectionConfiguration;
import io.debezium.doc.FixFor;
import io.debezium.relational.history.AbstractSchemaHistoryTest;
import io.debezium.relational.history.SchemaHistory;
import io.debezium.relational.history.SchemaHistoryMetrics;
import io.debezium.testing.testcontainers.MySqlTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.resps.StreamEntry;
/**
* Integration test that verifies reading and writing database schema history from Redis key value store
*
* @author Oren Elias
*/
@QuarkusIntegrationTest
@TestProfile(RedisSchemaHistoryTestProfile.class)
@QuarkusTestResource(RedisTestResourceLifecycleManager.class)
public class RedisSchemaHistoryIT extends AbstractSchemaHistoryTest {
private static final String STREAM_NAME = "metadata:debezium:schema_history";
private static final int INIT_HISTORY_SIZE = 16; // Initial number of entries in the schema history stream.
protected static Jedis jedis;
@Override
@BeforeEach
public void beforeEach() {
super.beforeEach();
}
@Override
protected SchemaHistory createHistory() {
SchemaHistory history = new RedisSchemaHistory();
history.configure(Configuration.create()
.with("schema.history.internal.redis.address", HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()))
.build(), null, SchemaHistoryMetrics.NOOP, true);
history.start();
return history;
}
@Test
@FixFor("DBZ-4771")
public void testSchemaHistoryIsSaved() {
jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, INIT_HISTORY_SIZE + 1);
final List<StreamEntry> entries = jedis.xrange(STREAM_NAME, (StreamEntryID) null, (StreamEntryID) null);
assertEquals(INIT_HISTORY_SIZE + 1, entries.size());
assertTrue(entries.stream().anyMatch(item -> item.getFields().get("schema").contains("CREATE TABLE `customers`")));
}
@Test
@FixFor("DBZ-4771")
public void shouldRecordChangesAndRecoverToVariousPoints() {
super.shouldRecordChangesAndRecoverToVariousPoints();
}
/**
* Test retry mechanism when encountering Redis connectivity issues:
* 1. Make Redis unavailable while the server is up
* 2. Create a new table named redis_test in MySQL
* 3. Bring Redis up again and make sure the database schema has been written successfully
*/
@Test
@FixFor("DBZ-4509")
public void testRedisConnectionRetry() throws Exception {
Testing.Print.enable();
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
// wait until the db schema history is written for the first time
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, 1);
// pause container
Testing.print("Pausing container");
RedisTestResourceLifecycleManager.pause();
final MySqlConnection connection = getMySqlConnection();
connection.connect();
Testing.print("Creating new redis_test table and inserting 5 records to it");
connection.execute("CREATE TABLE inventory.redis_test (id INT PRIMARY KEY)");
connection.close();
Testing.print("Sleeping for 2 seconds to flush records");
Thread.sleep(2000);
Testing.print("Unpausing container");
RedisTestResourceLifecycleManager.unpause();
// wait until the db schema history is written for the first time
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, INIT_HISTORY_SIZE + 1);
final List<StreamEntry> entries = jedis.xrange(STREAM_NAME, (StreamEntryID) null, (StreamEntryID) null);
assertEquals(INIT_HISTORY_SIZE + 1, entries.size());
assertTrue(entries.get(INIT_HISTORY_SIZE).getFields().get("schema").contains("redis_test"));
}
private MySqlConnection getMySqlConnection() {
return new MySqlConnection(new MySqlConnectionConfiguration(Configuration.create()
.with("database.user", MySqlTestResourceLifecycleManager.PRIVILEGED_USER)
.with("database.password", MySqlTestResourceLifecycleManager.PRIVILEGED_PASSWORD)
.with("database.dbname", MySqlTestResourceLifecycleManager.DBNAME)
.with("database.hostname", MySqlTestResourceLifecycleManager.HOST)
.with("database.port", MySqlTestResourceLifecycleManager.getContainer().getMappedPort(MySqlTestResourceLifecycleManager.PORT))
.build()));
}
}

View File

@@ -1,36 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.debezium.testing.testcontainers.MySqlTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.junit.QuarkusTestProfile;
public class RedisSchemaHistoryTestProfile implements QuarkusTestProfile {
public static final String OFFSETS_FILE = "file-connector-offsets.txt";
public static final Path OFFSET_STORE_PATH = Testing.Files.createTestingPath(OFFSETS_FILE).toAbsolutePath();
public static final String OFFSET_STORAGE_FILE_FILENAME_CONFIG = "offset.storage.file.filename";
@Override
public List<TestResourceEntry> testResources() {
return Arrays.asList(new TestResourceEntry(MySqlTestResourceLifecycleManager.class));
}
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<String, String>();
config.put("debezium.source." + OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
config.put("debezium.source.schema.history.internal", "io.debezium.server.redis.RedisSchemaHistory");
config.put("debezium.source.database.server.id", "12345");
return config;
}
}

View File

@@ -1,144 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.junit.Assert.assertTrue;
import java.util.List;
import java.util.Map;
import org.junit.jupiter.api.Test;
import io.debezium.connector.postgresql.connection.PostgresConnection;
import io.debezium.doc.FixFor;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.resps.StreamEntry;
/**
* Integration tests that verify basic reading from PostgreSQL database and writing to Redis stream
* and retry mechanism in case of connectivity issues or OOM in Redis
*
* @author M Sazzadul Hoque
* @author Yossi Shirizli
*/
@QuarkusIntegrationTest
@TestProfile(RedisStreamTestProfile.class)
@QuarkusTestResource(RedisTestResourceLifecycleManager.class)
public class RedisStreamIT {
/**
* Verifies that all the records of a PostgreSQL table are streamed to Redis
*/
@Test
public void testRedisStream() throws Exception {
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
final int MESSAGE_COUNT = 4;
final String STREAM_NAME = "testc.inventory.customers";
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT);
Long streamLength = jedis.xlen(STREAM_NAME);
assertTrue("Expected stream length of " + MESSAGE_COUNT, streamLength == MESSAGE_COUNT);
final List<StreamEntry> entries = jedis.xrange(STREAM_NAME, (StreamEntryID) null, (StreamEntryID) null);
for (StreamEntry entry : entries) {
Map<String, String> map = entry.getFields();
assertTrue("Expected map of size 1", map.size() == 1);
Map.Entry<String, String> mapEntry = map.entrySet().iterator().next();
assertTrue("Expected json like key starting with {\"schema\":...", mapEntry.getKey().startsWith("{\"schema\":"));
assertTrue("Expected json like value starting with {\"schema\":...", mapEntry.getValue().startsWith("{\"schema\":"));
}
jedis.close();
}
/**
* Test retry mechanism when encountering Redis connectivity issues:
* 1. Make Redis unavailable while the server is up
* 2. Create a new table named redis_test in PostgreSQL and insert 5 records to it
* 3. Bring Redis up again and make sure these records have been streamed successfully
*/
@Test
@FixFor("DBZ-4510")
public void testRedisConnectionRetry() throws Exception {
Testing.Print.enable();
final int MESSAGE_COUNT = 5;
final String STREAM_NAME = "testc.inventory.redis_test";
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
Testing.print("Pausing container");
RedisTestResourceLifecycleManager.pause();
final PostgresConnection connection = TestUtils.getPostgresConnection();
Testing.print("Creating new redis_test table and inserting 5 records to it");
connection.execute(
"CREATE TABLE inventory.redis_test (id INT PRIMARY KEY)",
"INSERT INTO inventory.redis_test VALUES (1)",
"INSERT INTO inventory.redis_test VALUES (2)",
"INSERT INTO inventory.redis_test VALUES (3)",
"INSERT INTO inventory.redis_test VALUES (4)",
"INSERT INTO inventory.redis_test VALUES (5)");
connection.close();
Testing.print("Sleeping for 3 seconds to simulate no connection errors");
Thread.sleep(3000);
Testing.print("Unpausing container");
RedisTestResourceLifecycleManager.unpause();
Thread.sleep(2000);
Long streamLength = jedis.xlen(STREAM_NAME);
Testing.print("Entries in " + STREAM_NAME + ":" + streamLength);
jedis.close();
assertTrue("Redis Connection Test Failed", streamLength == MESSAGE_COUNT);
}
/**
* Test retry mechanism when encountering Redis Out of Memory:
* 1. Simulate a Redis OOM by setting its max memory to 1M
* 2. Create a new table named redis_test2 in PostgreSQL and insert 50 records to it
* 3. Sleep for 1 second to simulate Redis OOM (stream does not contain 50 records)
* 4. Unlimit memory and verify that all 50 records have been streamed
*/
@Test
@FixFor("DBZ-4510")
public void testRedisOOMRetry() throws Exception {
Testing.Print.enable();
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
final String STREAM_NAME = "testc.inventory.redis_test2";
final int TOTAL_RECORDS = 50;
Testing.print("Setting Redis' maxmemory to 1M");
jedis.configSet("maxmemory", "1M");
PostgresConnection connection = TestUtils.getPostgresConnection();
connection.execute("CREATE TABLE inventory.redis_test2 " +
"(id VARCHAR(100) PRIMARY KEY, " +
"first_name VARCHAR(100), " +
"last_name VARCHAR(100))");
connection.execute(String.format("INSERT INTO inventory.redis_test2 (id,first_name,last_name) " +
"SELECT LEFT(i::text, 10), RANDOM()::text, RANDOM()::text FROM generate_series(1,%d) s(i)", TOTAL_RECORDS));
connection.commit();
Thread.sleep(1000);
Testing.print("Entries in " + STREAM_NAME + ":" + jedis.xlen(STREAM_NAME));
assertTrue(jedis.xlen(STREAM_NAME) < TOTAL_RECORDS);
Thread.sleep(1000);
jedis.configSet("maxmemory", "0");
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, TOTAL_RECORDS);
long streamLength = jedis.xlen(STREAM_NAME);
assertTrue("Redis OOM Test Failed", streamLength == TOTAL_RECORDS);
}
}

View File

@@ -1,58 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.junit.Assert.assertTrue;
import org.junit.jupiter.api.Test;
import io.debezium.connector.postgresql.connection.PostgresConnection;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
@QuarkusIntegrationTest
@TestProfile(RedisStreamMemoryThresholdTestProfile.class)
@QuarkusTestResource(RedisTestResourceLifecycleManager.class)
public class RedisStreamMemoryThresholdIT {
@Test
public void testRedisMemoryThreshold() throws Exception {
Testing.Print.enable();
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
final String STREAM_NAME = "testc.inventory.redis_test2";
final int TOTAL_RECORDS = 50;
Testing.print("Setting Redis' maxmemory to 1M");
jedis.configSet("maxmemory", "1M");
PostgresConnection connection = TestUtils.getPostgresConnection();
connection.execute("CREATE TABLE inventory.redis_test2 " +
"(id VARCHAR(100) PRIMARY KEY, " +
"first_name VARCHAR(100), " +
"last_name VARCHAR(100))");
connection.execute(String.format("INSERT INTO inventory.redis_test2 (id,first_name,last_name) " +
"SELECT LEFT(i::text, 10), RANDOM()::text, RANDOM()::text FROM generate_series(1,%d) s(i)", TOTAL_RECORDS));
connection.commit();
Thread.sleep(1000);
Testing.print("Entries in " + STREAM_NAME + ":" + jedis.xlen(STREAM_NAME));
assertTrue(jedis.xlen(STREAM_NAME) < TOTAL_RECORDS);
Thread.sleep(1000);
jedis.configSet("maxmemory", "0");
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, TOTAL_RECORDS);
long streamLength = jedis.xlen(STREAM_NAME);
assertTrue("Redis Memory Threshold Test Failed", streamLength == TOTAL_RECORDS);
}
}

View File

@@ -1,19 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.Map;
public class RedisStreamMemoryThresholdTestProfile extends RedisStreamTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> config = super.getConfigOverrides();
config.put("debezium.sink.redis.memory.threshold.percentage", "75");
return config;
}
}

View File

@@ -1,62 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import static org.junit.Assert.assertTrue;
import java.util.List;
import java.util.Map;
import org.junit.jupiter.api.Test;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusIntegrationTest;
import io.quarkus.test.junit.TestProfile;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.resps.StreamEntry;
/**
* Integration tests that verify basic reading from PostgreSQL database and writing to Redis stream
*
* @author ggaborg
*/
@QuarkusIntegrationTest
@TestProfile(RedisStreamMessageTestProfile.class)
@QuarkusTestResource(RedisTestResourceLifecycleManager.class)
public class RedisStreamMessageIT {
/**
* Verifies that all the records of a PostgreSQL table are streamed to Redis in extended message format
*/
@Test
public void testRedisStreamExtendedMessage() throws Exception {
Testing.Print.enable();
Jedis jedis = new Jedis(HostAndPort.from(RedisTestResourceLifecycleManager.getRedisContainerAddress()));
final int MESSAGE_COUNT = 4;
final String STREAM_NAME = "testc.inventory.customers";
TestUtils.awaitStreamLengthGte(jedis, STREAM_NAME, MESSAGE_COUNT);
Long streamLength = jedis.xlen(STREAM_NAME);
assertTrue("Expected stream length of " + MESSAGE_COUNT, streamLength == MESSAGE_COUNT);
final List<StreamEntry> entries = jedis.xrange(STREAM_NAME, (StreamEntryID) null, (StreamEntryID) null);
for (StreamEntry entry : entries) {
Map<String, String> map = entry.getFields();
assertTrue("Expected map of size 2", map.size() == 2);
assertTrue("Expected key's value starting with {\"schema\":...", map.get("key") != null && map.get("key").startsWith("{\"schema\":"));
assertTrue("Expected values's value starting with {\"schema\":...", map.get("value") != null && map.get("value").startsWith("{\"schema\":"));
}
jedis.close();
}
}

View File

@@ -1,19 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.Map;
public class RedisStreamMessageTestProfile extends RedisStreamTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> config = super.getConfigOverrides();
config.put("debezium.sink.redis.message.format", "extended");
return config;
}
}

View File

@ -1,37 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.junit.QuarkusTestProfile;
public class RedisStreamTestProfile implements QuarkusTestProfile {
public static final String OFFSETS_FILE = "file-connector-offsets.txt";
public static final Path OFFSET_STORE_PATH = Testing.Files.createTestingPath(OFFSETS_FILE).toAbsolutePath();
public static final String OFFSET_STORAGE_FILE_FILENAME_CONFIG = "offset.storage.file.filename";
@Override
public List<TestResourceEntry> testResources() {
return Arrays.asList(new TestResourceEntry(PostgresTestResourceLifecycleManager.class));
}
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<String, String>();
config.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
config.put("debezium.source." + OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
config.put("debezium.sink.redis.memory.threshold.percentage", "0");
return config;
}
}

View File

@ -1,76 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.testcontainers.containers.GenericContainer;
import io.debezium.server.TestConfigSource;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
public class RedisTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public static final int REDIS_PORT = 6379;
public static final String REDIS_IMAGE = "redis";
private static final AtomicBoolean running = new AtomicBoolean(false);
private static final GenericContainer<?> container = new GenericContainer<>(REDIS_IMAGE)
.withExposedPorts(REDIS_PORT);
private static synchronized void start(boolean ignored) {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
start(true);
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(TestConfigSource.OFFSET_STORE_PATH);
Map<String, String> params = new ConcurrentHashMap<>();
params.put("debezium.sink.type", "redis");
params.put("debezium.sink.redis.address", RedisTestResourceLifecycleManager.getRedisContainerAddress());
params.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
params.put("debezium.source.offset.flush.interval.ms", "0");
params.put("debezium.source.topic.prefix", "testc");
params.put("debezium.source.schema.include.list", "inventory");
params.put("debezium.source.table.include.list", "inventory.customers,inventory.redis_test,inventory.redis_test2");
return params;
}
@Override
public void stop() {
try {
container.stop();
}
catch (Exception e) {
// ignored
}
running.set(false);
}
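// Pausing and unpausing the container lets tests simulate a temporary Redis outage without losing its state.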
public static void pause() {
container.getDockerClient().pauseContainerCmd(container.getContainerId()).exec();
}
public static void unpause() {
container.getDockerClient().unpauseContainerCmd(container.getContainerId()).exec();
}
public static String getRedisContainerAddress() {
start(true);
return String.format("%s:%d", container.getContainerIpAddress(), container.getFirstMappedPort());
}
}

View File

@ -1,49 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.redis;
import java.time.Duration;
import java.util.function.Supplier;
import org.awaitility.Awaitility;
import io.debezium.connector.postgresql.connection.PostgresConnection;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.server.TestConfigSource;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import redis.clients.jedis.Jedis;
public class TestUtils {
private TestUtils() {
}
public static PostgresConnection getPostgresConnection() {
return new PostgresConnection(JdbcConfiguration.create()
.with("user", PostgresTestResourceLifecycleManager.POSTGRES_USER)
.with("password", PostgresTestResourceLifecycleManager.POSTGRES_PASSWORD)
.with("dbname", PostgresTestResourceLifecycleManager.POSTGRES_DBNAME)
.with("hostname", PostgresTestResourceLifecycleManager.POSTGRES_HOST)
.with("port", PostgresTestResourceLifecycleManager.getContainer().getMappedPort(PostgresTestResourceLifecycleManager.POSTGRES_PORT))
.build(), "Debezium Redis Test");
}
public static void awaitStreamLengthGte(Jedis jedis, String streamName, int expectedLength) {
waitBoolean(() -> jedis.xlen(streamName) >= expectedLength);
}
public static void awaitHashSizeGte(Jedis jedis, String hashName, int expectedSize) {
waitBoolean(() -> jedis.hgetAll(hashName).size() >= expectedSize);
}
private static void waitBoolean(Supplier<Boolean> bool) {
Awaitility.await().atMost(Duration.ofSeconds(TestConfigSource.waitForSeconds())).until(() -> {
return Boolean.TRUE.equals(bool.get());
});
}
}

View File

@ -1,31 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFSzCCAzOgAwIBAgIUFb8HMaTVpznGl5NoocWmcPPNfNgwDQYJKoZIhvcNAQEL
BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg
QXV0aG9yaXR5MB4XDTIyMDMyNzEyNDgzOFoXDTMyMDMyNDEyNDgzOFowNTETMBEG
A1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA6nGxRMQncEmECA/AVepg
WHHCG7/a2NGcNlHwnLk1xpB+o2nm4wwRRQ5+IrybqOOx41gJ1Jg3Ya6elljQdjSn
WMMLm9khIEXJ8CcfYkV+W4yEbOfYi74ufmAkgFrjxMzT5Fn4pBYvYvPIPY+b55KC
JrIsMu1WVkuaRrRylSeIdpePBP9376yfvkw4wjEqI/p03tI5QnzJrTibz2vLGxHC
w6GhpZldcVMuRnG+Ml8l6wQjV1t8DhSL4D+UHYRTeXGNgpppuiqB9J1gRvxuTtFA
u5WChAxoDvHoGzL5LJXFrxyAP0s9PZSP5sTLF9C4LTm6Y0/Ixnc/xCG4mjm+mMm5
H4a02Zu7v8/C7bYe//k79OqHfCy0C6n90iHhND/Y6ab6MJ3R4Jw+bUlpKs1t+GAI
+zsb43+4ql4cxL2fIsfIwj/c5p4hsxiGMDFJkndYDWlByTwKlF+Y9eF0V80GOVEE
1XMP0alQJNiaZ74md3fDHOixLxqo/GBxu3879yf6Bv5y+TXZyBfWP/el0gjb4Fn8
H9d8lVRc2gizEBmh4AN9jUH5BXmpMpCrGEOhwlSpJCWhu2ZTHVb0EjeAj6qDI5Fi
kgIgB4vgIX1xeVWoaZDtOpdsySbnkosfgnpKyaX0jn2YI3qh+PrXV3XRsK1OVJoj
a5opl4VGlohwILiOljFWEEUCAwEAAaNTMFEwHQYDVR0OBBYEFJwAa2lIFh8cVgHC
UEwc2G8NdSbRMB8GA1UdIwQYMBaAFJwAa2lIFh8cVgHCUEwc2G8NdSbRMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAD358vpFM1D+DCizIjHSAvnp
2Qvd21oGZ8mBJIJjyUApgqC0GhCgLokjjKG0gjBdRXMumsB5kYUx8wOtIfMjHNJM
TFk95qgLT5otT7F+Ek8IoaJ/cqA2yh8VOBiJ+1YjEQmX6vhfbF938j53iFkrHfW7
GpiHuiSDiLfZ+sRJnH8UjmcLhPOvKn+eEGp+igb6A3a07MZ3cTVcFXCv6qpMu5gj
I8REVhrtDx1Yya154Kue2zQ+FewkbL1JmGQ8nyHiysjgm29tG7UW92XFZeM/BRZv
eySt2yV8ZBjztGlhP2t63nY58mpHVFD8qFKN3M2N0tybfeFie28ebvfE1uYkb+LD
1sDybrUcPM35WrJvLsv++1KYLqdFqkalzTZZDzartdX5uo4IFvVT3l2luPKZixRi
55MYOVTTb7q5JjkmQaDh2gUcpZZBwF+cjh9AvM/C6X4YIogSGenvp45lnYZtJkLp
W8Vnlm/0LtFmJ8Sm2uKvlfXinphY0t3zDaxwdQKcBKRYjaJkKIUeFfh9ym+zLDoP
fjrdo1XyXHBGDZVTs/5wmDtn86SnYTTZ5BooBuFYPT1UQYOCCArEYnNxW8ENhjyn
tJg8Wzyb2rc2wakJzT/+r7D1+BPErw1J50yVZH04HSfc+beLojNEEsk4+6MQ+rlS
ArUFRwDzMk+Wka2Ne9hW
-----END CERTIFICATE-----

View File

@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIID6DCCAdACFEafrLCQlbLnetOxgDXMJTL0/eu9MA0GCSqGSIb3DQEBCwUAMDUx
EzARBgNVBAoMClJlZGlzIFRlc3QxHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhv
cml0eTAeFw0yMjAzMjcxMjQ4MzhaFw0yMzAzMjcxMjQ4MzhaMCwxEzARBgNVBAoM
ClJlZGlzIFRlc3QxFTATBgNVBAMMDEdlbmVyaWMtY2VydDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALVF2/lw3FM1p0Av1VP7tyL/Lfzid7FL44SBhaQ3
JL0f9/nGsxcPAINdeeJWUbNrrzQySzEwcqdmwaMx8ccguwAlcAOEJ6Verx3K7Xqd
EZQpyDlRqMdNDz8VkuK7+8jI8GiWU8CjVNmxQE12P2og2cN/y3+xNb6N/5R/dAF2
wUZGbQ5b74HALksSkXLEqoMOeylu3rVDPQw+aQe+fMtJXd2Xlkx7d9nNhrAyxeZm
U+Ye/0WK1k7yC936dvY9Zl8ipljSuHPDdA7t4gayzygYV98nP4zG4yynBJEHQJka
qmHukyopmtfpG6OrCBExTOIp5uVUR3hpf8fenA4V7KlYX5ECAwEAATANBgkqhkiG
9w0BAQsFAAOCAgEAvuBdQAGq95dGzYlBNcaV8SlniElVuhdZx1vhGwTGgQyz7h+x
cl5nUUD4NYYmIIkkPln2dTrB5xXpWYsOpBi8CeT44LYzNscpkMHjb2CeVHPofb6n
oh+M4vMc0qUvVsuft0BhLKw39CW4/c3ncZziL6fjSfXDVY16MNzES6lxhkf0gmVX
VXjHMB5n169/hXJ6ql1Ns12aghhBmqRC5Pl6zZfIYlHszhqlfAWdVy/sYm45stpc
vmEJP/k9AmRTszkw++bbVQrEnfDf4f2juMcNIQy1VKrrQhpZUrUbd4Bh5/JYS/ea
wm0s7af+zhF3U0IYVTIpoogg25LaVAMQtOVN7n4maALdev+h8M92/4K9D6Iqf7pF
WKZiXKm30+WtcTI5Yk2XfVHhHLfhNIo4p+RkPr7YxWQWIo9mH5RTSc2DiUDgunsV
ktaD+ourawgW2s919JT3nNSyXf8mzSj70JlISVcB4ZUrEIERqX+mBI+VVrzRO7v7
qhNo6BpN6UKMZNHRXog0sC1JmcE4JK0E45Sc93jcor7lzXWwq+oqXiwUi03VVKu9
clT1AR/l9BsXE5HfGhlUaacb5GeUCZWqc2gUFBBzBotZouvCWSCr3BC/1OgXVdiF
/10RmMTP/8+JPuMRRZf1AMGg6XlFjwMgtIxNok8VAM7O0vNRMxDbATFk/fo=
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEoAIBAAKCAQEAtUXb+XDcUzWnQC/VU/u3Iv8t/OJ3sUvjhIGFpDckvR/3+caz
Fw8Ag1154lZRs2uvNDJLMTByp2bBozHxxyC7ACVwA4QnpV6vHcrtep0RlCnIOVGo
x00PPxWS4rv7yMjwaJZTwKNU2bFATXY/aiDZw3/Lf7E1vo3/lH90AXbBRkZtDlvv
gcAuSxKRcsSqgw57KW7etUM9DD5pB758y0ld3ZeWTHt32c2GsDLF5mZT5h7/RYrW
TvIL3fp29j1mXyKmWNK4c8N0Du3iBrLPKBhX3yc/jMbjLKcEkQdAmRqqYe6TKima
1+kbo6sIETFM4inm5VRHeGl/x96cDhXsqVhfkQIDAQABAoIBACbops3ExtXpd0vU
uJJ9PhL5+PIF3jElG+HON1RHLdCWcxCe8MPrSW3o+4d0hbbgT0wB25fkUUfTS47u
4nmeceUXJBLtI+FgEHBxpUWjPymxSjPbbkHRLYuZ4txeAy5Wyxt34ORBaq4LWMUH
s5bhFDzdSl3POLz9F7XVIZ5K0jN1/3AHICI6C38ibMa0vmWcegTU9IzhnYFX54HT
YpvPAIRE7k2v0VQdhdl56CBnJRyyg3v/hXbrnllWwTq84pkGOPUx74EW2veLB9qF
qTR1L4j88+KEA7i9Wi4jda7u6cmU7qzkvhq8ozXEzlAgtbj43CEFE35ndssg/u+H
s2fiWsECgYEA7nIjH+lMq6c1HypgngrkezMHCJ9jrprzfV8pBjCrt3lRUgOAJ6vh
rbUfYloggZ6MvFlReulv7OPKEQ/27Jmzes7qGaY8iT2wWObDTEVnab9vj5zHvDQu
oMXKFI488dgy8rgu85qst9OgwaQNIGWpmyYc7NKaMAfgfuW8O5gE/kkCgYEAwp42
qRlZSofN3rnZiYFymdOE8nXG6JAYcLar6Zj3gkMzDoiHcTqzDOIclh3ECaSEvI2S
qtyii8miHGLluccFU8SsiLDpiNjPoH/6ghnOwgHc598cNgskBcCwt+RGL57r1Ihp
EHkdMsvfUyqko/v4zikS6T5L/Jb1ZUTpHb079wkCgYBk6bkj0EC6hNFelzi8qi94
VGgtpgcx5jIWZSefjwmdZKKd6w1ZvMyOD5FwtmP7d3ZoYASz2AeeYekqv27pQ0NK
PjelE02bZ5gyh7cvmE/j2TkFtnxxkkQJlqund2sjjzRAtOJqBpQCWHY4fY1woJXf
K4oiUB8hJLRGAERhlDzIwQJ/WznOZXV8ZXNDDSQQ1GPZVEHSD9UlZ94jlrIJ43XQ
jz7wr99PncPmKiPTQoqxWdPEZJ34Ih9KEjxxwNya0np4BMwsE+bZkUwQByv2QQXX
5Rj6c7YhgzO1rvyj2Y8nVysnYHiQxyf+h9EI3fMfOb7i4cBfHPYWqFAat12V6FrL
wQKBgH1losPTxNKG4FCToIaRfc+fdyCJoftKF2CmzPcgn3p0mpV95aQHJCeoVSRU
npiEGRoe8IAJhPHXcMGcG6gJmEx8P5bIswuXB7oQRD+8Fh8BOvfO3Tw7pGJ66tzR
AgkSMASENjamryHBBPSB4/cU6x9FhPdiRSgad+L8+2Kq20Hg
-----END RSA PRIVATE KEY-----

View File

@ -1,175 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-server</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server-rocketmq</artifactId>
<name>Debezium Server RocketMQ Sink Adapter</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-tools</artifactId>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-testing-testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jboss.jandex</groupId>
<artifactId>jandex-maven-plugin</artifactId>
<executions>
<execution>
<id>make-index</id>
<goals>
<goal>jandex</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>integration-test</id>
<goals>
<goal>integration-test</goal>
</goals>
</execution>
<execution>
<id>verify</id>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<reuseForks>false</reuseForks>
<skipTests>${skipITs}</skipTests>
<enableAssertions>true</enableAssertions>
<systemProperties>
<test.type>IT</test.type>
</systemProperties>
<runOrder>${runOrder}</runOrder>
</configuration>
</plugin>
</plugins>
<resources>
<!-- Apply the properties set in the POM to the resource files -->
<resource>
<filtering>true</filtering>
<directory>src/main/resources</directory>
<includes>
<include>**/build.properties</include>
</includes>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>quick</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>quick</name>
</property>
</activation>
<properties>
<skipITs>true</skipITs>
<docker.skip>true</docker.skip>
</properties>
</profile>
<profile>
<id>skip-integration-tests</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>skipITs</name>
</property>
</activation>
<properties>
<docker.skip>true</docker.skip>
</properties>
</profile>
</profiles>
</project>

View File

@ -1,166 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.rocketmq;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.enterprise.context.Dependent;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.inject.Named;
import org.apache.rocketmq.acl.common.AclClientRPCHook;
import org.apache.rocketmq.acl.common.SessionCredentials;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.DefaultMQProducer;
import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.client.producer.selector.SelectMessageQueueByHash;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.remoting.RPCHook;
import org.apache.rocketmq.remoting.protocol.LanguageCode;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.DebeziumException;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.server.BaseChangeConsumer;
import io.debezium.server.CustomConsumerBuilder;
/**
* RocketMQ change consumer that streams Debezium change events to Apache RocketMQ topics.
*/
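// A minimal sketch of the sink configuration this consumer reads. The property names come from the
// constants defined below; the name server address is an illustrative placeholder and the group name
// mirrors the integration test setup:
//
//   debezium.sink.type=rocketmq
//   debezium.sink.rocketmq.producer.name.srv.addr=localhost:9876
//   debezium.sink.rocketmq.producer.group=producer-group
//   debezium.sink.rocketmq.producer.acl.enabled=false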
@Named("rocketmq")
@Dependent
public class RocketMqChangeConsumer extends BaseChangeConsumer implements DebeziumEngine.ChangeConsumer<ChangeEvent<Object, Object>> {
private static final Logger LOGGER = LoggerFactory.getLogger(RocketMqChangeConsumer.class);
private static final String PROP_PREFIX = "debezium.sink.rocketmq.";
private static final String PROP_PRODUCER_PREFIX = PROP_PREFIX + "producer.";
// acl config
private static final String PROP_PRODUCER_ACL_ENABLE = PROP_PRODUCER_PREFIX + "acl.enabled";
private static final String PROP_PRODUCER_ACCESS_KEY = PROP_PRODUCER_PREFIX + "access.key";
private static final String PROP_PRODUCER_SECRET_KEY = PROP_PRODUCER_PREFIX + "secret.key";
// common config
private static final String PROP_PRODUCER_NAME_SRV_ADDR = PROP_PRODUCER_PREFIX + "name.srv.addr";
private static final String PROP_PRODUCER_GROUP = PROP_PRODUCER_PREFIX + "group";
private static final String PROP_PRODUCER_MAX_MESSAGE_SIZE = PROP_PRODUCER_PREFIX + "max.message.size";
private static final String PROP_PRODUCER_SEND_MSG_TIMEOUT = PROP_PRODUCER_PREFIX + "send.msg.timeout";
@Inject
@CustomConsumerBuilder
Instance<DefaultMQProducer> customRocketMqProducer;
private DefaultMQProducer mqProducer;
@PostConstruct
void connect() {
if (customRocketMqProducer.isResolvable()) {
mqProducer = customRocketMqProducer.get();
startProducer();
LOGGER.info("Obtained custom configured RocketMqProducer '{}'", mqProducer);
return;
}
final Config config = ConfigProvider.getConfig();
// init rocketmq producer
RPCHook rpcHook = null;
Optional<Boolean> aclEnable = config.getOptionalValue(PROP_PRODUCER_ACL_ENABLE, Boolean.class);
if (aclEnable.isPresent() && aclEnable.get()) {
if (config.getOptionalValue(PROP_PRODUCER_ACCESS_KEY, String.class).isEmpty()
|| config.getOptionalValue(PROP_PRODUCER_SECRET_KEY, String.class).isEmpty()) {
throw new DebeziumException("When acl.enabled is true, access key and secret key cannot be empty");
}
rpcHook = new AclClientRPCHook(
new SessionCredentials(
config.getValue(PROP_PRODUCER_ACCESS_KEY, String.class),
config.getValue(PROP_PRODUCER_SECRET_KEY, String.class)));
}
this.mqProducer = new DefaultMQProducer(rpcHook);
this.mqProducer.setNamesrvAddr(config.getValue(PROP_PRODUCER_NAME_SRV_ADDR, String.class));
this.mqProducer.setInstanceName(createUniqInstance(config.getValue(PROP_PRODUCER_NAME_SRV_ADDR, String.class)));
this.mqProducer.setProducerGroup(config.getValue(PROP_PRODUCER_GROUP, String.class));
if (config.getOptionalValue(PROP_PRODUCER_SEND_MSG_TIMEOUT, Integer.class).isPresent()) {
this.mqProducer.setSendMsgTimeout(config.getValue(PROP_PRODUCER_SEND_MSG_TIMEOUT, Integer.class));
}
if (config.getOptionalValue(PROP_PRODUCER_MAX_MESSAGE_SIZE, Integer.class).isPresent()) {
this.mqProducer.setMaxMessageSize(config.getValue(PROP_PRODUCER_MAX_MESSAGE_SIZE, Integer.class));
}
this.mqProducer.setLanguage(LanguageCode.JAVA);
startProducer();
}
private void startProducer() {
try {
this.mqProducer.start();
LOGGER.info("Consumer started...");
}
catch (MQClientException e) {
throw new DebeziumException(e);
}
}
private String createUniqInstance(String prefix) {
return prefix.concat("-").concat(UUID.randomUUID().toString());
}
@PreDestroy
void close() {
// Shut down the RocketMQ producer
LOGGER.info("Shutting down RocketMQ producer...");
if (mqProducer != null) {
mqProducer.shutdown();
}
}
@Override
public void handleBatch(List<ChangeEvent<Object, Object>> records, DebeziumEngine.RecordCommitter<ChangeEvent<Object, Object>> committer)
throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(records.size());
final AtomicReference<Throwable> sendFailure = new AtomicReference<>();
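// One latch count per record: the batch is only committed after every asynchronous send callback has fired, and the first failure is recorded so it can be rethrown.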
for (ChangeEvent<Object, Object> record : records) {
try {
final String topicName = streamNameMapper.map(record.destination());
String key = getString(record.key());
mqProducer.send(new Message(topicName, null, key, getBytes(record.value())), new SelectMessageQueueByHash(), key, new SendCallback() {
@Override
public void onSuccess(SendResult sendResult) {
LOGGER.debug("Sent message with offset: {}", sendResult.getQueueOffset());
latch.countDown();
}
@Override
public void onException(Throwable throwable) {
LOGGER.error("Failed to send record to {}:", record.destination(), throwable);
// Record the first failure and count the latch down so a failed send never blocks the batch.
sendFailure.compareAndSet(null, throwable);
latch.countDown();
}
});
}
catch (Exception e) {
throw new DebeziumException(e);
}
}
// The producer's send timeout guarantees every callback eventually fires, and both success and failure paths count the latch down, so this cannot block forever.
latch.await();
if (sendFailure.get() != null) {
throw new DebeziumException(sendFailure.get());
}
for (ChangeEvent<Object, Object> record : records) {
committer.markProcessed(record);
}
committer.markBatchFinished();
}
}

View File

@ -1,72 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.rocketmq;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.utility.DockerImageName;
import com.github.dockerjava.api.command.InspectContainerResponse;
/**
* Testcontainers-based RocketMQ container that runs the name server and broker in a single node for tests.
*/
public class RocketMqContainer extends GenericContainer<RocketMqContainer> {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("apache/rocketmq:4.9.4");
private static final int defaultBrokerPermission = 6;
public static final int NAMESRV_PORT = 9876;
public static final int BROKER_PORT = 10911;
public RocketMqContainer() {
super(DEFAULT_IMAGE_NAME);
withExposedPorts(NAMESRV_PORT, BROKER_PORT, BROKER_PORT - 2);
}
@Override
protected void configure() {
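// Start both the name server and the broker inside this single container; the broker registers with the local name server.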
String command = "#!/bin/bash\n";
command += "./mqnamesrv &\n";
command += "./mqbroker -n localhost:" + NAMESRV_PORT;
withCommand("sh", "-c", command);
}
@Override
protected void containerIsStarted(InspectContainerResponse containerInfo) {
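// Once the container is up, advertise the host-mapped address and port to clients and set brokerPermission to 6 (read + write) so tests can produce and consume.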
List<String> updateBrokerConfigCommands = new ArrayList<>();
updateBrokerConfigCommands.add(updateBrokerConfig("brokerIP1", getHost()));
updateBrokerConfigCommands.add(updateBrokerConfig("listenPort", getMappedPort(BROKER_PORT)));
updateBrokerConfigCommands.add(updateBrokerConfig("brokerPermission", defaultBrokerPermission));
final String command = String.join(" && ", updateBrokerConfigCommands);
ExecResult result = null;
try {
result = execInContainer(
"/bin/sh",
"-c",
command);
}
catch (IOException | InterruptedException e) {
throw new RuntimeException(e);
}
if (result != null && result.getExitCode() != 0) {
throw new IllegalStateException(result.toString());
}
}
private String updateBrokerConfig(final String key, final Object val) {
final String brokerAddr = "localhost:" + BROKER_PORT;
return "./mqadmin updateBrokerConfig -b " + brokerAddr + " -k " + key + " -v " + val;
}
public String getNamesrvAddr() {
return String.format("%s:%s", getHost(), getMappedPort(NAMESRV_PORT));
}
}

View File

@ -1,74 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.rocketmq;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.event.Observes;
import org.apache.rocketmq.client.consumer.DefaultLitePullConsumer;
import org.apache.rocketmq.common.consumer.ConsumeFromWhere;
import org.apache.rocketmq.common.message.MessageExt;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Test;
import io.debezium.server.TestConfigSource;
import io.debezium.server.events.ConnectorCompletedEvent;
import io.debezium.testing.testcontainers.PostgresTestResourceLifecycleManager;
import io.debezium.util.Testing;
import io.quarkus.test.common.QuarkusTestResource;
import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
@QuarkusTestResource(PostgresTestResourceLifecycleManager.class)
@QuarkusTestResource(RocketMqTestResourceLifecycleManager.class)
public class RocketMqIT {
private static final int MESSAGE_COUNT = 4;
private static DefaultLitePullConsumer consumer = null;
{
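// Start every test run with a clean file-based offset store.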
Testing.Files.delete(TestConfigSource.OFFSET_STORE_PATH);
Testing.Files.createTestingFile(RocketMqTestConfigSource.OFFSET_STORE_PATH);
}
@AfterAll
static void stop() {
if (consumer != null) {
consumer.shutdown();
}
}
void connectorCompleted(@Observes ConnectorCompletedEvent event) throws Exception {
if (!event.isSuccess()) {
throw new RuntimeException(event.getError().get());
}
}
@Test
public void testRocketMQ() throws Exception {
// start consumer
consumer = new DefaultLitePullConsumer("consumer-group");
consumer.setNamesrvAddr(RocketMqTestResourceLifecycleManager.getNameSrvAddr());
consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
consumer.subscribe(RocketMqTestConfigSource.TOPIC_NAME, "*");
consumer.start();
// consume record
final List<MessageExt> records = new ArrayList<>();
Awaitility.await().atMost(Duration.ofSeconds(RocketMqTestConfigSource.waitForSeconds())).until(() -> {
records.addAll(this.consumer.poll(5000));
return records.size() >= MESSAGE_COUNT;
});
assertThat(records.size()).isGreaterThanOrEqualTo(MESSAGE_COUNT);
}
}

View File

@ -1,42 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.rocketmq;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import io.debezium.server.TestConfigSource;
public class RocketMqTestConfigSource extends TestConfigSource {
public static final String TOPIC_NAME = "testc-inventory-customers";
public RocketMqTestConfigSource() {
final Map<String, String> rocketmqConfig = new HashMap<>();
rocketmqConfig.put("debezium.sink.type", "rocketmq");
rocketmqConfig.put("debezium.source.connector.class", "io.debezium.connector.postgresql.PostgresConnector");
rocketmqConfig.put("debezium.source." + StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, OFFSET_STORE_PATH.toAbsolutePath().toString());
rocketmqConfig.put("debezium.source.offset.flush.interval.ms", "0");
rocketmqConfig.put("debezium.source.topic.prefix", "testc");
rocketmqConfig.put("debezium.source.schema.include.list", "inventory");
rocketmqConfig.put("debezium.source.table.include.list", "inventory.customers");
rocketmqConfig.put("debezium.transforms", "Reroute");
rocketmqConfig.put("debezium.transforms.Reroute.type", "io.debezium.transforms.ByLogicalTableRouter");
rocketmqConfig.put("debezium.transforms.Reroute.topic.regex", "(.*)");
rocketmqConfig.put("debezium.transforms.Reroute.topic.replacement", TOPIC_NAME);
config = rocketmqConfig;
}
@Override
public int getOrdinal() {
// Configuration property precedence is based on ordinal values and since we override the
// properties in TestConfigSource, we should give this a higher priority.
return super.getOrdinal() + 1;
}
}

View File

@ -1,53 +0,0 @@
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.server.rocketmq;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import io.quarkus.test.common.QuarkusTestResourceLifecycleManager;
/**
* Manages the lifecycle of the single-node RocketMQ test container resource.
*/
public class RocketMqTestResourceLifecycleManager implements QuarkusTestResourceLifecycleManager {
public static RocketMqContainer container = new RocketMqContainer();
private static final AtomicBoolean running = new AtomicBoolean(false);
private static synchronized void init() {
if (!running.get()) {
container.start();
running.set(true);
}
}
@Override
public Map<String, String> start() {
init();
Map<String, String> params = new ConcurrentHashMap<>();
params.put("debezium.sink.rocketmq.producer.name.srv.addr", getNameSrvAddr());
params.put("debezium.sink.rocketmq.producer.group", "producer-group");
return params;
}
@Override
public void stop() {
try {
if (container != null) {
container.stop();
}
}
catch (Exception e) {
// ignored
}
}
public static String getNameSrvAddr() {
return container.getNamesrvAddr();
}
}

View File

@ -1,39 +0,0 @@
<configuration>
<appender name="CONSOLE"
class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n</pattern>
</encoder>
</appender>
<root level="warn">
<appender-ref ref="CONSOLE" />
</root>
<!-- Set up the default logging to be INFO level, then override specific
units -->
<logger name="io.debezium" level="info" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.embedded.EmbeddedEngine$EmbeddedConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.debezium.converters.CloudEventsConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="org.apache.kafka.connect.json.JsonConverterConfig"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
<logger
name="io.confluent"
level="warn" additivity="false">
<appender-ref ref="CONSOLE" />
</logger>
</configuration>

View File

@ -1,137 +0,0 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>io.debezium</groupId>
<artifactId>debezium-parent</artifactId>
<version>2.2.0-SNAPSHOT</version>
<relativePath>../debezium-parent/pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>debezium-server</artifactId>
<name>Debezium Server Parent</name>
<packaging>pom</packaging>
<modules>
<module>debezium-server-bom</module>
<module>debezium-server-core</module>
<module>debezium-server-kinesis</module>
<module>debezium-server-pubsub</module>
<module>debezium-server-pulsar</module>
<module>debezium-server-eventhubs</module>
<module>debezium-server-http</module>
<module>debezium-server-redis</module>
<module>debezium-server-dist</module>
<module>debezium-server-kafka</module>
<module>debezium-server-pravega</module>
<module>debezium-server-nats-streaming</module>
<module>debezium-server-nats-jetstream</module>
<module>debezium-server-infinispan</module>
<module>debezium-server-rocketmq</module>
</modules>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-server-bom</artifactId>
<version>${project.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<!-- Quarkus extensions -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-core</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-smallrye-health</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-resteasy</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-resteasy-jackson</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-api</artifactId>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-embedded</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Testing -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-core</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-avro-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.apicurio</groupId>
<artifactId>apicurio-registry-utils-converter</artifactId>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-connect-protobuf-converter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-postgres</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.debezium</groupId>
<artifactId>debezium-connector-mysql</artifactId>
<scope>test</scope>
</dependency>
<!-- Aligning versions/fixing scopes -->
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>runtime</scope>
</dependency>
</dependencies>
</project>

View File

@ -164,7 +164,6 @@
<module>debezium-microbenchmark-oracle</module>
<module>debezium-quarkus-outbox</module>
<module>debezium-scripting</module>
<module>debezium-server</module>
<module>debezium-testing</module>
<module>debezium-connect-rest-extension</module>
<module>debezium-schema-generator</module>