DBZ-2265 Apicurio Registry deployment infrastracture
This commit is contained in:
parent
802b088868
commit
04401295a0
@ -21,6 +21,7 @@
|
||||
<version.junit5.pioneer>0.5.1</version.junit5.pioneer>
|
||||
<version.assertj>3.11.1</version.assertj>
|
||||
<version.db2.driver>11.5.4.0</version.db2.driver>
|
||||
<version.apicurio.converter>1.3.2.Final</version.apicurio.converter>
|
||||
|
||||
<image.registry>quay.io</image.registry>
|
||||
<image.name>debezium/testing-openshift-connect:kafka-${version.kafka}-${project.version}</image.name>
|
||||
@ -28,13 +29,18 @@
|
||||
|
||||
<!--OCP configuration-->
|
||||
<ocp.project.debezium>debezium</ocp.project.debezium>
|
||||
<ocp.project.registry>debezium-registry</ocp.project.registry>
|
||||
<ocp.project.mysql>debezium-mysql</ocp.project.mysql>
|
||||
<ocp.project.postgresql>debezium-postgresql</ocp.project.postgresql>
|
||||
<ocp.project.mongo>debezium-mongo</ocp.project.mongo>
|
||||
<ocp.project.sqlserver>debezium-sqlserver</ocp.project.sqlserver>
|
||||
<ocp.project.db2>debezium-db2</ocp.project.db2>
|
||||
|
||||
|
||||
<!--Strimzi configuration-->
|
||||
<strimzi.operator.connectors>true</strimzi.operator.connectors>
|
||||
|
||||
|
||||
<!--MySQL configuration-->
|
||||
<database.mysql.port>3306</database.mysql.port>
|
||||
<database.mysql.username>mysqluser</database.mysql.username>
|
||||
@ -90,11 +96,28 @@
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>jitpack.io</id>
|
||||
<url>https://jitpack.io</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>io.fabric8</groupId>
|
||||
<artifactId>openshift-client</artifactId>
|
||||
<version>${version.fabric8.client}</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>com.squareup.okhttp3</groupId>
|
||||
<artifactId>okhttp</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.squareup.okhttp3</groupId>
|
||||
<artifactId>logging-interceptor</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
@ -141,6 +164,13 @@
|
||||
<artifactId>okhttp</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.squareup.okhttp3</groupId>
|
||||
<artifactId>logging-interceptor</artifactId>
|
||||
<version>${version.okhttp}</version>
|
||||
</dependency>
|
||||
|
||||
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-core</artifactId>
|
||||
@ -223,6 +253,12 @@
|
||||
<artifactId>jcc</artifactId>
|
||||
<version>${version.db2.driver}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.github.jcechace.apicurio-model-generator</groupId>
|
||||
<artifactId>kubernetes-apicurio-registry-model</artifactId>
|
||||
<version>v0.0.4</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<profiles>
|
||||
@ -349,9 +385,12 @@
|
||||
<test.ocp.password>${ocp.password}</test.ocp.password>
|
||||
<test.ocp.pull.secret.paths>${ocp.pull.secret.paths}</test.ocp.pull.secret.paths>
|
||||
<test.ocp.project.debezium>${ocp.project.debezium}</test.ocp.project.debezium>
|
||||
<test.ocp.project.registry>${ocp.project.registry}</test.ocp.project.registry>
|
||||
<test.ocp.project.mysql>${ocp.project.mysql}</test.ocp.project.mysql>
|
||||
<test.ocp.project.postgresql>${ocp.project.postgresql}</test.ocp.project.postgresql>
|
||||
<test.ocp.project.mongo>${ocp.project.mongo}</test.ocp.project.mongo>
|
||||
<test.ocp.project.sqlserver>${ocp.project.sqlserver}</test.ocp.project.sqlserver>
|
||||
<test.ocp.project.db2>${ocp.project.db2}</test.ocp.project.db2>
|
||||
<test.strimzi.operator.connectors>${strimzi.operator.connectors}</test.strimzi.operator.connectors>
|
||||
|
||||
<test.database.mysql.host>${database.mysql.host}</test.database.mysql.host>
|
||||
|
@ -23,6 +23,7 @@ public class ConfigProperties {
|
||||
public static final String OCP_USERNAME = stringProperty("test.ocp.username");
|
||||
public static final String OCP_PASSWORD = stringProperty("test.ocp.password");
|
||||
public static final String OCP_PROJECT_DBZ = stringProperty("test.ocp.project.debezium");
|
||||
public static final String OCP_PROJECT_REGISTRY = System.getProperty("test.ocp.project.registry", "debezium-registry");
|
||||
public static final String OCP_PROJECT_MYSQL = System.getProperty("test.ocp.project.mysql", "debezium-mysql");
|
||||
public static final String OCP_PROJECT_POSTGRESQL = System.getProperty("test.ocp.project.postgresql", "debezium-postgresql");
|
||||
public static final String OCP_PROJECT_SQLSERVER = System.getProperty("test.ocp.project.sqlserver", "debezium-sqlserver");
|
||||
@ -30,7 +31,7 @@ public class ConfigProperties {
|
||||
public static final String OCP_PROJECT_DB2 = System.getProperty("test.ocp.project.db2", "debezium-db2");
|
||||
public static final Optional<String> OCP_PULL_SECRET_PATHS = stringOptionalProperty("test.ocp.pull.secret.paths");
|
||||
|
||||
public static final boolean STRIMZI_OPERATOR_CONNECTORS = booleanProperty("test.strimzi.operator.connectors");
|
||||
public static final boolean STRIMZI_OPERATOR_CONNECTORS = booleanProperty("test.strimzi.operator.connectors", "true");
|
||||
|
||||
public static final int DATABASE_MYSQL_PORT = Integer.parseInt(System.getProperty("test.database.mysql.port", "3306"));
|
||||
public static final String DATABASE_MYSQL_USERNAME = System.getProperty("test.database.mysql.username", "mysqluser");
|
||||
@ -73,8 +74,15 @@ public class ConfigProperties {
|
||||
public static final String DATABASE_DB2_CDC_SCHEMA = System.getProperty("test.database.db2.cdc.schema", "ASNCDC");
|
||||
public static final Optional<String> DATABASE_DB2_HOST = stringOptionalProperty("test.database.sqlserver.host");
|
||||
|
||||
public static final boolean TEST_AVRO_SERIALISATION = booleanProperty("test.avro.serialisation", "true");
|
||||
public static final boolean DEPLOY_SERVICE_REGISTRY = booleanProperty("test.registry.deploy", String.valueOf(TEST_AVRO_SERIALISATION));
|
||||
|
||||
private static boolean booleanProperty(String key) {
|
||||
String value = System.getProperty(key);
|
||||
return booleanProperty(key, value);
|
||||
}
|
||||
|
||||
private static boolean booleanProperty(String key, String value) {
|
||||
if (value == null || value.isEmpty() || value.equalsIgnoreCase("false") || value.equalsIgnoreCase("0")) {
|
||||
return false;
|
||||
}
|
||||
|
@ -0,0 +1,132 @@
|
||||
/*
|
||||
* Copyright Debezium Authors.
|
||||
*
|
||||
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
|
||||
*/
|
||||
package io.debezium.testing.openshift.tools;
|
||||
|
||||
import static io.debezium.testing.openshift.tools.WaitConditions.scaled;
|
||||
import static java.util.concurrent.TimeUnit.MINUTES;
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import io.fabric8.kubernetes.api.model.Container;
|
||||
import io.fabric8.kubernetes.api.model.EnvVar;
|
||||
import io.fabric8.kubernetes.api.model.LocalObjectReference;
|
||||
import io.fabric8.kubernetes.api.model.apps.Deployment;
|
||||
import io.fabric8.openshift.client.OpenShiftClient;
|
||||
|
||||
/**
|
||||
* This class provides control over Strimzi Cluster Operator deployed in OpenShift
|
||||
* @author Jakub Cechacek
|
||||
*/
|
||||
public class OperatorController {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(OperatorController.class);
|
||||
|
||||
protected final OpenShiftClient ocp;
|
||||
protected final OpenShiftUtils ocpUtils;
|
||||
protected final Map<String, String> podLabels;
|
||||
protected String project;
|
||||
protected Deployment operator;
|
||||
protected String name;
|
||||
|
||||
public OperatorController(Deployment operator, Map<String, String> podLabels, OpenShiftClient ocp) {
|
||||
this.operator = operator;
|
||||
this.podLabels = podLabels;
|
||||
this.name = operator.getMetadata().getName();
|
||||
this.project = operator.getMetadata().getNamespace();
|
||||
this.ocp = ocp;
|
||||
this.ocpUtils = new OpenShiftUtils(ocp);
|
||||
}
|
||||
|
||||
/**
|
||||
* Disables Strimzi cluster operator by scaling it to ZERO
|
||||
*/
|
||||
public void disable() {
|
||||
LOGGER.info("Disabling Operator");
|
||||
setNumberOfReplicas(0);
|
||||
operator = ocp.apps().deployments().inNamespace(project).withName(name).createOrReplace(operator);
|
||||
await()
|
||||
.atMost(scaled(30), SECONDS)
|
||||
.pollDelay(5, SECONDS)
|
||||
.pollInterval(3, SECONDS)
|
||||
.until(() -> ocp.pods().inNamespace(project).withLabels(podLabels).list().getItems().isEmpty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Enables Strimzi cluster operator by scaling it to ONE
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public void enable() throws InterruptedException {
|
||||
LOGGER.info("Enabling Operator");
|
||||
setNumberOfReplicas(1);
|
||||
updateOperator();
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets number of replicas
|
||||
* @param replicas number of replicas
|
||||
*/
|
||||
public void setNumberOfReplicas(int replicas) {
|
||||
LOGGER.info("Scaling Operator replicas to " + replicas);
|
||||
operator.getSpec().setReplicas(replicas);
|
||||
}
|
||||
|
||||
/**
|
||||
* Semantic shortcut for calling {@link #setNumberOfReplicas(int)} with {@code 1} as value
|
||||
*/
|
||||
public void setSingleReplica() {
|
||||
setNumberOfReplicas(1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets image pull secret for operator's {@link Deployment} resource
|
||||
* @param secret name of the secret
|
||||
*/
|
||||
public void setImagePullSecret(String secret) {
|
||||
LOGGER.info("Using " + secret + " as image pull secret for deployment '" + name + "'");
|
||||
List<LocalObjectReference> pullSecrets = Collections.singletonList(new LocalObjectReference(secret));
|
||||
ocpUtils.ensureHasPullSecret(operator, secret);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets pull policy of the operator to 'Always'
|
||||
*/
|
||||
public void setAlwaysPullPolicy() {
|
||||
LOGGER.info("Using 'Always' pull policy for all containers of deployment " + name + "'");
|
||||
List<Container> containers = operator.getSpec().getTemplate().getSpec().getContainers();
|
||||
containers.forEach(c -> c.setImagePullPolicy("Always"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set environment variable on all containers of operator's deployment
|
||||
* @param name variable's name
|
||||
* @param val variable's value
|
||||
*/
|
||||
public void setEnvVar(String name, String val) {
|
||||
LOGGER.info("Setting variable " + name + "='" + val + "' on deployment '" + this.name + "'");
|
||||
ocpUtils.ensureHasEnv(operator, new EnvVar(name, val, null));
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates Operator's {@link Deployment} resource
|
||||
* @return {@link Deployment} resource of the operator
|
||||
*/
|
||||
public Deployment updateOperator() throws InterruptedException {
|
||||
operator = ocp.apps().deployments().inNamespace(project).createOrReplace(operator);
|
||||
operator = waitForAvailable();
|
||||
return operator;
|
||||
}
|
||||
|
||||
private Deployment waitForAvailable() throws InterruptedException {
|
||||
return ocp.apps().deployments().inNamespace(project).withName(name).waitUntilCondition(WaitConditions::deploymentAvailableCondition, scaled(5), MINUTES);
|
||||
}
|
||||
|
||||
}
|
@ -13,6 +13,8 @@
|
||||
import io.fabric8.kubernetes.api.model.apps.Deployment;
|
||||
import io.fabric8.kubernetes.api.model.apps.DeploymentCondition;
|
||||
import io.fabric8.kubernetes.api.model.apps.DeploymentStatus;
|
||||
import io.fabric8.openshift.api.model.DeploymentConfig;
|
||||
import io.fabric8.openshift.api.model.DeploymentConfigStatus;
|
||||
import io.strimzi.api.kafka.model.status.HasStatus;
|
||||
import io.strimzi.api.kafka.model.status.Status;
|
||||
|
||||
@ -52,6 +54,20 @@ public static boolean deploymentAvailableCondition(Deployment resource) {
|
||||
return conditions.anyMatch(c -> c.getType().equalsIgnoreCase("Available") && c.getStatus().equalsIgnoreCase("True"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait condition for deployment configs
|
||||
* @param resource dc resource
|
||||
* @return true when dc becomes available
|
||||
*/
|
||||
public static boolean deploymentAvailableCondition(DeploymentConfig resource) {
|
||||
DeploymentConfigStatus status = resource.getStatus();
|
||||
if (status == null) {
|
||||
return false;
|
||||
}
|
||||
Stream<io.fabric8.openshift.api.model.DeploymentCondition> conditions = status.getConditions().stream();
|
||||
return conditions.anyMatch(c -> c.getType().equalsIgnoreCase("Available") && c.getStatus().equalsIgnoreCase("True"));
|
||||
}
|
||||
|
||||
public static long scaled(long amount) {
|
||||
long scaled = ConfigProperties.WAIT_SCALE_FACTOR * amount;
|
||||
LOGGER.debug("Waiting amount: " + scaled);
|
||||
|
@ -73,7 +73,7 @@ public void execute(String database, Commands<MongoDatabase, RuntimeException> c
|
||||
|
||||
public void execute(String database, String collection, Commands<MongoCollection<Document>, RuntimeException> commands) {
|
||||
execute(database, db -> {
|
||||
MongoCollection col = db.getCollection(collection);
|
||||
MongoCollection<Document> col = db.getCollection(collection);
|
||||
commands.execute(col);
|
||||
});
|
||||
}
|
||||
|
@ -36,6 +36,27 @@ public Map<String, Object> get() {
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds all properties required to enable Avro serialisation via Apicurio Registry.
|
||||
* @param apicurioUrl Apicurio REST endpoint
|
||||
* @return this builder
|
||||
*/
|
||||
public ConnectorConfigBuilder addApicurioAvroSupport(String apicurioUrl) {
|
||||
config.put("key.converter", "io.apicurio.registry.utils.converter.AvroConverter");
|
||||
config.put("key.converter.apicurio.registry.url", apicurioUrl);
|
||||
config.put("key.converter.apicurio.registry.converter.serializer", "io.apicurio.registry.utils.serde.AvroKafkaSerializer");
|
||||
config.put("key.converter.apicurio.registry.converter.deserializer", "io.apicurio.registry.utils.serde.AvroKafkaDeserializer");
|
||||
config.put("key.converter.apicurio.registry.global-id", "io.apicurio.registry.utils.serde.strategy.AutoRegisterIdStrategy");
|
||||
|
||||
config.put("value.converter", "io.apicurio.registry.utils.converter.AvroConverter");
|
||||
config.put("value.converter.apicurio.registry.url", apicurioUrl);
|
||||
config.put("value.converter.apicurio.registry.converter.serializer", "io.apicurio.registry.utils.serde.AvroKafkaSerializer");
|
||||
config.put("value.converter.apicurio.registry.converter.deserializer", "io.apicurio.registry.utils.serde.AvroKafkaDeserializer");
|
||||
config.put("value.converter.apicurio.registry.global-id", "io.apicurio.registry.utils.serde.strategy.AutoRegisterIdStrategy");
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get configuration as JSON string
|
||||
* @return JSON string of connector config
|
||||
|
@ -128,7 +128,7 @@ public NetworkPolicy allowServiceAccess() {
|
||||
labels.put("strimzi.io/kind", "KafkaConnect");
|
||||
labels.put("strimzi.io/name", kafkaConnect.getMetadata().getName() + "-connect");
|
||||
|
||||
List<NetworkPolicyPort> ports = Stream.of(8083, 8404)
|
||||
List<NetworkPolicyPort> ports = Stream.of(8083, 8404, 9404)
|
||||
.map(IntOrString::new)
|
||||
.map(p -> new NetworkPolicyPortBuilder().withProtocol("TCP").withPort(p).build())
|
||||
.collect(Collectors.toList());
|
||||
|
@ -5,15 +5,24 @@
|
||||
*/
|
||||
package io.debezium.testing.openshift.tools.kafka;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.MINUTES;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import io.debezium.testing.openshift.tools.OpenShiftUtils;
|
||||
import io.debezium.testing.openshift.tools.WaitConditions;
|
||||
import io.debezium.testing.openshift.tools.YAML;
|
||||
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
|
||||
import io.fabric8.kubernetes.client.dsl.Resource;
|
||||
import io.fabric8.openshift.client.OpenShiftClient;
|
||||
import io.strimzi.api.kafka.Crds;
|
||||
import io.strimzi.api.kafka.KafkaTopicList;
|
||||
import io.strimzi.api.kafka.model.DoneableKafkaTopic;
|
||||
import io.strimzi.api.kafka.model.Kafka;
|
||||
import io.strimzi.api.kafka.model.KafkaTopic;
|
||||
import io.strimzi.api.kafka.model.status.ListenerAddress;
|
||||
import io.strimzi.api.kafka.model.status.ListenerStatus;
|
||||
|
||||
@ -52,6 +61,12 @@ public String getKafkaBootstrapAddress() {
|
||||
return address.getHost() + ":" + address.getPort();
|
||||
}
|
||||
|
||||
public KafkaTopic deployTopic(String yamlPath) throws InterruptedException {
|
||||
LOGGER.info("Deploying Kafka topic from " + yamlPath);
|
||||
KafkaTopic topic = topicOperation().createOrReplace(YAML.fromResource(yamlPath, KafkaTopic.class));
|
||||
return waitForKafkaTopic(topic.getMetadata().getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Undeploy this Kafka cluster by deleted related KafkaConnect CR
|
||||
* @return true if the CR was found and deleted
|
||||
@ -59,4 +74,18 @@ public String getKafkaBootstrapAddress() {
|
||||
public boolean undeployCluster() {
|
||||
return Crds.kafkaOperation(ocp).delete(kafka);
|
||||
}
|
||||
|
||||
/**
|
||||
* Waits until topic is properly deployed.
|
||||
* @param name name of the topic
|
||||
* @throws InterruptedException on wait error
|
||||
* @throws IllegalArgumentException when deployment doesn't use custom resources
|
||||
*/
|
||||
public KafkaTopic waitForKafkaTopic(String name) throws InterruptedException {
|
||||
return topicOperation().withName(name).waitUntilCondition(WaitConditions::kafkaReadyCondition, 5, MINUTES);
|
||||
}
|
||||
|
||||
private NonNamespaceOperation<KafkaTopic, KafkaTopicList, DoneableKafkaTopic, Resource<KafkaTopic, DoneableKafkaTopic>> topicOperation() {
|
||||
return Crds.topicOperation(ocp).inNamespace(project);
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Copyright Debezium Authors.
|
||||
*
|
||||
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
|
||||
*/
|
||||
package io.debezium.testing.openshift.tools.registry;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import io.apicurio.registry.operator.api.model.ApicurioRegistry;
|
||||
import io.apicurio.registry.operator.api.model.ApicurioRegistryList;
|
||||
import io.apicurio.registry.operator.api.model.DoneableApicurioRegistry;
|
||||
import io.debezium.testing.openshift.tools.ConfigProperties;
|
||||
import io.debezium.testing.openshift.tools.OpenShiftUtils;
|
||||
import io.fabric8.kubernetes.api.model.Service;
|
||||
import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition;
|
||||
import io.fabric8.openshift.api.model.Route;
|
||||
import io.fabric8.openshift.client.OpenShiftClient;
|
||||
|
||||
import okhttp3.OkHttpClient;
|
||||
|
||||
/**
|
||||
* This class provides control over Kafka instance deployed in OpenShift
|
||||
* @author Jakub Cechacek
|
||||
*/
|
||||
public class RegistryController {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(RegistryController.class);
|
||||
|
||||
private final ApicurioRegistry registry;
|
||||
private final OpenShiftClient ocp;
|
||||
private final OkHttpClient http;
|
||||
private final String project;
|
||||
private final OpenShiftUtils ocpUtils;
|
||||
|
||||
public RegistryController(ApicurioRegistry registry, OpenShiftClient ocp, OkHttpClient http) {
|
||||
this.registry = registry;
|
||||
this.ocp = ocp;
|
||||
this.http = http;
|
||||
this.project = registry.getMetadata().getNamespace();
|
||||
this.ocpUtils = new OpenShiftUtils(ocp);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return registry url
|
||||
*/
|
||||
public String getRegistryAddress() {
|
||||
Service s = getRegistryService();
|
||||
return "http://" + s.getMetadata().getName() + "." + ConfigProperties.OCP_PROJECT_REGISTRY + ".svc.cluster.local:8080";
|
||||
}
|
||||
|
||||
/**
|
||||
* @return registry url
|
||||
*/
|
||||
public String getRegistryApiAddress() {
|
||||
return getRegistryAddress() + "/api";
|
||||
}
|
||||
|
||||
/**
|
||||
* @return registry public url
|
||||
*/
|
||||
public String getPublicRegistryAddress() {
|
||||
List<Route> items = ocp.routes().inNamespace(project).withLabel("apicur.io/name=", registry.getMetadata().getName()).list().getItems();
|
||||
if (items.isEmpty()) {
|
||||
throw new IllegalStateException("No route for registry '" + registry.getMetadata().getName() + "'");
|
||||
}
|
||||
String host = items.get(0).getSpec().getHost();
|
||||
return "http://" + host;
|
||||
}
|
||||
|
||||
private Service getRegistryService() {
|
||||
List<Service> items = ocp.services().inNamespace(project).withLabel("apicur.io/name=", registry.getMetadata().getName()).list().getItems();
|
||||
if (items.isEmpty()) {
|
||||
throw new IllegalStateException("No service for registry '" + registry.getMetadata().getName() + "'");
|
||||
}
|
||||
|
||||
return items.get(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Undeploy this registry by deleted related ApicurioRegistry CR
|
||||
* @return true if the CR was found and deleted
|
||||
*/
|
||||
public boolean undeployRegistry() {
|
||||
CustomResourceDefinition crd = ocp.customResourceDefinitions().load(RegistryDeployer.class.getResourceAsStream("/apicur.io_apicurioregistries_crd.yaml")).get();
|
||||
return ocp.customResources(crd, ApicurioRegistry.class, ApicurioRegistryList.class, DoneableApicurioRegistry.class).inNamespace(project).delete(registry);
|
||||
}
|
||||
}
|
@ -0,0 +1,111 @@
|
||||
/*
|
||||
* Copyright Debezium Authors.
|
||||
*
|
||||
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
|
||||
*/
|
||||
package io.debezium.testing.openshift.tools.registry;
|
||||
|
||||
import static io.debezium.testing.openshift.tools.WaitConditions.scaled;
|
||||
import static java.util.concurrent.TimeUnit.MINUTES;
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import io.apicurio.registry.operator.api.model.ApicurioRegistry;
|
||||
import io.apicurio.registry.operator.api.model.ApicurioRegistryList;
|
||||
import io.apicurio.registry.operator.api.model.DoneableApicurioRegistry;
|
||||
import io.debezium.testing.openshift.tools.OpenShiftUtils;
|
||||
import io.debezium.testing.openshift.tools.OperatorController;
|
||||
import io.debezium.testing.openshift.tools.WaitConditions;
|
||||
import io.debezium.testing.openshift.tools.YAML;
|
||||
import io.debezium.testing.openshift.tools.kafka.KafkaController;
|
||||
import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition;
|
||||
import io.fabric8.kubernetes.api.model.apps.Deployment;
|
||||
import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
|
||||
import io.fabric8.kubernetes.client.dsl.Resource;
|
||||
import io.fabric8.kubernetes.internal.KubernetesDeserializer;
|
||||
import io.fabric8.openshift.api.model.DeploymentConfig;
|
||||
import io.fabric8.openshift.client.OpenShiftClient;
|
||||
|
||||
import okhttp3.OkHttpClient;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Jakub Cechacek
|
||||
*/
|
||||
public class RegistryDeployer {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(RegistryDeployer.class);
|
||||
|
||||
private final OpenShiftClient ocp;
|
||||
private final OkHttpClient http;
|
||||
private final OpenShiftUtils ocpUtils;
|
||||
private final String project;
|
||||
private final KafkaController kafkaController;
|
||||
|
||||
public RegistryDeployer(String project, OpenShiftClient ocp, OkHttpClient http, KafkaController kafkaController) {
|
||||
this.project = project;
|
||||
this.ocp = ocp;
|
||||
this.http = http;
|
||||
this.ocpUtils = new OpenShiftUtils(ocp);
|
||||
this.kafkaController = kafkaController;
|
||||
}
|
||||
|
||||
/**
|
||||
* Accessor for operator controller.
|
||||
* @return {@link OperatorController} instance for cluster operator in {@link #project}
|
||||
*/
|
||||
public OperatorController getOperator() {
|
||||
Deployment operator = ocp.apps().deployments().inNamespace(project).withName("apicurio-registry-operator").get();
|
||||
return new OperatorController(operator, Collections.singletonMap("name", "apicurio-registry-operator"), ocp);
|
||||
}
|
||||
|
||||
/**
|
||||
* Deploys Kafka Cluster
|
||||
* @param yamlPath path to CR descriptor (must be available on class path)
|
||||
* @return {@link RegistryController} instance for deployed registry
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public RegistryController deployRegistry(String yamlPath, String storageTopicYamlPath, String idTopicYamlPath) throws InterruptedException {
|
||||
LOGGER.info("Deploying Apicurio Registry from " + yamlPath);
|
||||
kafkaController.deployTopic(storageTopicYamlPath);
|
||||
kafkaController.deployTopic(idTopicYamlPath);
|
||||
|
||||
ApicurioRegistry registry = YAML.fromResource(yamlPath, ApicurioRegistry.class);
|
||||
registry = registryOperation().createOrReplace(registry);
|
||||
registry = waitForRegistry(registry.getMetadata().getName());
|
||||
|
||||
return new RegistryController(registry, ocp, http);
|
||||
}
|
||||
|
||||
public NonNamespaceOperation<ApicurioRegistry, ApicurioRegistryList, DoneableApicurioRegistry, Resource<ApicurioRegistry, DoneableApicurioRegistry>> registryOperation() {
|
||||
CustomResourceDefinition crd = ocp.customResourceDefinitions().load(RegistryDeployer.class.getResourceAsStream("/crds/apicur.io_apicurioregistries_crd.yaml"))
|
||||
.get();
|
||||
KubernetesDeserializer.registerCustomKind("apicur.io/v1alpha1", "ApicurioRegistry", ApicurioRegistry.class);
|
||||
return ocp.customResources(crd, ApicurioRegistry.class, ApicurioRegistryList.class, DoneableApicurioRegistry.class).inNamespace(project);
|
||||
}
|
||||
|
||||
public ApicurioRegistry waitForRegistry(String name) throws InterruptedException {
|
||||
LOGGER.info("Waiting for deployments of registry '" + name + "'");
|
||||
await()
|
||||
.atMost(scaled(1), MINUTES)
|
||||
.pollInterval(5, SECONDS)
|
||||
.until(() -> !getRegistryDeployments(name).isEmpty());
|
||||
|
||||
DeploymentConfig dc = getRegistryDeployments(name).get(0);
|
||||
ocp.deploymentConfigs()
|
||||
.inNamespace(project)
|
||||
.withName(dc.getMetadata().getName())
|
||||
.waitUntilCondition(WaitConditions::deploymentAvailableCondition, scaled(5), MINUTES);
|
||||
|
||||
return registryOperation().withName(name).get();
|
||||
}
|
||||
|
||||
private List<DeploymentConfig> getRegistryDeployments(String name) {
|
||||
return ocp.deploymentConfigs().inNamespace(project).withLabel("apicur.io/name", name).list().getItems();
|
||||
}
|
||||
}
|
# apicur.io_apicurioregistries_crd.yaml — CustomResourceDefinition for ApicurioRegistry (apicur.io/v1alpha1)
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: apicurioregistries.apicur.io
|
||||
spec:
|
||||
group: apicur.io
|
||||
names:
|
||||
kind: ApicurioRegistry
|
||||
listKind: ApicurioRegistryList
|
||||
plural: apicurioregistries
|
||||
singular: apicurioregistry
|
||||
scope: Namespaced
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: ApicurioRegistry is the Schema for the apicurioregistries API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: ApicurioRegistrySpec defines the desired state of ApicurioRegistry
|
||||
properties:
|
||||
configuration:
|
||||
properties:
|
||||
dataSource:
|
||||
properties:
|
||||
password:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
userName:
|
||||
type: string
|
||||
type: object
|
||||
infinispan:
|
||||
properties:
|
||||
clusterName:
|
||||
type: string
|
||||
type: object
|
||||
kafka:
|
||||
properties:
|
||||
bootstrapServers:
|
||||
type: string
|
||||
type: object
|
||||
logLevel:
|
||||
type: string
|
||||
persistence:
|
||||
enum:
|
||||
- mem
|
||||
- jpa
|
||||
- kafka
|
||||
- streams
|
||||
- infinispan
|
||||
type: string
|
||||
streams:
|
||||
properties:
|
||||
applicationId:
|
||||
type: string
|
||||
applicationServerPort:
|
||||
type: string
|
||||
bootstrapServers:
|
||||
type: string
|
||||
security:
|
||||
properties:
|
||||
scram:
|
||||
properties:
|
||||
mechanism:
|
||||
type: string
|
||||
passwordSecretName:
|
||||
type: string
|
||||
truststoreSecretName:
|
||||
type: string
|
||||
user:
|
||||
type: string
|
||||
type: object
|
||||
tls:
|
||||
properties:
|
||||
keystoreSecretName:
|
||||
type: string
|
||||
truststoreSecretName:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
ui:
|
||||
properties:
|
||||
readOnly:
|
||||
type: boolean
|
||||
type: object
|
||||
type: object
|
||||
deployment:
|
||||
properties:
|
||||
affinity:
|
||||
description: Affinity is a group of affinity scheduling rules.
|
||||
properties:
|
||||
nodeAffinity:
|
||||
description: Describes node affinity scheduling rules for the
|
||||
pod.
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
description: The scheduler will prefer to schedule pods
|
||||
to nodes that satisfy the affinity expressions specified
|
||||
by this field, but it may choose a node that violates
|
||||
one or more of the expressions. The node that is most
|
||||
preferred is the one with the greatest sum of weights,
|
||||
i.e. for each node that meets all of the scheduling requirements
|
||||
(resource request, requiredDuringScheduling affinity expressions,
|
||||
etc.), compute a sum by iterating through the elements
|
||||
of this field and adding "weight" to the sum if the node
|
||||
matches the corresponding matchExpressions; the node(s)
|
||||
with the highest sum are the most preferred.
|
||||
items:
|
||||
description: An empty preferred scheduling term matches
|
||||
all objects with implicit weight 0 (i.e. it's a no-op).
|
||||
A null preferred scheduling term matches no objects
|
||||
(i.e. is also a no-op).
|
||||
properties:
|
||||
preference:
|
||||
description: A node selector term, associated with
|
||||
the corresponding weight.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of node selector requirements
|
||||
by node's labels.
|
||||
items:
|
||||
description: A node selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship
|
||||
to a set of values. Valid operators are
|
||||
In, NotIn, Exists, DoesNotExist. Gt, and
|
||||
Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values.
|
||||
If the operator is In or NotIn, the values
|
||||
array must be non-empty. If the operator
|
||||
is Exists or DoesNotExist, the values
|
||||
array must be empty. If the operator is
|
||||
Gt or Lt, the values array must have a
|
||||
single element, which will be interpreted
|
||||
as an integer. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchFields:
|
||||
description: A list of node selector requirements
|
||||
by node's fields.
|
||||
items:
|
||||
description: A node selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship
|
||||
to a set of values. Valid operators are
|
||||
In, NotIn, Exists, DoesNotExist. Gt, and
|
||||
Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values.
|
||||
If the operator is In or NotIn, the values
|
||||
array must be non-empty. If the operator
|
||||
is Exists or DoesNotExist, the values
|
||||
array must be empty. If the operator is
|
||||
Gt or Lt, the values array must have a
|
||||
single element, which will be interpreted
|
||||
as an integer. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
weight:
|
||||
description: Weight associated with matching the corresponding
|
||||
nodeSelectorTerm, in the range 1-100.
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- preference
|
||||
- weight
|
||||
type: object
|
||||
type: array
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
description: If the affinity requirements specified by this
|
||||
field are not met at scheduling time, the pod will not
|
||||
be scheduled onto the node. If the affinity requirements
|
||||
specified by this field cease to be met at some point
|
||||
during pod execution (e.g. due to an update), the system
|
||||
may or may not try to eventually evict the pod from its
|
||||
node.
|
||||
properties:
|
||||
nodeSelectorTerms:
|
||||
description: Required. A list of node selector terms.
|
||||
The terms are ORed.
|
||||
items:
|
||||
description: A null or empty node selector term matches
|
||||
no objects. The requirements of them are ANDed.
|
||||
The TopologySelectorTerm type implements a subset
|
||||
of the NodeSelectorTerm.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of node selector requirements
|
||||
by node's labels.
|
||||
items:
|
||||
description: A node selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship
|
||||
to a set of values. Valid operators are
|
||||
In, NotIn, Exists, DoesNotExist. Gt, and
|
||||
Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values.
|
||||
If the operator is In or NotIn, the values
|
||||
array must be non-empty. If the operator
|
||||
is Exists or DoesNotExist, the values
|
||||
array must be empty. If the operator is
|
||||
Gt or Lt, the values array must have a
|
||||
single element, which will be interpreted
|
||||
as an integer. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchFields:
|
||||
description: A list of node selector requirements
|
||||
by node's fields.
|
||||
items:
|
||||
description: A node selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship
|
||||
to a set of values. Valid operators are
|
||||
In, NotIn, Exists, DoesNotExist. Gt, and
|
||||
Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values.
|
||||
If the operator is In or NotIn, the values
|
||||
array must be non-empty. If the operator
|
||||
is Exists or DoesNotExist, the values
|
||||
array must be empty. If the operator is
|
||||
Gt or Lt, the values array must have a
|
||||
single element, which will be interpreted
|
||||
as an integer. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- nodeSelectorTerms
|
||||
type: object
|
||||
type: object
|
||||
podAffinity:
|
||||
description: Describes pod affinity scheduling rules (e.g. co-locate
|
||||
this pod in the same node, zone, etc. as some other pod(s)).
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
description: The scheduler will prefer to schedule pods
|
||||
to nodes that satisfy the affinity expressions specified
|
||||
by this field, but it may choose a node that violates
|
||||
one or more of the expressions. The node that is most
|
||||
preferred is the one with the greatest sum of weights,
|
||||
i.e. for each node that meets all of the scheduling requirements
|
||||
(resource request, requiredDuringScheduling affinity expressions,
|
||||
etc.), compute a sum by iterating through the elements
|
||||
of this field and adding "weight" to the sum if the node
|
||||
has pods which matches the corresponding podAffinityTerm;
|
||||
the node(s) with the highest sum are the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm
|
||||
fields are added per-node to find the most preferred
|
||||
node(s)
|
||||
properties:
|
||||
podAffinityTerm:
|
||||
description: Required. A pod affinity term, associated
|
||||
with the corresponding weight.
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources,
|
||||
in this case pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of
|
||||
label selector requirements. The requirements
|
||||
are ANDed.
|
||||
items:
|
||||
description: A label selector requirement
|
||||
is a selector that contains values, a
|
||||
key, and an operator that relates the
|
||||
key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that
|
||||
the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and
|
||||
DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This
|
||||
array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is
|
||||
"In", and the values array contains only
|
||||
"value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
namespaces:
|
||||
description: namespaces specifies which namespaces
|
||||
the labelSelector applies to (matches against);
|
||||
null or empty list means "this pod's namespace"
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
topologyKey:
|
||||
description: This pod should be co-located (affinity)
|
||||
or not co-located (anti-affinity) with the pods
|
||||
matching the labelSelector in the specified
|
||||
namespaces, where co-located is defined as running
|
||||
on a node whose value of the label with key
|
||||
topologyKey matches that of any node on which
|
||||
any of the selected pods is running. Empty topologyKey
|
||||
is not allowed.
|
||||
type: string
|
||||
required:
|
||||
- topologyKey
|
||||
type: object
|
||||
weight:
|
||||
description: weight associated with matching the corresponding
|
||||
podAffinityTerm, in the range 1-100.
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- podAffinityTerm
|
||||
- weight
|
||||
type: object
|
||||
type: array
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
description: If the affinity requirements specified by this
|
||||
field are not met at scheduling time, the pod will not
|
||||
be scheduled onto the node. If the affinity requirements
|
||||
specified by this field cease to be met at some point
|
||||
during pod execution (e.g. due to a pod label update),
|
||||
the system may or may not try to eventually evict the
|
||||
pod from its node. When there are multiple elements, the
|
||||
lists of nodes corresponding to each podAffinityTerm are
|
||||
intersected, i.e. all terms must be satisfied.
|
||||
items:
|
||||
description: Defines a set of pods (namely those matching
|
||||
the labelSelector relative to the given namespace(s))
|
||||
that this pod should be co-located (affinity) or not
|
||||
co-located (anti-affinity) with, where co-located is
|
||||
defined as running on a node whose value of the label
|
||||
with key <topologyKey> matches that of any node on which
|
||||
a pod of the set of pods is running
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources,
|
||||
in this case pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
namespaces:
|
||||
description: namespaces specifies which namespaces
|
||||
the labelSelector applies to (matches against);
|
||||
null or empty list means "this pod's namespace"
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
topologyKey:
|
||||
description: This pod should be co-located (affinity)
|
||||
or not co-located (anti-affinity) with the pods
|
||||
matching the labelSelector in the specified namespaces,
|
||||
where co-located is defined as running on a node
|
||||
whose value of the label with key topologyKey matches
|
||||
that of any node on which any of the selected pods
|
||||
is running. Empty topologyKey is not allowed.
|
||||
type: string
|
||||
required:
|
||||
- topologyKey
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
podAntiAffinity:
|
||||
description: Describes pod anti-affinity scheduling rules (e.g.
|
||||
avoid putting this pod in the same node, zone, etc. as some
|
||||
other pod(s)).
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
description: The scheduler will prefer to schedule pods
|
||||
to nodes that satisfy the anti-affinity expressions specified
|
||||
by this field, but it may choose a node that violates
|
||||
one or more of the expressions. The node that is most
|
||||
preferred is the one with the greatest sum of weights,
|
||||
i.e. for each node that meets all of the scheduling requirements
|
||||
(resource request, requiredDuringScheduling anti-affinity
|
||||
expressions, etc.), compute a sum by iterating through
|
||||
the elements of this field and adding "weight" to the
|
||||
sum if the node has pods which matches the corresponding
|
||||
podAffinityTerm; the node(s) with the highest sum are
|
||||
the most preferred.
|
||||
items:
|
||||
description: The weights of all of the matched WeightedPodAffinityTerm
|
||||
fields are added per-node to find the most preferred
|
||||
node(s)
|
||||
properties:
|
||||
podAffinityTerm:
|
||||
description: Required. A pod affinity term, associated
|
||||
with the corresponding weight.
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources,
|
||||
in this case pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of
|
||||
label selector requirements. The requirements
|
||||
are ANDed.
|
||||
items:
|
||||
description: A label selector requirement
|
||||
is a selector that contains values, a
|
||||
key, and an operator that relates the
|
||||
key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that
|
||||
the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and
|
||||
DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This
|
||||
array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is
|
||||
"In", and the values array contains only
|
||||
"value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
namespaces:
|
||||
description: namespaces specifies which namespaces
|
||||
the labelSelector applies to (matches against);
|
||||
null or empty list means "this pod's namespace"
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
topologyKey:
|
||||
description: This pod should be co-located (affinity)
|
||||
or not co-located (anti-affinity) with the pods
|
||||
matching the labelSelector in the specified
|
||||
namespaces, where co-located is defined as running
|
||||
on a node whose value of the label with key
|
||||
topologyKey matches that of any node on which
|
||||
any of the selected pods is running. Empty topologyKey
|
||||
is not allowed.
|
||||
type: string
|
||||
required:
|
||||
- topologyKey
|
||||
type: object
|
||||
weight:
|
||||
description: weight associated with matching the corresponding
|
||||
podAffinityTerm, in the range 1-100.
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- podAffinityTerm
|
||||
- weight
|
||||
type: object
|
||||
type: array
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
description: If the anti-affinity requirements specified
|
||||
by this field are not met at scheduling time, the pod
|
||||
will not be scheduled onto the node. If the anti-affinity
|
||||
requirements specified by this field cease to be met at
|
||||
some point during pod execution (e.g. due to a pod label
|
||||
update), the system may or may not try to eventually evict
|
||||
the pod from its node. When there are multiple elements,
|
||||
the lists of nodes corresponding to each podAffinityTerm
|
||||
are intersected, i.e. all terms must be satisfied.
|
||||
items:
|
||||
description: Defines a set of pods (namely those matching
|
||||
the labelSelector relative to the given namespace(s))
|
||||
that this pod should be co-located (affinity) or not
|
||||
co-located (anti-affinity) with, where co-located is
|
||||
defined as running on a node whose value of the label
|
||||
with key <topologyKey> matches that of any node on which
|
||||
a pod of the set of pods is running
|
||||
properties:
|
||||
labelSelector:
|
||||
description: A label query over a set of resources,
|
||||
in this case pods.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
namespaces:
|
||||
description: namespaces specifies which namespaces
|
||||
the labelSelector applies to (matches against);
|
||||
null or empty list means "this pod's namespace"
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
topologyKey:
|
||||
description: This pod should be co-located (affinity)
|
||||
or not co-located (anti-affinity) with the pods
|
||||
matching the labelSelector in the specified namespaces,
|
||||
where co-located is defined as running on a node
|
||||
whose value of the label with key topologyKey matches
|
||||
that of any node on which any of the selected pods
|
||||
is running. Empty topologyKey is not allowed.
|
||||
type: string
|
||||
required:
|
||||
- topologyKey
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
host:
|
||||
type: string
|
||||
replicas:
|
||||
format: int32
|
||||
type: integer
|
||||
tolerations:
|
||||
items:
|
||||
description: The pod this Toleration is attached to tolerates
|
||||
any taint that matches the triple <key,value,effect> using the
|
||||
matching operator <operator>.
|
||||
properties:
|
||||
effect:
|
||||
description: Effect indicates the taint effect to match. Empty
|
||||
means match all taint effects. When specified, allowed values
|
||||
are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
type: string
|
||||
key:
|
||||
description: Key is the taint key that the toleration applies
|
||||
to. Empty means match all taint keys. If the key is empty,
|
||||
operator must be Exists; this combination means to match
|
||||
all values and all keys.
|
||||
type: string
|
||||
operator:
|
||||
description: Operator represents a key's relationship to the
|
||||
value. Valid operators are Exists and Equal. Defaults to
|
||||
Equal. Exists is equivalent to wildcard for value, so that
|
||||
a pod can tolerate all taints of a particular category.
|
||||
type: string
|
||||
tolerationSeconds:
|
||||
description: TolerationSeconds represents the period of time
|
||||
the toleration (which must be of effect NoExecute, otherwise
|
||||
this field is ignored) tolerates the taint. By default,
|
||||
it is not set, which means tolerate the taint forever (do
|
||||
not evict). Zero and negative values will be treated as
|
||||
0 (evict immediately) by the system.
|
||||
format: int64
|
||||
type: integer
|
||||
value:
|
||||
description: Value is the taint value the toleration matches
|
||||
to. If the operator is Exists, the value should be empty,
|
||||
otherwise just a regular string.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
image:
|
||||
properties:
|
||||
name:
|
||||
description: Registry string `json:"registry,omitempty"` Version string
|
||||
`json:"version,omitempty"` Override string `json:"override,omitempty"`
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
status:
|
||||
description: ApicurioRegistryStatus defines the observed state of ApicurioRegistry
|
||||
properties:
|
||||
deploymentName:
|
||||
type: string
|
||||
host:
|
||||
type: string
|
||||
image:
|
||||
type: string
|
||||
ingressName:
|
||||
type: string
|
||||
replicaCount:
|
||||
format: int32
|
||||
type: integer
|
||||
serviceName:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|