Added MySQL ingest module with support for reading DDL statements.

This commit is contained in:
Randall Hauch 2016-01-22 14:15:55 -06:00
parent 8e6c615644
commit 71e90b5a69
37 changed files with 4533 additions and 276 deletions


@ -14,7 +14,8 @@
* {@link Consumer} for {@code boolean}. Unlike most other functional interfaces,
* {@code BooleanConsumer} is expected to operate via side-effects.
*
* <p>This is a functional interface
* <p>
* This is a functional interface
* whose functional method is {@link #accept(boolean)}.
*
* @see Consumer
@ -44,6 +45,9 @@ public interface BooleanConsumer {
*/
default BooleanConsumer andThen(BooleanConsumer after) {
Objects.requireNonNull(after);
return (boolean t) -> { accept(t); after.accept(t); };
return (boolean t) -> {
accept(t);
after.accept(t);
};
}
}
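
For context, the reformatted andThen returns a composed consumer that runs both side-effects in order for each value. A minimal usage sketch (the names below are illustrative, not part of this commit):

import java.util.concurrent.atomic.AtomicInteger;

AtomicInteger trueCount = new AtomicInteger();
BooleanConsumer log = value -> System.out.println("saw " + value);
// Composed consumer: logs the value, then counts the true ones.
BooleanConsumer logAndCount = log.andThen(value -> {
    if (value) trueCount.incrementAndGet();
});
logAndCount.accept(true);
logAndCount.accept(false); // trueCount is now 1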


@ -13,6 +13,10 @@
*/
public class Predicates {
public static <R> Predicate<R> not(Predicate<R> predicate) {
return predicate.negate();
}
public static <T> Predicate<T> notNull() {
return new Predicate<T>() {
@Override


@ -72,9 +72,20 @@ public static ColumnEditor editor() {
* Determine whether this column is optional.
*
* @return {@code true} if it is optional, or {@code false} otherwise
* @see #isRequired()
*/
boolean isOptional();
/**
* Determine whether this column is required. This is equivalent to calling {@code !isOptional()}.
*
* @return {@code true} if it is required (not optional), or {@code false} otherwise
* @see #isOptional()
*/
default boolean isRequired() {
return !isOptional();
}
/**
* Determine whether this column's values are automatically incremented by the database.
*


@ -76,6 +76,11 @@ public boolean isGenerated() {
return generated;
}
@Override
public int hashCode() {
return name.hashCode();
}
@Override
public boolean equals(Object obj) {
if ( obj == this ) return true;
@ -105,9 +110,9 @@ public String toString() {
}
sb.append(')');
}
if ( optional ) sb.append(" optional");
if ( autoIncremented ) sb.append(" autoIncr");
if ( generated ) sb.append(" generated");
if ( !optional ) sb.append(" NOT NULL");
if ( autoIncremented ) sb.append(" AUTO_INCREMENTED");
if ( generated ) sb.append(" GENERATED");
return sb.toString();
}


@ -63,6 +63,15 @@ default List<Column> filterColumns( Predicate<Column> predicate ) {
return columns().stream().filter(predicate).collect(Collectors.toList());
}
/**
* Utility to obtain a list of the names of the columns that satisfy the specified predicate.
* @param predicate the filter predicate; may not be null
* @return the list of names of those columns that satisfy the predicate; never null but possibly empty
*/
default List<String> filterColumnNames( Predicate<Column> predicate ) {
return columns().stream().filter(predicate).map(Column::name).collect(Collectors.toList());
}
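For example, combined with the new Column#isRequired() default, a hypothetical call to collect the NOT NULL column names:

List<String> requiredNames = table.filterColumnNames(Column::isRequired);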
/**
* The list of column names that make up this table.
*


@ -71,6 +71,15 @@ default List<String> columnNames() {
*/
List<String> primaryKeyColumnNames();
/**
* Determine whether this table has a primary key.
* @return {@code true} if this table has at least one {@link #primaryKeyColumnNames() primary key column}, or {@code false}
* if there are no primary key columns
*/
default boolean hasPrimaryKey() {
return !primaryKeyColumnNames().isEmpty();
}
/**
* Add one or more columns to this table, regardless of the {@link Column#position() position} of the supplied
* columns. However, if an existing column definition matches a supplied column, the new column definition will replace
@ -140,6 +149,15 @@ default TableEditor addColumn(Column column) {
*/
TableEditor reorderColumn(String columnName, String afterColumnName);
/**
* Rename the column with the given name to the new specified name.
*
* @param existingName the existing name of the column to be renamed; may not be null
* @param newName the new name of the column; may not be null
* @return this editor so callers can chain methods together
*/
TableEditor renameColumn(String existingName, String newName);
/**
* Set the columns that make up this table's primary key.
*
@ -168,6 +186,13 @@ default TableEditor addColumn(Column column) {
*/
TableEditor setUniqueValues();
/**
* Determine whether this table's primary key contains all columns (via {@link #setUniqueValues()}) such that all rows
* within the table are unique.
* @return {@code true} if {@link #setUniqueValues()} was last called on this table, or {@code false} otherwise
*/
boolean hasUniqueValues();
/**
* Obtain an immutable table definition representing the current state of this editor. This editor can be reused
* after this method, since the resulting table definition no longer refers to any of the data used in this editor.


@ -110,6 +110,7 @@ public TableEditor setPrimaryKeyNames(String... pkColumnNames) {
throw new IllegalArgumentException("The primary key cannot reference a non-existant column'" + pkColumnName + "'");
}
}
uniqueValues = false;
this.pkColumnNames.clear();
for (String pkColumnName : pkColumnNames) {
this.pkColumnNames.add(pkColumnName);
@ -126,6 +127,7 @@ public TableEditor setPrimaryKeyNames(List<String> pkColumnNames) {
}
this.pkColumnNames.clear();
this.pkColumnNames.addAll(pkColumnNames);
uniqueValues = false;
return this;
}
@ -136,6 +138,11 @@ public TableEditor setUniqueValues() {
return this;
}
@Override
public boolean hasUniqueValues() {
return uniqueValues;
}
@Override
public TableEditor removeColumn(String columnName) {
Column existing = sortedColumns.remove(columnName.toLowerCase());
@ -178,6 +185,27 @@ public TableEditor reorderColumn(String columnName, String afterColumnName) {
return this;
}
@Override
public TableEditor renameColumn(String existingName, String newName) {
final Column existing = columnWithName(existingName);
if (existing == null) throw new IllegalArgumentException("No column with name '" + existingName + "'");
Column newColumn = existing.edit().name(newName).create();
// Determine the primary key names ...
List<String> newPkNames = null;
if ( !hasUniqueValues() && primaryKeyColumnNames().contains(existing.name())) {
newPkNames = new ArrayList<>(primaryKeyColumnNames());
newPkNames.replaceAll(name->existing.name().equals(name) ? newName : name);
}
// Add the new column, move it before the existing column, and remove the old column ...
addColumn(newColumn);
reorderColumn(newColumn.name(), existing.name());
removeColumn(existing.name());
if (newPkNames != null) {
setPrimaryKeyNames(newPkNames);
}
return this;
}
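To illustrate the behavior using this commit's own API (Table.editor() and the fluent column editors, as exercised in the tests), a minimal sketch; renaming a key column preserves both its position and its primary-key membership:

TableEditor editor = Table.editor()
        .tableId(new TableId("catalog", "schema", "t"))
        .addColumn(Column.editor().name("id").typeName("INT").jdbcType(Types.INTEGER).create())
        .setPrimaryKeyNames("id");
editor.renameColumn("id", "identifier");
assert editor.columnWithName("identifier") != null;           // renamed in place
assert editor.primaryKeyColumnNames().contains("identifier"); // primary key follows the rename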
protected void updatePositions() {
AtomicInteger position = new AtomicInteger(1);
sortedColumns.replaceAll((name, defn) -> {


@ -501,7 +501,20 @@ protected ValueConverter createValueConverterFor(Column column, Field fieldDefn)
};
case Types.NUMERIC:
case Types.DECIMAL:
return (data) -> Decimal.fromLogical(fieldDefn.schema(), (BigDecimal) data);
return (data) -> {
BigDecimal decimal = null;
if ( data instanceof BigDecimal) decimal = (BigDecimal)data;
else if (data instanceof Boolean) decimal = new BigDecimal(((Boolean)data).booleanValue() ? 1 : 0);
else if (data instanceof Short) decimal = new BigDecimal(((Short)data).intValue());
else if (data instanceof Integer) decimal = new BigDecimal(((Integer)data).intValue());
else if (data instanceof Long) decimal = BigDecimal.valueOf(((Long)data).longValue());
else if (data instanceof Float) decimal = BigDecimal.valueOf(((Float)data).doubleValue());
else if (data instanceof Double) decimal = BigDecimal.valueOf(((Double)data).doubleValue());
else {
handleUnknownData(column, fieldDefn, data);
}
return decimal;
};
// String values
case Types.CHAR: // variable-length
@ -606,7 +619,7 @@ protected Object convertTimestampWithZone(Field fieldDefn, Object data) {
// An unexpected value ...
dateTime = unexpectedTimestampWithZone(data, fieldDefn);
}
return dateTime == null ? null : IsoTimestamp.fromLogical(fieldDefn.schema(), dateTime);
return dateTime;
}
/**
@ -661,7 +674,7 @@ protected Object convertTimeWithZone(Field fieldDefn, Object data) {
// An unexpected value ...
time = unexpectedTimeWithZone(data, fieldDefn);
}
return time == null ? null : IsoTime.fromLogical(fieldDefn.schema(), time);
return time;
}
/**
@ -711,7 +724,7 @@ protected Object convertTimestamp(Field fieldDefn, Object data) {
// An unexpected value ...
date = unexpectedTimestamp(data, fieldDefn);
}
return date == null ? null : Timestamp.fromLogical(fieldDefn.schema(), date);
return date;
}
/**
@ -763,7 +776,7 @@ protected Object convertTime(Field fieldDefn, Object data) {
// An unexpected value ...
date = unexpectedTime(data, fieldDefn);
}
return date == null ? null : Time.fromLogical(fieldDefn.schema(), date);
return date;
}
/**
@ -815,7 +828,7 @@ protected Object convertDate(Field fieldDefn, Object data) {
// An unexpected value ...
date = unexpectedDate(data, fieldDefn);
}
return date == null ? null : Date.fromLogical(fieldDefn.schema(), date);
return date;
}
/**


@ -6,7 +6,9 @@
package io.debezium.relational;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -65,6 +67,7 @@ public static interface ColumnFilter {
private final FunctionalReadWriteLock lock = FunctionalReadWriteLock.reentrant();
private final Map<TableId, TableImpl> tablesByTableId = new HashMap<>();
private final Set<TableId> changes = new HashSet<>();
/**
* Create an empty set of definitions.
@ -81,6 +84,15 @@ public int size() {
return lock.read(tablesByTableId::size);
}
public Set<TableId> drainChanges() {
return lock.write(() -> {
if (changes.isEmpty()) return Collections.emptySet();
Set<TableId> result = new HashSet<>(changes);
changes.clear();
return result;
});
}
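The intent, sketched below with hypothetical table definitions: every mutating operation records the affected identifiers, and drainChanges atomically returns and clears that set.

Tables tables = new Tables();
TableId id = new TableId("db", null, "customers");
tables.overwriteTable(id, columnDefs, pkNames); // columnDefs/pkNames: hypothetical lists
Set<TableId> changed = tables.drainChanges();
assert changed.contains(id);                    // the overwrite was recorded
assert tables.drainChanges().isEmpty();         // already drained; second call is empty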
/**
* Add or update the definition for the identified table.
*
@ -92,7 +104,11 @@ public int size() {
public Table overwriteTable(TableId tableId, List<Column> columnDefs, List<String> primaryKeyColumnNames) {
return lock.write(() -> {
TableImpl updated = new TableImpl(tableId, columnDefs, primaryKeyColumnNames);
try {
return tablesByTableId.put(tableId, updated);
} finally {
changes.add(tableId);
}
});
}
@ -105,7 +121,33 @@ public Table overwriteTable(TableId tableId, List<Column> columnDefs, List<Strin
public Table overwriteTable(Table table) {
return lock.write(() -> {
TableImpl updated = new TableImpl(table);
try {
return tablesByTableId.put(updated.id(), updated);
} finally {
changes.add(updated.id());
}
});
}
/**
* Rename an existing table.
*
* @param existingTableId the identifier of the existing table to be renamed; may not be null
* @param newTableId the new identifier for the table; may not be null
* @return the previous table definition, or null if there was no prior table definition
*/
public Table renameTable(TableId existingTableId, TableId newTableId) {
return lock.write(() -> {
Table existing = forTable(existingTableId);
if (existing == null) return null;
tablesByTableId.remove(existingTableId);
TableImpl updated = new TableImpl(newTableId, existing.columns(), existing.primaryKeyColumnNames());
try {
return tablesByTableId.put(updated.id(), updated);
} finally {
changes.add(existingTableId);
changes.add(updated.id());
}
});
}
@ -124,6 +166,7 @@ public Table updateTable(TableId tableId, Function<Table, Table> changer) {
if (updated != existing) {
tablesByTableId.put(tableId, new TableImpl(tableId, updated.columns(), updated.primaryKeyColumnNames()));
}
changes.add(tableId);
return existing;
});
}
@ -144,6 +187,7 @@ public Table updateTable(TableId tableId, TableChanger changer) {
changer.rewrite(columns, pkColumnNames);
TableImpl updated = new TableImpl(tableId, columns, pkColumnNames);
tablesByTableId.put(tableId, updated);
changes.add(tableId);
return existing;
});
}
@ -159,7 +203,10 @@ public static interface TableChanger {
* @return the existing table definition that was removed, or null if there was no prior table definition
*/
public Table removeTable(TableId tableId) {
return lock.write(() -> tablesByTableId.remove(tableId));
return lock.write(() -> {
changes.add(tableId);
return tablesByTableId.remove(tableId);
});
}
/**
@ -259,11 +306,11 @@ public String toString() {
return lock.read(() -> {
StringBuilder sb = new StringBuilder();
sb.append("Tables {").append(System.lineSeparator());
for (Map.Entry<TableId, TableImpl> entry : tablesByTableId.entrySet()) {
sb.append(" ").append(entry.getKey()).append(": {").append(System.lineSeparator());
entry.getValue().toString(sb, " ");
tablesByTableId.forEach((tableId,table)->{
sb.append(" ").append(tableId).append(": {").append(System.lineSeparator());
table.toString(sb, " ");
sb.append(" }").append(System.lineSeparator());
}
});
sb.append("}");
return sb.toString();
});


@ -37,8 +37,10 @@
* <li>{@code BIT}</li>
* <li>{@code BIT(3)} will match when the length is exactly 3 and will not match {@code BIT(2)}.</li>
* <li>{@code DECIMAL(L[,S])} will match {@code DECIMAL(5)} and {@code DECIMAL (10,3)}</li>
* <li><code>INTEGER{n}</code> will match {@code INTEGER[3]}, which is an array of integers.<li>
* <li><code>ENUM(...)</code> will match {@code ENUM(a,b,c,d)} and {@code ENUM(a)}.<li>
* <li><code>INTEGER{n}</code> will match {@code INTEGER[3]}, which is an array of integers.
* <li>
* <li><code>ENUM(...)</code> will match {@code ENUM(a,b,c,d)} and {@code ENUM(a)}.
* <li>
* </ul>
*
* @author Randall Hauch
@ -175,10 +177,10 @@ protected Pattern parseLength(TokenStream stream) throws ParsingException {
stream.consume('(');
Pattern result = new LiteralPattern("(", false);
if (stream.canConsume(".",".",".")) {
if (stream.canConsume(".", ".", ".")) {
// This is a list pattern ...
result = new AndPattern(result, new ListPattern());
} else if (stream.canConsumeAnyOf("L","M","P","N") ) {
} else if (stream.canConsumeAnyOf("L", "M", "P", "N")) {
// specifies length, mantissa, precision, or number ...
result = new AndPattern(result, new LengthPattern());
} else {
@ -468,7 +470,7 @@ public boolean match(TokenStream stream, DataTypeBuilder builder, Consumer<Parsi
return true;
}
stream.consume(); // first item
while ( stream.matches(delimiter) ) {
while (stream.matches(delimiter)) {
stream.consume();
}
return true;
@ -494,6 +496,7 @@ public OptionalPattern(Pattern pattern) {
@Override
public boolean match(TokenStream stream, DataTypeBuilder builder, Consumer<ParsingException> error) {
if (stream.hasNext()) {
Marker marker = stream.mark();
try {
if (!pattern.match(stream, builder, error)) {
@ -503,6 +506,7 @@ public boolean match(TokenStream stream, DataTypeBuilder builder, Consumer<Parsi
error.accept(e);
stream.rewind(marker);
}
}
return true;
}
@ -575,7 +579,6 @@ public boolean match(TokenStream stream, DataTypeBuilder builder, Consumer<Parsi
builder.length(literal);
return true;
}
;
return false;
}


@ -86,11 +86,15 @@ public DataType parse(TokenStream stream, Consumer<Collection<ParsingException>>
ErrorCollector errors = new ErrorCollector();
Marker mostReadMarker = null;
DataType mostReadType = null;
for (DataTypePattern pattern : matchingPatterns) {
Marker marker = stream.mark();
for (DataTypePattern pattern : matchingPatterns) {
DataType result = pattern.match(stream, errors::record);
if (result != null) {
// We found a match, so record it if it is better than our previous best ...
if (!stream.hasNext()) {
// There's no more to read, so we should be done ...
return result;
}
Marker endMarker = stream.mark();
if (mostReadMarker == null || endMarker.compareTo(mostReadMarker) > 0) {
mostReadMarker = endMarker;


@ -5,14 +5,22 @@
*/
package io.debezium.relational.ddl;
import java.math.BigDecimal;
import java.sql.Types;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;
import io.debezium.text.MultipleParsingExceptions;
@ -43,18 +51,30 @@ default void add(String firstToken, String... additionalTokens) {
private final Set<String> statementStarts = new HashSet<>();
private final String terminator;
private String currentSchema = null;
protected final boolean skipViews;
protected final Logger logger = LoggerFactory.getLogger(getClass());
protected final DataTypeParser dataTypeParser = new DataTypeParser();
protected Tables databaseTables;
protected TokenStream tokens;
/**
* Create a new parser that uses the supplied {@link DataTypeParser}.
* Create a new parser that uses the supplied {@link DataTypeParser}, but that does not include view definitions.
*
* @param terminator the terminator character sequence; may be null if the default terminator ({@code ;}) should be used
*/
public DdlParser(String terminator) {
this(terminator,false);
}
/**
* Create a new parser that uses the supplied {@link DataTypeParser}.
*
* @param terminator the terminator character sequence; may be null if the default terminator ({@code ;}) should be used
* @param includeViews {@code true} if view definitions should be included, or {@code false} if they should be skipped
*/
public DdlParser(String terminator, boolean includeViews) {
this.terminator = terminator != null ? terminator : ";";
this.skipViews = !includeViews;
initializeDataTypes(dataTypeParser);
initializeKeywords(keywords::add);
initializeStatementStarts(statementStarts::add);
@ -195,6 +215,8 @@ public final void parse(TokenStream ddlContent, Tables databaseTables) throws Pa
} catch (ParsingException e) {
ddlContent.rewind(marker);
throw e;
} catch (Throwable t) {
parsingFailed(ddlContent.nextPosition(), "Unexpected exception (" + t.getMessage() + ") parsing", t);
}
}
@ -321,7 +343,9 @@ protected void consumeStatement() throws ParsingException {
protected void consumeRemainingStatement(Marker start) {
while (tokens.hasNext()) {
if (tokens.matches(DdlTokenizer.STATEMENT_KEY)) break;
if (tokens.matches(DdlTokenizer.STATEMENT_TERMINATOR)) {
if (tokens.canConsume("BEGIN")) {
tokens.consumeThrough("END");
} else if (tokens.matches(DdlTokenizer.STATEMENT_TERMINATOR)) {
tokens.consume();
break;
}
@ -367,7 +391,19 @@ protected String consumeQuotedString() {
* @param msg the leading portion of the message; may not be null
*/
protected void parsingFailed(Position position, String msg) {
parsingFailed(position, null, msg);
parsingFailed(position, msg, null);
}
/**
* Generate a {@link ParsingException} with the supplied message, which is appended by this method with additional
* information about the position's line and column.
*
* @param position the position at which the error occurred; may not be null
* @param msg the leading portion of the message; may not be null
* @param t the exception that occurred; may be null
*/
protected void parsingFailed(Position position, String msg, Throwable t) {
throw new ParsingException(position, msg + " at line " + position.line() + ", column " + position.column(), t);
}
/**
@ -384,4 +420,345 @@ protected void parsingFailed(Position position, Collection<ParsingException> err
}
throw new MultipleParsingExceptions(msg + " at line " + position.line() + ", column " + position.column(), errors);
}
protected Object parseLiteral(Marker start) {
if (tokens.canConsume('_')) { // introducer
// This is a character literal beginning with a character set ...
parseCharacterSetName(start);
return parseCharacterLiteral(start);
}
if (tokens.canConsume('N')) {
return parseCharacterLiteral(start);
}
if (tokens.canConsume("U", "&")) {
return parseCharacterLiteral(start);
}
if (tokens.canConsume('X')) {
return parseCharacterLiteral(start);
}
if (tokens.matchesAnyOf(DdlTokenizer.DOUBLE_QUOTED_STRING, DdlTokenizer.SINGLE_QUOTED_STRING)) {
return tokens.consume();
}
if (tokens.canConsume("DATE")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("TIME")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("TIMESTAMP")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("TRUE")) {
return Boolean.TRUE;
}
if (tokens.canConsume("FALSE")) {
return Boolean.FALSE;
}
if (tokens.canConsume("UNKNOWN")) {
return Boolean.FALSE;
}
// Otherwise, it's just a numeric literal ...
return parseNumericLiteral(start, true);
}
protected Object parseNumericLiteral(Marker start, boolean signed) {
StringBuilder sb = new StringBuilder();
boolean decimal = false;
if (signed && tokens.matches("+", "-")) {
sb.append(tokens.consumeAnyOf("+", "-"));
}
if (!tokens.canConsume('.')) {
sb.append(tokens.consumeInteger());
}
if (tokens.canConsume('.')) {
sb.append('.');
sb.append(tokens.consumeInteger());
decimal = true;
}
if (!tokens.canConsume('E')) {
if (decimal) return Double.parseDouble(sb.toString());
return Integer.parseInt(sb.toString());
}
sb.append('E');
if (tokens.matches("+", "-")) {
sb.append(tokens.consumeAnyOf("+", "-"));
}
sb.append(tokens.consumeInteger());
return new BigDecimal(sb.toString());
}
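To illustrate the grammar this method accepts (assuming the sign and decimal-point fixes above), a few sample inputs and the values they produce:

// "42"     -> Integer 42        (no decimal point, no exponent)
// "-3.14"  -> Double -3.14      (decimal point, no exponent)
// "2.5E+3" -> BigDecimal 2.5E+3 (an exponent always yields a BigDecimal)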
protected String parseCharacterLiteral(Marker start) {
StringBuilder sb = new StringBuilder();
while (true) {
if (tokens.matches(DdlTokenizer.COMMENT)) {
parseComment(start);
} else if (tokens.matchesAnyOf(DdlTokenizer.SINGLE_QUOTED_STRING, DdlTokenizer.DOUBLE_QUOTED_STRING)) {
if (sb.length() != 0) sb.append(' ');
sb.append(tokens.consume());
} else {
break;
}
}
if (tokens.canConsume("ESCAPE")) {
tokens.consume();
}
return sb.toString();
}
protected String parseCharacterSetName(Marker start) {
String name = tokens.consume();
if (tokens.canConsume('.')) {
// The name was actually a schema name ...
String id = tokens.consume();
return name + "." + id;
}
return name;
}
protected String parseDateLiteral(Marker start) {
return consumeQuotedString();
}
protected String parseTimeLiteral(Marker start) {
return consumeQuotedString();
}
protected String parseTimestampLiteral(Marker start) {
return consumeQuotedString();
}
/**
* Parse the column information in the SELECT clause. This method stops before it consumes the FROM clause.
*
* @param start the start of the statement
* @return the map of resolved Columns keyed by the column alias (or name) used in the SELECT statement; never null but
* possibly
* empty if we couldn't parse the SELECT clause correctly
*/
protected Map<String, Column> parseColumnsInSelectClause(Marker start) {
// Parse the column names ...
Map<String, String> tableAliasByColumnAlias = new LinkedHashMap<>();
Map<String, String> columnNameByAliases = new LinkedHashMap<>();
parseColumnName(start, tableAliasByColumnAlias, columnNameByAliases);
while (tokens.canConsume(',')) {
parseColumnName(start, tableAliasByColumnAlias, columnNameByAliases);
}
// Parse the FROM clause, but we'll back up to the start of this before we return ...
Marker startOfFrom = tokens.mark();
Map<String, Column> columnsByName = new LinkedHashMap<>();
Map<String, Table> fromTablesByAlias = parseSelectFromClause(start);
Table singleTable = fromTablesByAlias.size() == 1 ? fromTablesByAlias.values().stream().findFirst().get() : null;
tableAliasByColumnAlias.forEach((columnAlias, tableAlias) -> {
// Resolve the alias into the actual column name in the referenced table ...
String columnName = columnNameByAliases.getOrDefault(columnAlias, columnAlias);
Column column = null;
if (tableAlias == null) {
// The column was not qualified with a table, so there should be a single table ...
column = singleTable == null ? null : singleTable.columnWithName(columnName);
} else {
// The column was qualified with a table, so look it up ...
Table table = fromTablesByAlias.get(tableAlias);
column = table == null ? null : table.columnWithName(columnName);
}
if (column == null) {
// Check to see whether the column name contains a constant value, in which case we need to create an
// artificial column ...
column = createColumnFromConstant(columnAlias, columnName);
}
columnsByName.put(columnAlias, column); // column may be null
});
tokens.rewind(startOfFrom);
return columnsByName;
}
protected Column createColumnFromConstant(String columnName, String constantValue) {
ColumnEditor column = Column.editor().name(columnName);
try {
if (constantValue.startsWith("'") || constantValue.startsWith("\"")) {
column.typeName("CHAR");
column.jdbcType(Types.CHAR);
column.length(constantValue.length() - 2);
} else if (constantValue.equalsIgnoreCase("TRUE") || constantValue.equalsIgnoreCase("FALSE")) {
column.typeName("BOOLEAN");
column.jdbcType(Types.BOOLEAN);
} else {
setTypeInfoForConstant(constantValue, column);
}
} catch (Throwable t) {
logger.debug("Unable to create an artificial column for the constant: " + constantValue);
}
return column.create();
}
protected void setTypeInfoForConstant(String constantValue, ColumnEditor column) {
try {
Integer.parseInt(constantValue);
column.typeName("INTEGER");
column.jdbcType(Types.INTEGER);
} catch (NumberFormatException e) {
}
try {
Long.parseLong(constantValue);
column.typeName("BIGINT");
column.jdbcType(Types.BIGINT);
} catch (NumberFormatException e) {
}
try {
Float.parseFloat(constantValue);
column.typeName("FLOAT");
column.jdbcType(Types.FLOAT);
} catch (NumberFormatException e) {
}
try {
Double.parseDouble(constantValue);
column.typeName("DOUBLE");
column.jdbcType(Types.DOUBLE);
int precision = 0;
int scale = 0;
boolean foundDecimalPoint = false;
for (int i = 0; i < constantValue.length(); i++) {
char c = constantValue.charAt(i);
if (c == '+' || c == '-') {
continue;
} else if (c == '.') {
foundDecimalPoint = true;
} else if ( Character.isDigit(c) ) {
if ( foundDecimalPoint ) ++scale;
else ++precision;
} else {
break;
}
}
column.length(precision);
column.scale(scale);
} catch (NumberFormatException e) {
}
try {
BigDecimal decimal = new BigDecimal(constantValue);
column.typeName("DECIMAL");
column.jdbcType(Types.DECIMAL);
column.length(decimal.precision());
column.scale(decimal.scale());
} catch (NumberFormatException e) {
}
}
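Because each branch that parses successfully overwrites the previous one, a numeric constant ends up typed by the most general parse that succeeds; a trace of a hypothetical call (assuming the decimal.scale() fix above):

ColumnEditor col = Column.editor().name("c");
setTypeInfoForConstant("3.14", col);
// Integer and Long parsing fail; Float and Double succeed (precision 1, scale 2);
// BigDecimal succeeds last, so the column ends up DECIMAL with length 3 and scale 2.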
protected String determineTypeNameForConstant(long value) {
return "BIGINT";
}
protected String determineTypeNameForConstant(float value) {
return "FLOAT";
}
protected String determineTypeNameForConstant(double value) {
return "DECIMAL";
}
protected String determineTypeNameForConstant(BigDecimal value) {
return "BIGINT";
}
/**
* Parse the potentially qualified and aliased column information, and add the information to the supplied maps.
*
* @param start the start of the statement
* @param tableAliasByColumnAliases the map to which is added the alias of the table containing each column, keyed by the
* column's alias (or name); may not be null
* @param columnNameByAliases the map to which is added the column's name keyed by its alias (or itself if there is no
* alias); may not be null
*/
protected void parseColumnName(Marker start, Map<String, String> tableAliasByColumnAliases, Map<String, String> columnNameByAliases) {
try {
String tableName = tokens.consume();
String columnName = null;
if (tokens.canConsume('.')) {
columnName = tokens.consume();
} else {
// Just an unqualified column name ...
columnName = tableName;
tableName = null;
}
String alias = columnName;
if (tokens.canConsume("AS")) {
alias = tokens.consume();
}
columnNameByAliases.put(alias, columnName);
tableAliasByColumnAliases.put(alias, tableName);
} catch (ParsingException e) {
// do nothing, and don't rewind ...
}
}
/**
* Returns the tables keyed by their aliases that appear in a SELECT clause's "FROM" list. This method handles the
* {@link #canConsumeJoin(Marker) various standard joins}.
*
* @param start the start of the statement
* @return the map of resolved tables keyed by the alias (or table name) used in the SELECT statement; never null but possibly
* empty if we couldn't parse the from clause correctly
*/
protected Map<String, Table> parseSelectFromClause(Marker start) {
Map<String, Table> tablesByAlias = new HashMap<>();
if (tokens.canConsume("FROM")) {
try {
parseAliasedTableInFrom(start, tablesByAlias);
while (tokens.canConsume(',') || canConsumeJoin(start)) {
parseAliasedTableInFrom(start, tablesByAlias);
canConsumeJoinCondition(start);
}
} catch (ParsingException e) {
// do nothing ...
}
}
return tablesByAlias;
}
protected boolean canConsumeJoin(Marker start) {
return tokens.canConsume("JOIN") ||
tokens.canConsume("INNER", "JOIN") ||
tokens.canConsume("OUTER", "JOIN") ||
tokens.canConsume("CROSS", "JOIN") ||
tokens.canConsume("RIGHT", "OUTER", "JOIN") ||
tokens.canConsume("LEFT", "OUTER", "JOIN") ||
tokens.canConsume("FULL", "OUTER", "JOIN");
}
protected boolean canConsumeJoinCondition(Marker start) {
if (tokens.canConsume("ON")) {
try {
parseSchemaQualifiedName(start);
while (tokens.canConsume(DdlTokenizer.SYMBOL)) {
}
parseSchemaQualifiedName(start);
return true;
} catch (ParsingException e) {
// do nothing
}
}
return false;
}
/**
* Parse a potentially qualified table name along with an optional alias.
*
* @param start the start of the statement
* @param tablesByAlias the map to which this method should add the table keyed by its alias (or name if there is no alias);
* may not be null
*/
private void parseAliasedTableInFrom(Marker start, Map<String, Table> tablesByAlias) {
Table fromTable = databaseTables.forTable(parseQualifiedTableName(start));
// Aliases in JOIN clauses don't have to be preceded by AS, but can simply be the alias followed by the 'ON' clause
if (tokens.matches("AS", TokenStream.ANY_VALUE, "ON") || tokens.matches(TokenStream.ANY_VALUE, "ON")) {
tokens.canConsume("AS");
String alias = tokens.consume();
if (fromTable != null) {
tablesByAlias.put(alias, fromTable);
return;
}
}
if (fromTable != null) tablesByAlias.put(fromTable.id().table(), fromTable);
}
}


@ -5,7 +5,6 @@
*/
package io.debezium.relational.ddl;
import java.math.BigDecimal;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
@ -22,6 +21,9 @@
/**
* A parser for DDL statements.
* <p>
* See the <a href="http://savage.net.au/SQL/sql-2003-2.bnf.html">BNF Grammar for ISO/IEC 9075-2:2003</a> for the grammar
* supported by this parser.
*
* @author Randall Hauch
*/
@ -29,12 +31,20 @@
public class DdlParserSql2003 extends DdlParser {
/**
* Create a new DDL parser for SQL-2003.
* Create a new DDL parser for SQL-2003 that does not include view definitions.
*/
public DdlParserSql2003() {
super(";");
}
/**
* Create a new DDL parser for SQL-2003.
* @param includeViews {@code true} if view definitions should be included, or {@code false} if they should be skipped
*/
public DdlParserSql2003( boolean includeViews ) {
super(";",includeViews);
}
@Override
protected void initializeDataTypes(DataTypeParser dataTypes) {
dataTypes.register(Types.CHAR, "CHARACTER[(L)]");
@ -426,109 +436,12 @@ protected String parseDomainName(Marker start) {
return parseSchemaQualifiedName(start);
}
@Override
protected Object parseLiteral(Marker start) {
if (tokens.canConsume('_')) { // introducer
// This is a character literal beginning with a character set ...
parseCharacterSetName(start);
return parseCharacterLiteral(start);
}
if (tokens.canConsume('N')) {
return parseCharacterLiteral(start);
}
if (tokens.canConsume("U", "&")) {
return parseCharacterLiteral(start);
}
if (tokens.canConsume('X')) {
return parseCharacterLiteral(start);
}
if (tokens.canConsume("DATE")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("TIME")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("TIMESTAMP")) {
return parseDateLiteral(start);
}
if (tokens.canConsume("INTERVAL")) {
return parseIntervalLiteral(start);
}
if (tokens.canConsume("TRUE")) {
return Boolean.TRUE;
}
if (tokens.canConsume("FALSE")) {
return Boolean.FALSE;
}
if (tokens.canConsume("UNKNOWN")) {
return Boolean.FALSE;
}
// Otherwise, it's just a numeric literal ...
return parseNumericLiteral(start, true);
}
protected Object parseNumericLiteral(Marker start, boolean signed) {
StringBuilder sb = new StringBuilder();
boolean decimal = false;
if (signed && tokens.matches("+", "-")) {
sb.append(tokens.consumeAnyOf("+", "-"));
}
if (!tokens.canConsume('.')) {
sb.append(tokens.consumeInteger());
}
if (tokens.canConsume('.')) {
sb.append(tokens.consumeInteger());
decimal = true;
}
if (!tokens.canConsume('E')) {
if (decimal) return Double.parseDouble(sb.toString());
return Integer.parseInt(sb.toString());
}
sb.append('E');
if (tokens.matches("+", "-")) {
sb.append(tokens.consumeAnyOf("+", "-"));
}
sb.append(tokens.consumeInteger());
return new BigDecimal(sb.toString());
}
protected String parseCharacterLiteral(Marker start) {
StringBuilder sb = new StringBuilder();
while (true) {
if (tokens.matches(DdlTokenizer.COMMENT)) {
parseComment(start);
} else if (tokens.matches(DdlTokenizer.SINGLE_QUOTED_STRING)) {
if (sb.length() != 0) sb.append(' ');
sb.append(tokens.consume());
} else {
break;
}
}
if (tokens.canConsume("ESCAPE")) {
tokens.consume();
}
return sb.toString();
}
protected String parseCharacterSetName(Marker start) {
String name = tokens.consume();
if (tokens.canConsume('.')) {
// The name was actually a schema name ...
String id = tokens.consume();
return name + "." + id;
}
return name;
}
protected String parseDateLiteral(Marker start) {
return consumeQuotedString();
}
protected String parseTimeLiteral(Marker start) {
return consumeQuotedString();
}
protected String parseTimestampLiteral(Marker start) {
return consumeQuotedString();
return super.parseLiteral(start);
}
protected String parseIntervalLiteral(Marker start) {
@ -588,11 +501,18 @@ protected void parseReferencesScopeCheck(Marker start, String columnName, TokenS
}
protected void parseCreateView(Marker start) {
if ( skipViews ) {
// We don't care about the rest ...
consumeRemainingStatement(start);
debugSkipped(start);
return;
}
tokens.canConsume("RECURSIVE");
tokens.consume("VIEW");
TableId tableId = parseQualifiedTableName(start);
TableEditor table = databaseTables.editOrCreateTable(tableId);
List<String> columnNames = null;
if (tokens.canConsume("OF")) {
// Read the qualified name ...
parseSchemaQualifiedName(start);
@ -601,15 +521,22 @@ protected void parseCreateView(Marker start) {
parseSchemaQualifiedName(start);
}
if (tokens.matches('(')) {
parseColumnNameList(start);
columnNames = parseColumnNameList(start);
}
} else if (tokens.matches('(')) {
parseColumnNameList(start);
columnNames = parseColumnNameList(start);
}
tokens.canConsume("AS");
// We don't care about the rest ...
consumeRemainingStatement(start);
if ( columnNames != null ) {
// We know nothing other than the names ...
columnNames.forEach(name->{
table.addColumn(Column.editor().name(name).create());
});
}
// Update the table definition ...
databaseTables.overwriteTable(table.create());
}
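
The net effect of the new includeViews flag, sketched briefly:

DdlParser skipping = new DdlParserSql2003();       // views skipped; CREATE VIEW is consumed and ignored
DdlParser including = new DdlParserSql2003(true);  // view column names are recorded in Tables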


@ -356,7 +356,7 @@ protected Marker(Position position, int index) {
}
/**
* Get the position of this marker, or null if this is at the start of the token stream.
* Get the position of this marker, or null if this is at the start or end of the token stream.
*
* @return the position.
*/
@ -477,6 +477,9 @@ public void rewind() {
* @throws NoSuchElementException if there are no more tokens
*/
public Marker mark() {
if ( completed ) {
return new Marker(null, tokenIterator.previousIndex());
}
Token currentToken = currentToken();
Position currentPosition = currentToken != null ? currentToken.position() : null;
return new Marker(currentPosition, tokenIterator.previousIndex());
@ -974,10 +977,12 @@ public TokenStream consumeUntil(String expected, String skipMatchingTokens) thro
}
Marker start = mark();
int remaining = 0;
while (hasNext() && !matches(expected)) {
while (hasNext()) {
if ( skipMatchingTokens != null && matches(skipMatchingTokens)) ++remaining;
if ( matches(expected) ) {
if ( remaining == 0 ) break;
if ( remaining == 0 ) {
break;
}
--remaining;
}
consume();
@ -1491,7 +1496,7 @@ public boolean matches(int[] typesForNextTokens) throws IllegalStateException {
*
* @param firstOption the first option for the value of the current token
* @param additionalOptions the additional options for the value of the current token
* @return true if the current token's value did match one of the suplied options, or false otherwise
* @return true if the current token's value did match one of the supplied options, or false otherwise
* @throws IllegalStateException if this method was called before the stream was {@link #start() started}
*/
public boolean matchesAnyOf(String firstOption,
@ -1509,7 +1514,7 @@ public boolean matchesAnyOf(String firstOption,
* Determine if the next token matches one of the supplied values.
*
* @param options the options for the value of the current token
* @return true if the current token's value did match one of the suplied options, or false otherwise
* @return true if the current token's value did match one of the supplied options, or false otherwise
* @throws IllegalStateException if this method was called before the stream was {@link #start() started}
*/
public boolean matchesAnyOf(String[] options) throws IllegalStateException {
@ -1525,7 +1530,7 @@ public boolean matchesAnyOf(String[] options) throws IllegalStateException {
* Determine if the next token matches one of the supplied values.
*
* @param options the options for the value of the current token
* @return true if the current token's value did match one of the suplied options, or false otherwise
* @return true if the current token's value did match one of the supplied options, or false otherwise
* @throws IllegalStateException if this method was called before the stream was {@link #start() started}
*/
public boolean matchesAnyOf(Iterable<String> options) throws IllegalStateException {


@ -12,9 +12,6 @@
import static org.fest.assertions.Assertions.assertThat;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
public class ColumnEditorTest {
private ColumnEditor editor;
@ -59,10 +56,10 @@ public void shouldCreateColumnWithAllFieldsSetToDefaults() {
assertThat(column.name()).isNull();
assertThat(column.typeName()).isNull();
assertThat(column.jdbcType()).isEqualTo(Types.INTEGER);
assertThat(column.length()).isEqualTo(0);
assertThat(column.length()).isEqualTo(-1);
assertThat(column.scale()).isEqualTo(-1);
assertThat(column.position()).isEqualTo(1);
assertThat(column.isOptional()).isFalse();
assertThat(column.isOptional()).isTrue();
assertThat(column.isAutoIncremented()).isFalse();
assertThat(column.isGenerated()).isFalse();
}


@ -13,12 +13,6 @@
import static org.fest.assertions.Assertions.assertThat;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
import io.debezium.relational.Table;
import io.debezium.relational.TableEditor;
import io.debezium.relational.TableId;
public class TableEditorTest {
private final TableId id = new TableId("catalog", "schema", "table");


@ -20,7 +20,7 @@
public class TableSchemaBuilderTest {
private final TableId id = new TableId("catalog", "schema", "table");
private final Object[] data = new Object[] { "c1value", 3.14d, java.sql.Date.valueOf("2001-10-31"), 4 };
private final Object[] data = new Object[] { "c1value", 3.142d, java.sql.Date.valueOf("2001-10-31"), 4 };
private Table table;
private Column c1;
private Column c2;
@ -35,6 +35,7 @@ public void beforeEach() {
.tableId(id)
.addColumns(Column.editor().name("C1")
.typeName("VARCHAR").jdbcType(Types.VARCHAR).length(10)
.optional(false)
.generated(true)
.create(),
Column.editor().name("C2")
@ -86,14 +87,13 @@ public void shouldBuildTableSchemaFromTableWithoutPrimaryKey() {
assertThat(schema.keyFromColumnData(data)).isNull();
// Check the values ...
Schema values = schema.valueSchema();
System.out.println("Value schema: " + values);
assertThat(values).isNotNull();
assertThat(values.field("C1").name()).isEqualTo("C1");
assertThat(values.field("C1").index()).isEqualTo(0);
assertThat(values.field("C1").schema()).isEqualTo(Schema.STRING_SCHEMA);
assertThat(values.field("C1").schema()).isEqualTo(SchemaBuilder.string().build());
assertThat(values.field("C2").name()).isEqualTo("C2");
assertThat(values.field("C2").index()).isEqualTo(1);
assertThat(values.field("C2").schema()).isEqualTo(Decimal.schema(3)); // scale of 3
assertThat(values.field("C2").schema()).isEqualTo(Decimal.builder(3).optional().build()); // scale of 3
assertThat(values.field("C3").name()).isEqualTo("C3");
assertThat(values.field("C3").index()).isEqualTo(2);
assertThat(values.field("C3").schema()).isEqualTo(Date.builder().optional().build()); // optional date


@ -28,9 +28,11 @@ public void beforeEach() {
.addColumns(Column.editor().name("C1")
.typeName("VARCHAR").jdbcType(Types.VARCHAR).length(10)
.generated(true)
.optional(false)
.create(),
Column.editor().name("C2")
.typeName("NUMBER").jdbcType(Types.NUMERIC).length(5)
.optional(false)
.create(),
Column.editor().name("C3")
.typeName("DATE").jdbcType(Types.DATE).length(4)


@ -8,4 +8,4 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %m (%c)%n
log4j.rootLogger=INFO, stdout
# Set up the default logging to be INFO level, then override specific units
log4j.logger.io.debezium=DEBUG
log4j.logger.io.debezium=INFO


@ -29,6 +29,10 @@
<groupId>com.github.shyiko</groupId>
<artifactId>mysql-binlog-connector-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>connect-api</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>


@ -0,0 +1,39 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql;
import io.debezium.config.Configuration;
import io.debezium.config.Configuration.Field;
/**
* The configuration properties.
*/
public class MySqlConfiguration {
public static final Field USER = Configuration.field("database.user",
"Name of the database user to be used when connecting to the database");
public static final Field PASSWORD = Configuration.field("database.password",
"Password to be used when connecting to the database");
public static final Field HOSTNAME = Configuration.field("database.hostname", "IP address of the database");
public static final Field PORT = Configuration.field("database.port", "Port of the database", 3306);
public static final Field SERVER_ID = Configuration.field("connect.id",
"ID of this database client, which must be unique across all database processes in the cluster.");
public static final Field CONNECTION_TIMEOUT_MS = Configuration.field("connect.timeout.ms",
"Maximum time in milliseconds to wait after trying to connect to the database before timing out.",
30 * 1000);
public static final Field KEEP_ALIVE = Configuration.field("connect.keep.alive",
"Whether a separate thread should be used to ensure the connection is kept alive.",
true);
public static final Field MAX_QUEUE_SIZE = Configuration.field("max.queue.size",
"Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Should be larger than the maximum batch size.",
2048);
public static final Field MAX_BATCH_SIZE = Configuration.field("max.batch.size", "Maximum size of each batch of source records.",
1024);
public static final Field POLL_INTERVAL_MS = Configuration.field("poll.interval.ms",
"Frequency in milliseconds to poll for new change events", 1 * 1000);
public static final Field LOGICAL_ID = Configuration.field("database.logical.id",
"Logical unique identifier for this database. Defaults to host:port");
}
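
For illustration only, a sketch of wiring these fields into a configuration; the create()/with()/build() builder calls are assumptions about the Configuration API and are not part of this commit:

Configuration config = Configuration.create()
        .with("database.hostname", "production-db") // MySqlConfiguration.HOSTNAME
        .with("database.port", 3306)                // MySqlConfiguration.PORT
        .with("database.user", "repl")              // MySqlConfiguration.USER
        .build();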


@ -0,0 +1,998 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.relational.Column;
import io.debezium.relational.ColumnEditor;
import io.debezium.relational.Table;
import io.debezium.relational.TableEditor;
import io.debezium.relational.TableId;
import io.debezium.relational.ddl.DataType;
import io.debezium.relational.ddl.DataTypeParser;
import io.debezium.relational.ddl.DdlParser;
import io.debezium.relational.ddl.DdlTokenizer;
import io.debezium.text.ParsingException;
import io.debezium.text.TokenStream;
import io.debezium.text.TokenStream.Marker;
/**
* A parser for DDL statements.
* <p>
* See the <a href="http://dev.mysql.com/doc/refman/5.7/en/sql-syntax-data-definition.html">MySQL SQL Syntax documentation</a> for
* the grammar supported by this parser.
*
* @author Randall Hauch
*/
@NotThreadSafe
public class MySqlDdlParser extends DdlParser {
/**
* Create a new DDL parser for MySQL that does not include view definitions.
*/
public MySqlDdlParser() {
super(";");
}
/**
* Create a new DDL parser for MySQL.
*
* @param includeViews {@code true} if view definitions should be included, or {@code false} if they should be skipped
*/
public MySqlDdlParser(boolean includeViews) {
super(";", includeViews);
}
@Override
protected void initializeDataTypes(DataTypeParser dataTypes) {
dataTypes.register(Types.BIT, "BIT[(L)]");
dataTypes.register(Types.INTEGER, "TINYINT[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.INTEGER, "SMALLINT[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.INTEGER, "MEDIUMINT[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.INTEGER, "INT[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.INTEGER, "INTEGER[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.BIGINT, "BIGINT[(L)] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.REAL, "REAL[(M[,D])] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.DOUBLE, "DOUBLE[(M[,D])] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.FLOAT, "FLOAT[(M[,D])] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.DECIMAL, "DECIMAL[(M[,D])] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.NUMERIC, "NUMERIC[(M[,D])] [UNSIGNED] [ZEROFILL]");
dataTypes.register(Types.DATE, "DATE");
dataTypes.register(Types.TIME, "TIME[(L)]");
dataTypes.register(Types.TIMESTAMP, "TIMESTAMP[(L)]");
dataTypes.register(Types.TIMESTAMP, "DATETIME[(L)]");
dataTypes.register(Types.DATE, "YEAR[(2|4)]");
dataTypes.register(Types.BLOB, "CHAR[(L)] BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.BLOB, "VARCHAR(L) BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "CHAR[(L)] [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "VARCHAR(L) [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.CHAR, "BINARY[(L)]");
dataTypes.register(Types.VARBINARY, "VARBINARY(L)");
dataTypes.register(Types.BLOB, "TINYBLOB");
dataTypes.register(Types.BLOB, "BLOB");
dataTypes.register(Types.BLOB, "MEDIUMBLOB");
dataTypes.register(Types.BLOB, "LONGBLOB");
dataTypes.register(Types.BLOB, "TINYTEXT BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.BLOB, "TEXT BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.BLOB, "MEDIUMTEXT BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.BLOB, "LONGTEXT BINARY [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "TINYTEXT [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "TEXT [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "MEDIUMTEXT [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.VARCHAR, "LONGTEXT [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.CHAR, "ENUM(...) [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.CHAR, "SET(...) [CHARACTER SET charset_name] [COLLATE collation_name]");
dataTypes.register(Types.OTHER, "JSON");
}
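A hypothetical round trip through the parser; this assumes the String-accepting parse overload used by the test suite, and a TableId with no catalog or schema since no default database is set:

MySqlDdlParser parser = new MySqlDdlParser();
Tables tables = new Tables();
parser.parse("CREATE TABLE products ( id INT NOT NULL AUTO_INCREMENT, name VARCHAR(255), PRIMARY KEY (id) );",
             tables);
Table products = tables.forTable(new TableId(null, null, "products"));
assert products.columnWithName("id").isAutoIncremented(); // AUTO_INCREMENT was parsed
assert products.primaryKeyColumnNames().contains("id");   // PRIMARY KEY constraint was applied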
@Override
protected void initializeKeywords(TokenSet keywords) {
}
@Override
protected void initializeStatementStarts(TokenSet statementStartTokens) {
statementStartTokens.add("CREATE", "ALTER", "DROP", "INSERT", "SET", "GRANT", "REVOKE");
}
@Override
protected void parseNextStatement(Marker marker) {
if (tokens.matches(DdlTokenizer.COMMENT)) {
parseComment(marker);
} else if (tokens.matches("CREATE")) {
parseCreate(marker);
} else if (tokens.matches("ALTER")) {
parseAlter(marker);
} else if (tokens.matches("DROP")) {
parseDrop(marker);
} else if (tokens.matches("RENAME")) {
parseRename(marker);
} else {
parseUnknownStatement(marker);
}
}
@SuppressWarnings("unchecked")
@Override
protected void parseCreate(Marker marker) {
tokens.consume("CREATE");
if (tokens.matches("TABLE") || tokens.matches("TEMPORARY", "TABLE")) {
parseCreateTable(marker);
} else if (tokens.matches("VIEW")) {
parseCreateView(marker);
} else if (tokens.matchesAnyOf("DATABASE", "SCHEMA")) {
parseCreateUnknown(marker);
} else if (tokens.matchesAnyOf("EVENT")) {
parseCreateUnknown(marker);
} else if (tokens.matchesAnyOf("FUNCTION", "PROCEDURE")) {
parseCreateUnknown(marker);
} else if (tokens.matchesAnyOf("UNIQUE", "FULLTEXT", "SPATIAL", "INDEX")) {
parseCreateIndex(marker);
} else if (tokens.matchesAnyOf("SERVER")) {
parseCreateUnknown(marker);
} else if (tokens.matchesAnyOf("TABLESPACE")) {
parseCreateUnknown(marker);
} else if (tokens.matchesAnyOf("TRIGGER")) {
parseCreateUnknown(marker);
} else {
// It could be several possible things (including more elaborate forms of those matches tried above), so try each in turn ...
sequentially(this::parseCreateView,
this::parseCreateUnknown);
}
}
protected void parseCreateTable(Marker start) {
tokens.canConsume("TEMPORARY");
tokens.consume("TABLE");
boolean onlyIfNotExists = tokens.canConsume("IF", "NOT", "EXISTS");
TableId tableId = parseQualifiedTableName(start);
if (onlyIfNotExists && databaseTables.forTable(tableId) != null) {
// The table does exist, so we should do nothing ...
consumeRemainingStatement(start);
debugParsed(start);
return;
}
TableEditor table = databaseTables.editOrCreateTable(tableId);
// create_definition ...
if (tokens.matches('(')) parseCreateDefinitionList(start, table);
// table_options ...
parseTableOptions(start, table);
// partition_options ...
if (tokens.matches("PARTITION")) {
parsePartitionOptions(start, table);
}
// select_statement
if (tokens.canConsume("AS") || tokens.canConsume("IGNORE", "AS") || tokens.canConsume("REPLACE", "AS")) {
parseAsSelectStatement(start, table);
}
// Update the table definition ...
databaseTables.overwriteTable(table.create());
debugParsed(start);
}
protected void parseTableOptions(Marker start, TableEditor table) {
while (parseTableOption(start, table)) {
}
}
protected boolean parseTableOption(Marker start, TableEditor table) {
if (tokens.canConsume("AUTO_INCREMENT")) {
// Sets the auto-incremented value for the next incremented value ...
tokens.canConsume('=');
tokens.consume();
return true;
} else if (tokens.canConsumeAnyOf("CHECKSUM", "ENGINE", "AVG_ROW_LENGTH", "MAX_ROWS", "MIN_ROWS", "ROW_FORMAT",
"DELAY_KEY_WRITE", "INSERT_METHOD", "KEY_BLOCK_SIZE", "PACK_KEYS",
"STATS_AUTO_RECALC", "STATS_PERSISTENT", "STATS_SAMPLE_PAGES")) {
// One option token followed by '=' by a single value
tokens.canConsume('=');
tokens.consume();
return true;
} else if (tokens.canConsume("DEFAULT", "CHARACTER", "SET") || tokens.canConsume("CHARACTER", "SET")) {
tokens.canConsume('=');
tokens.consume();
return true;
} else if (tokens.canConsume("DEFAULT", "COLLATE") || tokens.canConsume("COLLATE")) {
tokens.canConsume('=');
tokens.consume();
return true;
} else if (tokens.canConsumeAnyOf("COMMENT", "COMPRESSION", "CONNECTION", "ENCRYPTION", "PASSWORD")) {
tokens.canConsume('=');
consumeQuotedString();
return true;
} else if (tokens.canConsume("DATA", "DIRECTORY") || tokens.canConsume("INDEX", "DIRECTORY")) {
tokens.canConsume('=');
consumeQuotedString();
return true;
} else if (tokens.canConsume("TABLESPACE")) {
tokens.consume();
return true;
} else if (tokens.canConsumeAnyOf("STORAGE", "ENGINE")) {
tokens.consume(); // storage engine name
return true;
} else if (tokens.canConsume("UNION")) {
tokens.canConsume('=');
tokens.consume();
while (tokens.canConsume(',')) {
tokens.consume();
}
return true;
}
return false;
}
protected void parsePartitionOptions(Marker start, TableEditor table) {
tokens.consume("PARTITION", "BY");
if (tokens.canConsume("LINEAR", "HASH") || tokens.canConsume("HASH")) {
consumeExpression(start);
} else if (tokens.canConsume("LINEAR", "KEY") || tokens.canConsume("KEY")) {
if (tokens.canConsume("ALGORITHM")) {
tokens.consume("=");
tokens.consumeAnyOf("1", "2");
}
parseColumnNameList(start);
} else if (tokens.canConsumeAnyOf("RANGE", "LIST")) {
if (tokens.canConsume("COLUMNS")) {
parseColumnNameList(start);
} else {
consumeExpression(start);
}
}
if (tokens.canConsume("PARTITIONS")) {
tokens.consume();
}
if (tokens.canConsume("SUBPARTITION", "BY")) {
if (tokens.canConsume("LINEAR", "HASH") || tokens.canConsume("HASH")) {
consumeExpression(start);
} else if (tokens.canConsume("LINEAR", "KEY") || tokens.canConsume("KEY")) {
if (tokens.canConsume("ALGORITHM")) {
tokens.consume("=");
tokens.consumeAnyOf("1", "2");
}
parseColumnNameList(start);
}
if (tokens.canConsume("SUBPARTITIONS")) {
tokens.consume();
}
}
if (tokens.canConsume('(')) {
do {
parsePartitionDefinition(start, table);
} while (tokens.canConsume(','));
tokens.consume(')');
}
}
protected void parsePartitionDefinition(Marker start, TableEditor table) {
tokens.consume("PARTITION");
tokens.consume(); // name
if (tokens.canConsume("VALUES")) {
if (tokens.canConsume("LESS", "THAN")) {
if (!tokens.canConsume("MAXVALUE")) {
consumeExpression(start);
}
} else {
tokens.consume("IN");
consumeValueList(start);
}
} else if (tokens.canConsume("STORAGE", "ENGINE") || tokens.canConsume("ENGINE")) {
tokens.canConsume('=');
tokens.consume();
} else if (tokens.canConsumeAnyOf("COMMENT")) {
tokens.canConsume('=');
consumeQuotedString();
} else if (tokens.canConsumeAnyOf("DATA", "INDEX") && tokens.canConsume("DIRECTORY")) {
tokens.canConsume('=');
consumeQuotedString();
} else if (tokens.canConsumeAnyOf("MAX_ROWS", "MIN_ROWS", "TABLESPACE")) {
tokens.canConsume('=');
tokens.consume();
} else if (tokens.canConsume('(')) {
do {
parseSubpartitionDefinition(start, table);
} while (tokens.canConsume(','));
tokens.consume(')');
}
}
protected void parseSubpartitionDefinition(Marker start, TableEditor table) {
tokens.consume("SUBPARTITION");
tokens.consume(); // name
if (tokens.canConsume("STORAGE", "ENGINE") || tokens.canConsume("ENGINE")) {
tokens.canConsume('=');
tokens.consume();
} else if (tokens.canConsumeAnyOf("COMMENT")) {
tokens.canConsume('=');
consumeQuotedString();
} else if (tokens.canConsumeAnyOf("DATA", "INDEX") && tokens.canConsume("DIRECTORY")) {
tokens.canConsume('=');
consumeQuotedString();
} else if (tokens.canConsumeAnyOf("MAX_ROWS", "MIN_ROWS", "TABLESPACE")) {
tokens.canConsume('=');
tokens.consume();
}
}
protected void parseAsSelectStatement(Marker start, TableEditor table) {
tokens.consume("SELECT");
consumeRemainingStatement(start);
}
protected void parseCreateDefinitionList(Marker start, TableEditor table) {
tokens.consume('(');
parseCreateDefinition(start, table);
while (tokens.canConsume(',')) {
parseCreateDefinition(start, table);
}
tokens.consume(')');
}
protected void parseCreateDefinition(Marker start, TableEditor table) {
// Try to parse the constraints first ...
if (tokens.canConsume("CHECK")) {
consumeExpression(start);
} else if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "PRIMARY", "KEY") || tokens.canConsume("PRIMARY", "KEY")) {
if (tokens.canConsume("USING")) {
parseIndexType(start);
}
List<String> pkColumnNames = parseIndexColumnNames(start);
table.setPrimaryKeyNames(pkColumnNames);
parseIndexOptions(start);
// MySQL does not allow a primary key to have nullable columns, so let's make sure we model that correctly ...
pkColumnNames.forEach(name -> {
Column c = table.columnWithName(name);
if (c.isOptional()) {
table.addColumn(c.edit().optional(false).create());
}
});
} else if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "UNIQUE") || tokens.canConsume("UNIQUE")) {
tokens.canConsumeAnyOf("KEY", "INDEX");
if (!tokens.matches('(')) {
if (!tokens.matches("USING")) {
tokens.consume(); // name of unique index ...
}
if (tokens.matches("USING")) {
parseIndexType(start);
}
}
List<String> uniqueKeyColumnNames = parseIndexColumnNames(start);
if (table.primaryKeyColumnNames().isEmpty()) {
table.setPrimaryKeyNames(uniqueKeyColumnNames); // this may eventually get overwritten by a real PK
}
parseIndexOptions(start);
} else if (tokens.canConsume("CONSTRAINT", TokenStream.ANY_VALUE, "FOREIGN", "KEY") || tokens.canConsume("FOREIGN", "KEY")) {
if (!tokens.matches('(')) {
tokens.consume(); // name of foreign key
}
parseIndexColumnNames(start);
if (tokens.matches("REFERENCES")) {
parseReferenceDefinition(start);
}
} else if (tokens.canConsumeAnyOf("INDEX", "KEY")) {
if (!tokens.matches('(')) {
if (!tokens.matches("USING")) {
tokens.consume(); // name of unique index ...
}
if (tokens.matches("USING")) {
parseIndexType(start);
}
}
parseIndexColumnNames(start);
parseIndexOptions(start);
} else if (tokens.canConsume("FULLTEXT", "SPATIAL")) {
tokens.canConsumeAnyOf("INDEX", "KEY");
if (!tokens.matches('(')) {
tokens.consume(); // name of unique index ...
}
parseIndexColumnNames(start);
parseIndexOptions(start);
} else {
tokens.canConsume("COLUMN"); // optional in ALTER TABLE but never CREATE TABLE
// Obtain the column editor ...
String columnName = tokens.consume();
parseCreateColumn(start, table, columnName);
}
}
protected Column parseCreateColumn(Marker start, TableEditor table, String columnName) {
// Obtain the column editor ...
Column existingColumn = table.columnWithName(columnName);
ColumnEditor column = existingColumn != null ? existingColumn.edit() : Column.editor().name(columnName);
AtomicBoolean isPrimaryKey = new AtomicBoolean(false);
parseColumnDefinition(start, columnName, tokens, table, column, isPrimaryKey);
// Update the table ...
Column newColumnDefn = column.create();
table.addColumns(newColumnDefn);
if (isPrimaryKey.get()) {
table.setPrimaryKeyNames(newColumnDefn.name());
}
return table.columnWithName(newColumnDefn.name());
}
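// Parse a single column definition, e.g., "c1 INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY" or
// "c2 VARCHAR(22) NULL DEFAULT 'none' COMMENT 'a sample column'" ...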
protected void parseColumnDefinition(Marker start, String columnName, TokenStream tokens, TableEditor table, ColumnEditor column,
AtomicBoolean isPrimaryKey) {
// Parse the data type, which must be at this location ...
List<ParsingException> errors = new ArrayList<>();
Marker dataTypeStart = tokens.mark();
DataType dataType = dataTypeParser.parse(tokens, errors::addAll);
if (dataType == null) {
String dataTypeName = parseDomainName(start);
if (dataTypeName != null) dataType = DataType.userDefinedType(dataTypeName);
}
if (dataType == null) {
// No data type was found
parsingFailed(dataTypeStart.position(), errors, "Unable to read the data type");
return;
}
column.jdbcType(dataType.jdbcType());
column.typeName(dataType.name());
if (dataType.length() > -1) column.length((int) dataType.length());
if (dataType.scale() > -1) column.scale(dataType.scale());
if (tokens.canConsume("AS") || tokens.canConsume("GENERATED", "ALWAYS", "AS")) {
consumeExpression(start);
tokens.canConsumeAnyOf("VIRTUAL", "STORED");
if (tokens.canConsume("UNIQUE")) {
tokens.canConsume("KEY");
}
if (tokens.canConsume("COMMENT")) {
consumeQuotedString();
}
tokens.canConsume("NOT", "NULL");
tokens.canConsume("NULL");
tokens.canConsume("PRIMARY", "KEY");
tokens.canConsume("KEY");
} else {
while (tokens.matchesAnyOf("NOT", "NULL", "DEFAULT", "AUTO_INCREMENT", "UNIQUE", "PRIMARY", "KEY", "COMMENT",
"REFERENCES", "COLUMN_FORMAT", "ON")) {
// Nullability ...
if (tokens.canConsume("NOT", "NULL")) {
column.optional(false);
} else if (tokens.canConsume("NULL")) {
column.optional(true);
}
// Default value ...
if (tokens.matches("DEFAULT")) {
parseDefaultClause(start);
}
if (tokens.canConsume("ON")) {
if (tokens.canConsumeAnyOf("UPDATE", "DELETE")) {
tokens.consume(); // e.g., "ON UPDATE CURRENT_TIMESTAMP"
}
column.autoIncremented(true);
}
// Other options ...
if (tokens.canConsume("AUTO_INCREMENT")) {
column.autoIncremented(true);
column.generated(true);
}
if (tokens.canConsume("UNIQUE", "KEY") || tokens.canConsume("UNIQUE")) {
if (table.primaryKeyColumnNames().isEmpty() && !column.isOptional()) {
// The table has no primary key (yet), but this column is NOT NULL and UNIQUE, so every row has a
// distinct non-null value. (MySQL does allow UNIQUE indexes over nullable columns, but those may
// contain duplicate rows.) Treat this unique key as the primary key for now ...
isPrimaryKey.set(true);
}
}
if (tokens.canConsume("PRIMARY", "KEY") || tokens.canConsume("KEY")) {
// Always set this column as the primary key
column.optional(false); // MySQL primary key columns may not be null
isPrimaryKey.set(true);
}
if (tokens.canConsume("COMMENT")) {
consumeQuotedString();
}
if (tokens.canConsume("COLUMN_FORMAT")) {
tokens.consumeAnyOf("FIXED", "DYNAMIC", "DEFAULT");
}
if (tokens.matches("REFERENCES")) {
parseReferenceDefinition(start);
}
}
}
}
protected String parseDomainName(Marker start) {
return parseSchemaQualifiedName(start);
}
protected List<String> parseIndexColumnNames(Marker start) {
List<String> names = new ArrayList<>();
tokens.consume('(');
parseIndexColumnName(names::add);
while (tokens.canConsume(',')) {
parseIndexColumnName(names::add);
}
tokens.consume(')');
return names;
}
private void parseIndexColumnName(Consumer<String> name) {
name.accept(tokens.consume());
if (tokens.canConsume('(')) {
tokens.consume(); // length
tokens.consume(')');
}
tokens.canConsumeAnyOf("ASC", "DESC");
}
protected void parseIndexType(Marker start) {
tokens.consume("USING");
tokens.consumeAnyOf("BTREE", "HASH");
}
protected void parseIndexOptions(Marker start) {
while (true) {
if (tokens.matches("USING")) {
parseIndexType(start);
} else if (tokens.canConsume("COMMENT")) {
consumeQuotedString();
} else if (tokens.canConsume("KEY_BLOCK_SIZE")) {
tokens.consume("=");
tokens.consume();
} else if (tokens.canConsume("WITH", "PARSER")) {
tokens.consume();
} else {
break;
}
}
}
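// Parse a foreign key reference definition, e.g., "REFERENCES other_table (id) MATCH FULL ON DELETE CASCADE" ...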
protected void parseReferenceDefinition(Marker start) {
tokens.consume("REFERENCES");
parseSchemaQualifiedName(start); // table name
parseColumnNameList(start);
if (tokens.canConsume("MATCH")) {
tokens.consumeAnyOf("FULL", "PARTIAL", "SIMPLE");
if (tokens.canConsume("ON")) {
tokens.consumeAnyOf("DELETE", "UPDATE");
parseReferenceOption(start);
}
}
}
protected void parseReferenceOption(Marker start) {
if (tokens.canConsume("RESTRICT")) {
// nothing
} else if (tokens.canConsume("CASCADE")) {
// nothing
} else if (tokens.canConsume("SET", "NULL")) {
// nothing
} else {
tokens.canConsume("NO", "ACTION");
}
}
protected void parseCreateView(Marker start) {
if (skipViews) {
// We don't care about the rest ...
consumeRemainingStatement(start);
debugSkipped(start);
return;
}
tokens.canConsume("OR", "REPLACE");
if (tokens.canConsume("ALGORITHM")) {
tokens.consume('=');
tokens.consumeAnyOf("UNDEFINED", "MERGE", "TEMPTABLE");
}
if (tokens.canConsume("DEFINER")) {
tokens.consume('=');
tokens.consume(); // user or CURRENT_USER
}
if (tokens.canConsume("SQL", "SECURITY")) {
tokens.consumeAnyOf("DEFINER", "INVOKER");
}
tokens.consume("VIEW");
TableId tableId = parseQualifiedTableName(start);
TableEditor table = databaseTables.editOrCreateTable(tableId);
if (tokens.matches('(')) {
List<String> columnNames = parseColumnNameList(start);
// We know nothing other than the names ...
columnNames.forEach(name -> {
table.addColumn(Column.editor().name(name).create());
});
}
tokens.canConsume("AS");
// We should try to discover the types of the columns by looking at this select
if (tokens.canConsume("SELECT")) {
// If the SELECT clause is selecting qualified column names or column names from a single table, then
// we can look up the columns and use those to set the type and nullability of the view's columns ...
Map<String, Column> selectedColumnsByAlias = parseColumnsInSelectClause(start);
if (table.columns().isEmpty()) {
selectedColumnsByAlias.forEach((columnName, fromTableColumn) -> {
if (fromTableColumn != null && columnName != null) table.addColumn(fromTableColumn.edit().name(columnName).create());
});
} else {
List<Column> changedColumns = new ArrayList<>();
table.columns().forEach(column -> {
// Find the column from the SELECT statement defining the view ...
Column selectedColumn = selectedColumnsByAlias.get(column.name());
if (selectedColumn != null) {
changedColumns.add(column.edit()
.jdbcType(selectedColumn.jdbcType())
.typeName(selectedColumn.typeName())
.length(selectedColumn.length())
.scale(selectedColumn.scale())
.autoIncremented(selectedColumn.isAutoIncremented())
.generated(selectedColumn.isGenerated())
.optional(selectedColumn.isOptional()).create());
}
});
changedColumns.forEach(table::addColumn);
}
// Parse the FROM clause to see if the view is only referencing a single table, and if so then update the view
// with an equivalent primary key ...
Map<String, Table> fromTables = parseSelectFromClause(start);
if (fromTables.size() == 1) {
Table fromTable = fromTables.values().stream().findFirst().get();
List<String> fromTablePkColumnNames = fromTable.primaryKeyColumnNames();
List<String> viewPkColumnNames = new ArrayList<>();
selectedColumnsByAlias.forEach((viewColumnName, fromTableColumn) -> {
if (fromTableColumn != null && fromTablePkColumnNames.contains(fromTableColumn.name())) {
viewPkColumnNames.add(viewColumnName);
}
});
if (viewPkColumnNames.size() == fromTablePkColumnNames.size()) {
table.setPrimaryKeyNames(viewPkColumnNames);
}
}
}
// We don't care about the rest ...
consumeRemainingStatement(start);
// Update the table definition ...
databaseTables.overwriteTable(table.create());
debugParsed(start);
}
protected void parseCreateIndex(Marker start) {
if (tokens.canConsume("UNIQUE")) {
// This is a unique index, and we can mark the index's columns as the primary key iff there is not already
// a primary key on the table. (Should a PK be created later via an alter, then it will overwrite this.)
tokens.consume("INDEX");
tokens.consume(); // index name
if (tokens.canConsume("USING")) {
parseIndexType(start);
}
if (tokens.canConsume("ON")) {
// The ON clause is normally required, but handle the case where it has been omitted ...
TableId tableName = parseQualifiedTableName(start);
TableEditor table = databaseTables.editTable(tableName);
if (table != null && !table.hasPrimaryKey()) {
List<String> names = parseIndexColumnNames(start);
if (table.columns().stream().allMatch(Column::isRequired)) {
databaseTables.overwriteTable(table.setPrimaryKeyNames(names).create());
}
}
}
}
// We don't care about any other statements or the rest of this statement ...
consumeRemainingStatement(start);
debugParsed(start);
}
protected void parseCreateUnknown(Marker start) {
consumeRemainingStatement(start);
}
@Override
protected void parseAlter(Marker marker) {
tokens.consume("ALTER");
if (tokens.matches("TABLE") || tokens.matches("IGNORE", "TABLE")) {
parseAlterTable(marker);
debugParsed(marker);
} else {
parseAlterUnknown(marker);
}
}
protected void parseAlterTable(Marker start) {
tokens.canConsume("IGNORE");
tokens.consume("TABLE");
TableId tableId = parseQualifiedTableName(start);
TableEditor table = databaseTables.editTable(tableId);
if (table != null) {
AtomicReference<TableId> newTableName = new AtomicReference<>(null);
if (!tokens.matches(terminator()) && !tokens.matches("PARTITION")) {
parseAlterSpecificationList(start, table, newTableName::set);
}
if (tokens.matches("PARTITION")) {
parsePartitionOptions(start, table);
}
databaseTables.overwriteTable(table.create());
if (newTableName.get() != null) {
// the table was renamed ...
databaseTables.renameTable(tableId, newTableName.get());
}
} else {
// We don't know about this table ...
consumeRemainingStatement(start);
}
}
protected void parseAlterSpecificationList(Marker start, TableEditor table, Consumer<TableId> newTableName) {
parseAlterSpecification(start, table, newTableName);
while (tokens.canConsume(',')) {
parseAlterSpecification(start, table, newTableName);
}
}
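// Parse a single specification within an ALTER TABLE statement, e.g., "ADD COLUMN c1 INT", "DROP PRIMARY KEY",
// "CHANGE COLUMN c1 c2 INT", or "RENAME TO other_table" ...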
protected void parseAlterSpecification(Marker start, TableEditor table, Consumer<TableId> newTableName) {
parseTableOptions(start, table);
if (tokens.canConsume("ADD")) {
if (tokens.matches("COLUMN", "(") || tokens.matches('(')) {
tokens.canConsume("COLUMN");
parseCreateDefinitionList(start, table);
} else if (tokens.canConsume("PARTITION", "(")) {
parsePartitionDefinition(start, table);
tokens.consume(')');
} else {
parseCreateDefinition(start, table);
}
} else if (tokens.canConsume("DROP")) {
if (tokens.canConsume("PRIMARY", "KEY")) {
table.setPrimaryKeyNames();
} else if (tokens.canConsume("FOREIGN", "KEY")) {
tokens.consume(); // foreign key symbol
} else if (tokens.canConsumeAnyOf("INDEX", "KEY")) {
tokens.consume(); // index name
} else if (tokens.canConsume("PARTITION")) {
parsePartitionNames(start);
} else {
tokens.canConsume("COLUMN");
String columnName = tokens.consume();
table.removeColumn(columnName);
}
} else if (tokens.canConsume("ALTER")) {
tokens.canConsume("COLUMN");
tokens.consume(); // column name
if (!tokens.canConsume("DROP", "DEFAULT")) {
tokens.consume("SET", "DEFAULT");
parseDefaultClause(start);
}
} else if (tokens.canConsume("CHANGE")) {
tokens.canConsume("COLUMN");
String oldName = tokens.consume();
String newName = tokens.consume();
parseCreateColumn(start, table, oldName); // replaces the old definition but keeps old name
table.renameColumn(oldName, newName);
if (tokens.canConsume("FIRST")) {
table.reorderColumn(newName, null);
} else if (tokens.canConsume("AFTER")) {
table.reorderColumn(newName, tokens.consume());
}
} else if (tokens.canConsume("MODIFY")) {
tokens.canConsume("COLUMN");
String columnName = tokens.consume();
parseCreateColumn(start, table, columnName);
if (tokens.canConsume("FIRST")) {
table.reorderColumn(columnName, null);
} else if (tokens.canConsume("AFTER")) {
table.reorderColumn(columnName, tokens.consume());
}
} else if (tokens.canConsumeAnyOf("ALGORITHM", "LOCK")) {
tokens.canConsume('=');
tokens.consume();
} else if (tokens.canConsume("DISABLE", "KEYS") || tokens.canConsume("ENABLE", "KEYS")) {
} else if (tokens.canConsume("RENAME", "INDEX") || tokens.canConsume("RENAME", "KEY")) {
tokens.consume(); // old
tokens.consume("TO");
tokens.consume(); // new
} else if (tokens.canConsume("RENAME")) {
tokens.canConsumeAnyOf("AS", "TO");
TableId newTableId = parseQualifiedTableName(start);
newTableName.accept(newTableId);
} else if (tokens.canConsume("ORDER", "BY")) {
consumeCommaSeparatedValueList(start); // this should not affect the order of the columns in the table
} else if (tokens.canConsume("CONVERT", "TO", "CHARACTER", "SET")) {
tokens.consume(); // charset name
if (tokens.canConsume("COLLATE")) {
tokens.consume(); // collation name
}
} else if (tokens.canConsume("CHARACTER", "SET") || tokens.canConsume("DEFAULT", "CHARACTER", "SET")) {
tokens.canConsume('=');
tokens.consume(); // charset name
if (tokens.canConsume("COLLATE")) {
tokens.canConsume('=');
tokens.consume(); // collation name
}
} else if (tokens.canConsume("DISCARD", "TABLESPACE") || tokens.canConsume("IMPORT", "TABLESPACE")) {
// nothing
} else if (tokens.canConsume("FORCE")) {
// nothing
} else if (tokens.canConsume("WITH", "VALIDATION") || tokens.canConsume("WITHOUT", "VALIDATION")) {
// nothing
} else if (tokens.canConsume("DISCARD", "PARTITION") || tokens.canConsume("IMPORT", "PARTITION")) {
if (!tokens.canConsume("ALL")) {
tokens.consume(); // partition name
}
tokens.consume("TABLESPACE");
} else if (tokens.canConsume("COALLESCE", "PARTITION")) {
tokens.consume(); // number
} else if (tokens.canConsume("REORGANIZE", "PARTITION")) {
parsePartitionNames(start);
tokens.consume("INTO", "(");
parsePartitionDefinition(start, table);
tokens.consume(')');
} else if (tokens.canConsume("EXCHANGE", "PARTITION")) {
tokens.consume(); // partition name
tokens.consume("WITH", "TABLE");
parseSchemaQualifiedName(start); // table name
if (tokens.canConsumeAnyOf("WITH", "WITHOUT")) {
tokens.consume("VALIDATION");
}
} else if (tokens.matches(TokenStream.ANY_VALUE, "PARTITION")) {
tokens.consumeAnyOf("TRUNCATE", "CHECK", "ANALYZE", "OPTIMIZE", "REBUILD", "REPAIR");
tokens.consume("PARTITION");
if (!tokens.canConsume("ALL")) {
parsePartitionNames(start);
}
} else if (tokens.canConsume("REMOVE", "PARTITIONING")) {
// nothing
} else if (tokens.canConsume("UPGRADE", "PARTITIONING")) {
// nothing
}
}
protected void parseAlterUnknown(Marker start) {
consumeRemainingStatement(start);
debugSkipped(start);
}
@Override
protected void parseDrop(Marker marker) {
tokens.consume("DROP");
if (tokens.matches("TABLE") || tokens.matches("TEMPORARY", "TABLE")) {
parseDropTable(marker);
} else if (tokens.matches("VIEW")) {
parseDropView(marker);
} else {
parseDropUnknown(marker);
}
}
protected void parseDropTable(Marker start) {
tokens.canConsume("TEMPORARY");
tokens.consume("TABLE");
tokens.canConsume("IF", "EXISTS");
databaseTables.removeTable(parseQualifiedTableName(start));
while (tokens.canConsume(',')) {
databaseTables.removeTable(parseQualifiedTableName(start));
}
tokens.canConsumeAnyOf("RESTRICT", "CASCADE");
debugParsed(start);
}
protected void parseDropView(Marker start) {
if ( skipViews ) {
consumeRemainingStatement(start);
debugSkipped(start);
return;
}
tokens.consume("VIEW");
tokens.canConsume("IF", "EXISTS");
databaseTables.removeTable(parseQualifiedTableName(start));
while (tokens.canConsume(',')) {
databaseTables.removeTable(parseQualifiedTableName(start));
}
tokens.canConsumeAnyOf("RESTRICT", "CASCADE");
debugParsed(start);
}
protected void parseDropUnknown(Marker start) {
consumeRemainingStatement(start);
debugSkipped(start);
}
protected void parseRename(Marker start) {
tokens.consume("RENAME");
if (tokens.canConsume("TABLE")) {
parseRenameTable(start);
while (tokens.canConsume(',')) {
parseRenameTable(start);
}
} else if (tokens.canConsumeAnyOf("DATABASE", "SCHEMA")) {
// See https://dev.mysql.com/doc/refman/5.1/en/rename-database.html
consumeRemainingStatement(start);
}
}
protected void parseRenameTable(Marker start) {
TableId from = parseQualifiedTableName(start);
tokens.consume("TO");
TableId to = parseQualifiedTableName(start);
databaseTables.renameTable(from, to);
}
protected List<String> parseColumnNameList(Marker start) {
List<String> names = new ArrayList<>();
tokens.consume('(');
names.add(tokens.consume());
while (tokens.canConsume(',')) {
names.add(tokens.consume());
}
tokens.consume(')');
return names;
}
protected void parsePartitionNames(Marker start) {
consumeCommaSeparatedValueList(start);
}
protected void consumeCommaSeparatedValueList(Marker start) {
tokens.consume();
while (tokens.canConsume(',')) {
tokens.consume();
}
}
protected void consumeValueList(Marker start) {
tokens.consume('(');
consumeCommaSeparatedValueList(start);
tokens.consume(')');
}
/**
* Consume an expression surrounded by parentheses.
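* Nested parenthesized pairs are skipped, so given {@code (a + (b * 2)) NOT NULL} this consumes through the
* second closing parenthesis, leaving {@code NOT NULL} as the next tokens.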
*
* @param start the start of the statement
*/
protected void consumeExpression(Marker start) {
tokens.consume("(");
tokens.consumeThrough(')', '(');
}
/**
* Try calling the supplied functions in sequence, stopping as soon as one of them succeeds.
*
* @param functions the functions
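* <p>
* For example, a parser might try several alternative clause parsers until one succeeds (a sketch;
* {@code parseFirstForm} and {@code parseSecondForm} stand in for actual parse methods):
* <pre>
* sequentially(this::parseFirstForm, this::parseSecondForm);
* </pre>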
*/
@SuppressWarnings("unchecked")
protected void sequentially(Consumer<Marker>... functions) {
if (functions == null || functions.length == 0) return;
Collection<ParsingException> errors = new ArrayList<>();
Marker marker = tokens.mark();
for (Consumer<Marker> function : functions) {
try {
function.accept(marker);
return;
} catch (ParsingException e) {
errors.add(e);
tokens.rewind(marker);
}
}
parsingFailed(marker.position(), errors, "Unable to parse statement");
}
protected void parseDefaultClause(Marker start) {
tokens.consume("DEFAULT");
if (tokens.canConsume("CURRENT_TIMESTAMP")) {
tokens.canConsume("ON", "UPDATE", "CURRENT_TIMESTAMP");
} else if (tokens.canConsume("NULL")) {
// do nothing ...
} else {
parseLiteral(start);
}
}
}

View File

@ -0,0 +1,24 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.util.Properties;
import io.debezium.util.IoUtil;
/**
* Information about this module.
*
* @author Randall Hauch
*/
public class Module {
private static final Properties INFO = IoUtil.loadProperties(Module.class, "io/debezium/mysql/build.properties");
public static String version() {
return INFO.getProperty("version");
}
}

View File

@ -0,0 +1,256 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.github.shyiko.mysql.binlog.BinaryLogClient.LifecycleListener;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.EventData;
import com.github.shyiko.mysql.binlog.event.EventHeader;
import com.github.shyiko.mysql.binlog.event.EventHeaderV4;
import com.github.shyiko.mysql.binlog.event.EventType;
import com.github.shyiko.mysql.binlog.event.RotateEventData;
import com.github.shyiko.mysql.binlog.event.deserialization.EventDeserializer;
import com.github.shyiko.mysql.binlog.network.AuthenticationException;
import io.debezium.config.Configuration;
import io.debezium.mysql.MySqlConfiguration;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;
/**
* A Kafka Connect source task that reads the MySQL binary log and generates the corresponding data change events.
*
* @see MySqlConnector
* @author Randall Hauch
*/
public class MySqlChangeDetector extends SourceTask {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final EnumMap<EventType, EventHandler> eventHandlers = new EnumMap<>(EventType.class);
private final Tables tables;
private final TableConverters tableConverters;
// These are all effectively constants between start(...) and stop(...)
private BinaryLogClient client;
private BlockingQueue<Event> events;
private List<Event> batchEvents;
private int maxBatchSize;
private long pollIntervalMs;
// Used in the methods that process events ...
private final SourceInfo sourceInfo = new SourceInfo();
public MySqlChangeDetector() {
this(null);
}
public MySqlChangeDetector( TopicSelector topicSelector ) {
topicSelector = topicSelector != null ? topicSelector : TopicSelector.defaultSelector();
tables = new Tables();
tableConverters = new TableConverters(topicSelector, tables, this::signalTablesChanged);
eventHandlers.put(EventType.TABLE_MAP, tableConverters::updateTableMetadata);
eventHandlers.put(EventType.QUERY, tableConverters::updateTableCommand);
eventHandlers.put(EventType.EXT_WRITE_ROWS, tableConverters::handleInsert);
eventHandlers.put(EventType.EXT_UPDATE_ROWS, tableConverters::handleUpdate);
eventHandlers.put(EventType.EXT_DELETE_ROWS, tableConverters::handleDelete);
}
@Override
public String version() {
return Module.version();
}
protected void signalTablesChanged( Set<TableId> changedTables ) {
// TODO: do something
}
@Override
public void start(Map<String, String> props) {
// Read and verify the configuration ...
final Configuration config = Configuration.from(props);
final String user = config.getString(MySqlConfiguration.USER);
final String password = config.getString(MySqlConfiguration.PASSWORD);
final String host = config.getString(MySqlConfiguration.HOSTNAME);
final int port = config.getInteger(MySqlConfiguration.PORT);
final Long serverId = config.getLong(MySqlConfiguration.SERVER_ID);
final String logicalId = config.getString(MySqlConfiguration.LOGICAL_ID.name(), "" + host + ":" + port);
final boolean keepAlive = config.getBoolean(MySqlConfiguration.KEEP_ALIVE);
final int maxQueueSize = config.getInteger(MySqlConfiguration.MAX_QUEUE_SIZE);
final long timeoutInMilliseconds = config.getLong(MySqlConfiguration.CONNECTION_TIMEOUT_MS);
maxBatchSize = config.getInteger(MySqlConfiguration.MAX_BATCH_SIZE);
pollIntervalMs = config.getLong(MySqlConfiguration.POLL_INTERVAL_MS);
// Create the queue ...
events = new LinkedBlockingDeque<>(maxQueueSize);
batchEvents = new ArrayList<>(maxBatchSize);
// Set up the log reader ...
client = new BinaryLogClient(host, port, user, password);
client.setServerId(serverId);
client.setKeepAlive(keepAlive);
if (logger.isDebugEnabled()) client.registerEventListener(this::logEvent);
client.registerEventListener(this::enqueue);
client.registerLifecycleListener(traceLifecycleListener());
// Check if we've already processed some of the log for this database ...
sourceInfo.setDatabase(logicalId);
if (context != null) {
// TODO: Figure out how to load the table definitions from previous runs. Can it be read from each of the output
// topics? Does it need to be serialized locally?
// Get the offsets for our partition ...
sourceInfo.setOffset(context.offsetStorageReader().offset(sourceInfo.partition()));
// And set the client to start from that point ...
client.setBinlogFilename(sourceInfo.binlogFilename());
client.setBinlogPosition(sourceInfo.binlogPosition());
// The event row number will be used when processing the first event ...
} else {
// Initialize this position, though it will be reset when we see the first event (which should be a rotate event) ...
sourceInfo.setBinlogPosition(client.getBinlogPosition());
}
// Start the log reader, which starts background threads ...
try {
client.connect(timeoutInMilliseconds);
} catch (TimeoutException e) {
double seconds = TimeUnit.MILLISECONDS.toSeconds(timeoutInMilliseconds);
throw new ConnectException("Timed out after " + seconds + " seconds while waiting to connect to the MySQL database at " + host
+ ":" + port + " with user '" + user + "'", e);
} catch (AuthenticationException e) {
throw new ConnectException("Failed to authenticate to the MySQL database at " + host + ":" + port + " with user '" + user + "'",
e);
} catch (Throwable e) {
throw new ConnectException(
"Unable to connect to the MySQL database at " + host + ":" + port + " with user '" + user + "': " + e.getMessage(), e);
}
}
@Override
public List<SourceRecord> poll() throws InterruptedException {
while (events.drainTo(batchEvents, maxBatchSize - batchEvents.size()) == 0 || batchEvents.isEmpty()) {
// No events to process, so sleep for a bit ...
sleep(pollIntervalMs);
}
// We have at least some records to process ...
List<SourceRecord> records = new ArrayList<>(batchEvents.size());
for (Event event : batchEvents) {
// Update the source offset info ...
EventHeader eventHeader = event.getHeader();
EventType eventType = eventHeader.getEventType();
if (eventType == EventType.ROTATE) {
EventData eventData = event.getData();
RotateEventData rotateEventData;
if (eventData instanceof EventDeserializer.EventDataWrapper) {
rotateEventData = (RotateEventData) ((EventDeserializer.EventDataWrapper) eventData).getInternal();
} else {
rotateEventData = (RotateEventData) eventData;
}
sourceInfo.setBinlogFilename(rotateEventData.getBinlogFilename());
sourceInfo.setBinlogPosition(rotateEventData.getBinlogPosition());
sourceInfo.setRowInEvent(0);
} else if (eventHeader instanceof EventHeaderV4) {
EventHeaderV4 trackableEventHeader = (EventHeaderV4) eventHeader;
long nextBinlogPosition = trackableEventHeader.getNextPosition();
if (nextBinlogPosition > 0) {
sourceInfo.setBinlogPosition(nextBinlogPosition);
sourceInfo.setRowInEvent(0);
}
}
// If there is a handler for this event, forward the event to it ...
EventHandler handler = eventHandlers.get(eventType);
if (handler != null) {
handler.handle(event, sourceInfo, records::add);
}
}
// We've processed them all, so clear the batch and return the records ...
batchEvents.clear();
return records;
}
@Override
public void stop() {
try {
client.disconnect();
} catch (IOException e) {
logger.error("Unexpected error when disconnecting from the MySQL binary log reader", e);
}
}
/**
* Adds the event into the queue for subsequent batch processing.
*
* @param event the event that was read from the binary log
*/
protected void enqueue(Event event) {
if (event != null) events.add(event);
}
protected void logEvent(Event event) {
logger.debug("Received event: " + event);
}
protected void sleep(long timeInMillis) {
try {
Thread.sleep(timeInMillis);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore the thread's interrupt status
}
}
protected LifecycleListener traceLifecycleListener() {
return new LifecycleListener() {
@Override
public void onDisconnect(BinaryLogClient client) {
logger.debug("MySQL Connector disconnected");
}
@Override
public void onConnect(BinaryLogClient client) {
logger.info("MySQL Connector connected");
}
@Override
public void onCommunicationFailure(BinaryLogClient client, Exception ex) {
logger.error("MySQL Connector communication failure", ex);
}
@Override
public void onEventDeserializationFailure(BinaryLogClient client, Exception ex) {
logger.error("MySQL Connector received event deserialization failure", ex);
}
};
}
/**
* The functional interface for all event handler methods.
*/
@FunctionalInterface
protected static interface EventHandler {
void handle(Event event, SourceInfo source, Consumer<SourceRecord> recorder);
}
}

View File

@ -0,0 +1,48 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.source.SourceConnector;
/**
* A Kafka Connect source connector that creates tasks that read the MySQL binary log and generate the corresponding
* data change events.
*
* @author Randall Hauch
*/
public class MySqlConnector extends SourceConnector {
public MySqlConnector() {
}
@Override
public String version() {
return Module.version();
}
@Override
public void start(Map<String, String> props) {
}
@Override
public Class<? extends Task> taskClass() {
return null;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
return null;
}
@Override
public void stop() {
}
}

View File

@ -0,0 +1,182 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.util.Map;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.util.Collect;
/**
* Information about the source of the change events, including the position within the source binary log that has
* already been processed.
* <p>
* The {@link #partition() source partition} information describes the database whose log is being consumed. Typically, the
* database is identified by the host address and port number of the MySQL server and the name of the database. Here's a JSON-like
* representation of an example database:
*
* <pre>
* {
* "db" : "myDatabase"
* }
* </pre>
*
* <p>
* The {@link #offset() source offset} information describes how much of the database's binary log the change detector
* has processed. Here's a JSON-like representation of an example:
*
* <pre>
* {
* "file" = "mysql-bin.000003",
* "pos" = 105586,
* "row" = 0
* }
* </pre>
*
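* <p>
* A task typically restores any previously-recorded offset when it starts, and then uses the file name and
* position to resume reading the log. A minimal sketch using this class's methods ({@code context} and
* {@code client} being the surrounding task's Kafka Connect context and binlog client):
*
* <pre>
* SourceInfo source = new SourceInfo();
* source.setDatabase("localhost:3306");
* source.setOffset(context.offsetStorageReader().offset(source.partition()));
* client.setBinlogFilename(source.binlogFilename());
* client.setBinlogPosition(source.binlogPosition());
* </pre>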
* @author Randall Hauch
*/
@NotThreadSafe
final class SourceInfo {
public static final String DATABASE_PARTITION_KEY = "db";
public static final String BINLOG_FILENAME_OFFSET_KEY = "file";
public static final String BINLOG_POSITION_OFFSET_KEY = "pos";
public static final String BINLOG_EVENT_ROW_NUMBER_OFFSET_KEY = "row";
private String binlogFilename;
private long binlogPosition = 4;
private int eventRowNumber = 0;
private String databaseId;
private Map<String, ?> sourcePartition;
public SourceInfo() {
}
/**
* Set the database identifier. This is typically called once upon initialization.
*
* @param logicalId the logical identifier for the database; may not be null
*/
public void setDatabase(String logicalId) {
this.databaseId = logicalId;
sourcePartition = Collect.hashMapOf(DATABASE_PARTITION_KEY, databaseId);
}
/**
* Get the Kafka Connect detail about the source "partition", which describes the portion of the source that we are
* consuming. Since we're reading the binary log for a single database, the source partition specifies the
* {@link #setDatabase database server}.
* <p>
* The resulting map is mutable for efficiency reasons (this information rarely changes), but should not be mutated.
*
* @return the source partition information; never null
*/
public Map<String, ?> partition() {
return sourcePartition;
}
/**
* Get the Kafka Connect detail about the source "offset", which describes the position within the source where we last
* stopped reading.
*
* @return a copy of the current offset; never null
*/
public Map<String, Object> offset() {
return Collect.hashMapOf(BINLOG_FILENAME_OFFSET_KEY, binlogFilename,
BINLOG_POSITION_OFFSET_KEY, binlogPosition,
BINLOG_EVENT_ROW_NUMBER_OFFSET_KEY, eventRowNumber);
}
/**
* Set the current row number within a given event, and then get the Kafka Connect detail about the source "offset", which
* describes the position within the source where we last stopped reading.
*
* @param eventRowNumber the row number within the last event that was successfully processed
* @return a copy of the current offset; never null
*/
public Map<String, Object> offset(int eventRowNumber) {
setRowInEvent(eventRowNumber);
return offset();
}
/**
* Set the name of the MySQL binary log file.
*
* @param binlogFilename the name of the binary log file; may not be null
*/
public void setBinlogFilename(String binlogFilename) {
this.binlogFilename = binlogFilename;
}
/**
* Set the position within the MySQL binary log file.
*
* @param binlogPosition the position within the binary log file
*/
public void setBinlogPosition(long binlogPosition) {
this.binlogPosition = binlogPosition;
}
/**
* Set the index of the row within the event appearing at the {@link #binlogPosition() position} within the
* {@link #binlogFilename() binary log file}.
*
* @param rowNumber the 0-based row number
*/
public void setRowInEvent(int rowNumber) {
this.eventRowNumber = rowNumber;
}
/**
* Set the source offset, as read from Kafka Connect. This method does nothing if the supplied map is null.
*
* @param sourceOffset the previously-recorded Kafka Connect source offset
*/
public void setOffset(Map<String, Object> sourceOffset) {
if (sourceOffset != null) {
// We have previously recorded an offset ...
binlogFilename = (String) sourceOffset.get(BINLOG_FILENAME_OFFSET_KEY);
binlogPosition = (Long) sourceOffset.get(BINLOG_POSITION_OFFSET_KEY);
Integer rowNumber = (Integer) sourceOffset.get(BINLOG_EVENT_ROW_NUMBER_OFFSET_KEY);
eventRowNumber = rowNumber != null ? rowNumber.intValue() : 0;
}
}
/**
* Get the name of the MySQL binary log file that has been processed.
*
* @return the name of the binary log file; null if it has not been {@link #setBinlogFilename(String) set}
*/
public String binlogFilename() {
return binlogFilename;
}
/**
* Get the position within the MySQL binary log file that has been processed.
*
* @return the position within the binary log file, or the default of 4 (the position just past the binlog file header) if it has not been {@link #setBinlogPosition(long) set}
*/
public long binlogPosition() {
return binlogPosition;
}
/**
* Get the row within the event at the {@link #binlogPosition() position} within the {@link #binlogFilename() binary log file}.
*
* @return the 0-based row number
*/
public int eventRowNumber() {
return eventRowNumber;
}
/**
* Get the logical identifier of the database that is the source of the events.
* @return the database name; null if it has not been {@link #setDatabase(String) set}
*/
public String database() {
return databaseId;
}
}

View File

@ -0,0 +1,245 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.io.Serializable;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.shyiko.mysql.binlog.event.DeleteRowsEventData;
import com.github.shyiko.mysql.binlog.event.Event;
import com.github.shyiko.mysql.binlog.event.QueryEventData;
import com.github.shyiko.mysql.binlog.event.TableMapEventData;
import com.github.shyiko.mysql.binlog.event.UpdateRowsEventData;
import com.github.shyiko.mysql.binlog.event.WriteRowsEventData;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.mysql.MySqlDdlParser;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.TableSchema;
import io.debezium.relational.TableSchemaBuilder;
import io.debezium.relational.Tables;
import io.debezium.text.ParsingException;
/**
* Converts raw binlog events for known tables into change event {@link SourceRecord}s.
*
* @author Randall Hauch
*/
@NotThreadSafe
final class TableConverters {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final TopicSelector topicSelector;
private final MySqlDdlParser ddlParser;
private final Tables tables;
private final TableSchemaBuilder schemaBuilder = new TableSchemaBuilder();
private final Consumer<Set<TableId>> tablesChangedHandler;
private final Map<String, TableSchema> tableSchemaByTableName = new HashMap<>();
private final Map<Long, Converter> convertersByTableId = new HashMap<>();
private final Map<String, Long> tableNumbersByTableName = new HashMap<>();
public TableConverters( TopicSelector topicSelector, Tables tables, Consumer<Set<TableId>> tablesChangedHandler ) {
this.topicSelector = topicSelector;
this.tablesChangedHandler = tablesChangedHandler != null ? tablesChangedHandler : (ids)->{};
this.tables = tables != null ? tables : new Tables();
this.ddlParser = new MySqlDdlParser(false); // don't include views
}
public void updateTableCommand(Event event, SourceInfo source, Consumer<SourceRecord> recorder) {
QueryEventData command = event.getData();
String ddlStatements = command.getSql();
try {
this.ddlParser.parse(ddlStatements, tables);
} catch ( ParsingException e) {
logger.error("Error parsing DDL statement and updating tables", e);
} finally {
// Figure out what changed ...
Set<TableId> changes = tables.drainChanges();
changes.forEach(tableId->{
Table table = tables.forTable(tableId);
if ( table == null ) { // removed
tableSchemaByTableName.remove(tableId.table());
} else {
TableSchema schema = schemaBuilder.create(table, false);
tableSchemaByTableName.put(tableId.table(), schema);
}
});
tablesChangedHandler.accept(changes); // notify
}
}
/**
* Handle a change in the table metadata.
* <p>
* This method should be called whenever we consume a TABLE_MAP event, and every transaction in the log should include one
* of these for each table affected by the transaction. Each table map event includes a monotonically-increasing numeric
* identifier, and this identifier is used within subsequent events within the same transaction. This table identifier can
* change when:
* <ol>
* <li>the table structure is modified (e.g., via an {@code ALTER TABLE ...} command); or</li>
* <li>MySQL rotates to a new binary log file, even if the table structure does not change.</li>
* </ol>
*
* @param event the update event; never null
* @param source the source information; never null
* @param recorder the consumer to which all {@link SourceRecord}s should be passed; never null
*/
public void updateTableMetadata(Event event, SourceInfo source, Consumer<SourceRecord> recorder) {
TableMapEventData metadata = event.getData();
long tableNumber = metadata.getTableId();
if (!convertersByTableId.containsKey(tableNumber)) {
// We haven't seen this table ID, so we need to rebuild our converter functions ...
String databaseName = metadata.getDatabase();
String tableName = metadata.getTable();
String topicName = topicSelector.getTopic(databaseName, tableName);
// Just get the current schema, which should be up-to-date ...
TableSchema tableSchema = tableSchemaByTableName.get(tableName);
// Generate this table's insert, update, and delete converters ...
Converter converter = new Converter() {
@Override
public String topic() {
return topicName;
}
@Override
public Integer partition() {
return null;
}
@Override
public Schema keySchema() {
return tableSchema.keySchema();
}
@Override
public Schema valueSchema() {
return tableSchema.valueSchema();
}
@Override
public Object createKey(Serializable[] row, BitSet includedColumns) {
// assume all columns in the table are included ...
return tableSchema.keyFromColumnData(row);
}
@Override
public Struct inserted(Serializable[] row, BitSet includedColumns) {
// assume all columns in the table are included ...
return tableSchema.valueFromColumnData(row);
}
@Override
public Struct updated(Serializable[] after, BitSet includedColumns, Serializable[] before,
BitSet includedColumnsBeforeUpdate) {
// assume all columns in the table are included, and we'll write out only the updates ...
return tableSchema.valueFromColumnData(after);
}
@Override
public Struct deleted(Serializable[] deleted, BitSet includedColumns) {
// TODO: Should we write out the old values or null?
// assume all columns in the table are included ...
return null; // tableSchema.valueFromColumnData(row);
}
};
convertersByTableId.put(tableNumber, converter);
Long previousTableNumber = tableNumbersByTableName.put(tableName, tableNumber);
if ( previousTableNumber != null ) {
convertersByTableId.remove(previousTableNumber);
}
}
}
public void handleInsert(Event event, SourceInfo source, Consumer<SourceRecord> recorder) {
WriteRowsEventData write = event.getData();
long tableNumber = write.getTableId();
BitSet includedColumns = write.getIncludedColumns();
Converter converter = convertersByTableId.get(tableNumber);
String topic = converter.topic();
Integer partition = converter.partition();
for (int row = 0; row != write.getRows().size(); ++row) {
Serializable[] values = write.getRows().get(row);
Schema keySchema = converter.keySchema();
Object key = converter.createKey(values,includedColumns);
Schema valueSchema = converter.valueSchema();
Struct value = converter.inserted(values,includedColumns);
SourceRecord record = new SourceRecord(source.partition(), source.offset(row), topic, partition,
keySchema, key, valueSchema, value);
recorder.accept(record);
}
}
/**
* Process the supplied event and generate any source records, adding them to the supplied consumer.
*
* @param event the database change data event to be processed; never null
* @param source the source information to use in the record(s); never null
* @param recorder the consumer of all source records; never null
*/
public void handleUpdate(Event event, SourceInfo source, Consumer<SourceRecord> recorder) {
UpdateRowsEventData update = event.getData();
long tableNumber = update.getTableId();
BitSet includedColumns = update.getIncludedColumns();
BitSet includedColumnsBefore = update.getIncludedColumnsBeforeUpdate();
Converter converter = convertersByTableId.get(tableNumber);
String topic = converter.topic();
Integer partition = converter.partition();
for (int row = 0; row != update.getRows().size(); ++row) {
Map.Entry<Serializable[], Serializable[]> changes = update.getRows().get(row);
Serializable[] before = changes.getKey();
Serializable[] after = changes.getValue();
Schema keySchema = converter.keySchema();
Object key = converter.createKey(after,includedColumns);
Schema valueSchema = converter.valueSchema();
Struct value = converter.updated(after, includedColumns, before, includedColumnsBefore);
SourceRecord record = new SourceRecord(source.partition(), source.offset(row), topic, partition,
keySchema, key, valueSchema, value);
recorder.accept(record);
}
}
public void handleDelete(Event event, SourceInfo source, Consumer<SourceRecord> recorder) {
DeleteRowsEventData deleted = event.getData();
long tableNumber = deleted.getTableId();
BitSet includedColumns = deleted.getIncludedColumns();
Converter converter = convertersByTableId.get(tableNumber);
String topic = converter.topic();
Integer partition = converter.partition();
for (int row = 0; row != deleted.getRows().size(); ++row) {
Serializable[] values = deleted.getRows().get(row);
Schema keySchema = converter.keySchema();
Object key = converter.createKey(values,includedColumns);
Schema valueSchema = converter.valueSchema();
Struct value = converter.deleted(values, includedColumns);
SourceRecord record = new SourceRecord(source.partition(), source.offset(row), topic, partition,
keySchema, key, valueSchema, value);
recorder.accept(record);
}
}
protected static interface Converter {
String topic();
Integer partition();
Schema keySchema();
Schema valueSchema();
Object createKey(Serializable[] row, BitSet includedColumns);
Struct inserted(Serializable[] row, BitSet includedColumns);
Struct updated(Serializable[] after, BitSet includedColumns, Serializable[] before, BitSet includedColumnsBeforeUpdate );
Struct deleted(Serializable[] deleted, BitSet includedColumns);
}
}

View File

@ -0,0 +1,43 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
/**
* A function that determines the name of a topic given the table name and database name.
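* <p>
* For example, the {@link #defaultSelector() default selector} maps database {@code inventory} and table
* {@code customers} to the topic {@code inventory.customers}:
*
* <pre>
* TopicSelector selector = TopicSelector.defaultSelector();
* String topic = selector.getTopic("inventory", "customers"); // "inventory.customers"
* </pre>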
*
* @author Randall Hauch
*/
@FunctionalInterface
public interface TopicSelector {
/**
* Get the default topic selector logic, which simply concatenates the database name and topic name using a '.' delimiter
* character.
*
* @return the topic selector; never null
*/
static TopicSelector defaultSelector() {
return defaultSelector(".");
}
/**
* Get the default topic selector logic, which simply concatenates the database name and topic name using the supplied
* delimiter.
*
* @param delimiter the string delineating the database name and table name; may not be null
* @return the topic selector; never null
*/
static TopicSelector defaultSelector(String delimiter) {
return (databaseName, tableName) -> databaseName + delimiter + tableName;
}
/**
* Get the name of the topic given the database and table names.
* @param databaseName the name of the database; may not be null
* @param tableName the name of the table; may not be null
* @return the topic name; never null
*/
String getTopic(String databaseName, String tableName);
}

View File

@ -0,0 +1 @@
version=${project.version}

View File

@ -1,29 +0,0 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.ingest.mysql;
import java.sql.SQLException;
import org.junit.Test;
import io.debezium.jdbc.TestDatabase;
public class ConnectionIT {
@Test
public void shouldConnectToDefaulDatabase() throws SQLException {
try (MySQLConnection conn = new MySQLConnection( TestDatabase.testConfig("mysql") );) {
conn.connect();
}
}
@Test
public void shouldConnectToEmptyDatabase() throws SQLException {
try (MySQLConnection conn = new MySQLConnection( TestDatabase.testConfig("emptydb") );) {
conn.connect();
}
}
}

View File

@ -0,0 +1,53 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.junit.Ignore;
import org.junit.Test;
import io.debezium.jdbc.TestDatabase;
public class ConnectionIT {
@Ignore
@Test
public void shouldConnectToDefaultDatabase() throws SQLException {
try (MySQLConnection conn = new MySQLConnection(TestDatabase.testConfig("mysql"));) {
conn.connect();
}
}
@Test
public void shouldDoStuffWithDatabase() throws SQLException {
try (MySQLConnection conn = new MySQLConnection(TestDatabase.testConfig("readbinlog_test"));) {
conn.connect();
// Set up the table as one transaction and wait to see the events ...
conn.execute("DROP TABLE IF EXISTS person",
"CREATE TABLE person ("
+ " name VARCHAR(255) primary key,"
+ " birthdate DATE NULL,"
+ " age INTEGER NULL DEFAULT 10,"
+ " salary DECIMAL(5,2),"
+ " bitStr BIT(18)"
+ ")");
conn.execute("SELECT * FROM person");
try (ResultSet rs = conn.connection().getMetaData().getColumns("readbinlog_test", null, null, null)) {
conn.print(rs);
}
}
}
@Ignore
@Test
public void shouldConnectToEmptyDatabase() throws SQLException {
try (MySQLConnection conn = new MySQLConnection(TestDatabase.testConfig("emptydb"));) {
conn.connect();
}
}
}

View File

@ -3,7 +3,7 @@
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.ingest.mysql;
package io.debezium.mysql;
import io.debezium.config.Configuration;
import io.debezium.jdbc.JdbcConnection;

View File

@ -0,0 +1,151 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Types;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Before;
import org.junit.Test;
import static org.fest.assertions.Assertions.assertThat;
import io.debezium.relational.Column;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;
import io.debezium.relational.ddl.DdlParser;
import io.debezium.util.IoUtil;
public class MySqlDdlParserTest {
private DdlParser parser;
private Tables tables;
@Before
public void beforeEach() {
parser = new MySqlDdlParser();
tables = new Tables();
}
@Test
public void shouldParseMultipleStatements() {
String ddl = "CREATE TABLE foo ( " + System.lineSeparator()
+ " c1 INTEGER NOT NULL, " + System.lineSeparator()
+ " c2 VARCHAR(22) " + System.lineSeparator()
+ "); " + System.lineSeparator()
+ "-- This is a comment" + System.lineSeparator()
+ "DROP TABLE foo;" + System.lineSeparator();
parser.parse(ddl, tables);
assertThat(tables.size()).isEqualTo(0); // table created and dropped
}
@Test
public void shouldParseCreateTableStatementWithSingleGeneratedAndPrimaryKeyColumn() {
String ddl = "CREATE TABLE foo ( " + System.lineSeparator()
+ " c1 INTEGER NOT NULL AUTO_INCREMENT, " + System.lineSeparator()
+ " c2 VARCHAR(22) " + System.lineSeparator()
+ "); " + System.lineSeparator();
parser.parse(ddl, tables);
assertThat(tables.size()).isEqualTo(1);
Table foo = tables.forTable(new TableId(null, null, "foo"));
assertThat(foo).isNotNull();
assertThat(foo.columnNames()).containsExactly("c1", "c2");
assertThat(foo.primaryKeyColumnNames()).isEmpty();
assertColumn(foo, "c1", "INTEGER", Types.INTEGER, -1, -1, false, true, true);
assertColumn(foo, "c2", "VARCHAR", Types.VARCHAR, 22, -1, true, false, false);
}
@Test
public void shouldParseCreateTableStatementWithSingleGeneratedColumnAsPrimaryKey() {
String ddl = "CREATE TABLE my.foo ( " + System.lineSeparator()
+ " c1 INTEGER NOT NULL AUTO_INCREMENT, " + System.lineSeparator()
+ " c2 VARCHAR(22), " + System.lineSeparator()
+ " PRIMARY KEY (c1)" + System.lineSeparator()
+ "); " + System.lineSeparator();
parser.parse(ddl, tables);
assertThat(tables.size()).isEqualTo(1);
Table foo = tables.forTable(new TableId("my", null, "foo"));
assertThat(foo).isNotNull();
assertThat(foo.columnNames()).containsExactly("c1", "c2");
assertThat(foo.primaryKeyColumnNames()).containsExactly("c1");
assertColumn(foo, "c1", "INTEGER", Types.INTEGER, -1, -1, false, true, true);
assertColumn(foo, "c2", "VARCHAR", Types.VARCHAR, 22, -1, true, false, false);
parser.parse("DROP TABLE my.foo", tables);
assertThat(tables.size()).isEqualTo(0);
}
@Test
public void shouldParseCreateStatements() {
parser.parse(readFile("ddl/mysql-test-create.ddl"), tables);
//System.out.println(tables);
}
@Test
public void shouldParseTestStatements() {
parser.parse(readFile("ddl/mysql-test-statements.ddl"), tables);
System.out.println(tables);
}
@Test
public void shouldParseSomeLinesFromCreateStatements() {
parser.parse(readLines(189, "ddl/mysql-test-create.ddl"), tables);
}
protected String readFile( String classpathResource ) {
try ( InputStream stream = getClass().getClassLoader().getResourceAsStream(classpathResource); ) {
assertThat(stream).isNotNull();
return IoUtil.read(stream);
} catch ( IOException e ) {
fail("Unable to read '" + classpathResource + "'");
}
assert false : "should never get here";
return null;
}
/**
* Reads the lines starting with a given line number from the specified file on the classpath. Any lines preceding the
* given line number will be included as empty lines, meaning the line numbers will match the input file.
* @param startingLineNumber the 1-based number designating the first line to be included
* @param classpathResource the path to the file on the classpath
* @return the string containing the subset of the file contents; never null but possibly empty
*/
protected String readLines( int startingLineNumber, String classpathResource ) {
try ( InputStream stream = getClass().getClassLoader().getResourceAsStream(classpathResource); ) {
assertThat(stream).isNotNull();
StringBuilder sb = new StringBuilder();
AtomicInteger counter = new AtomicInteger();
IoUtil.readLines(stream,line->{
if (counter.incrementAndGet() >= startingLineNumber) sb.append(line);
sb.append(System.lineSeparator());
});
return sb.toString();
} catch ( IOException e ) {
fail("Unable to read '" + classpathResource + "'");
}
assert false : "should never get here";
return null;
}
protected void assertColumn(Table table, String name, String typeName, int jdbcType, int length, int scale,
boolean optional, boolean generated, boolean autoIncremented) {
Column column = table.columnWithName(name);
assertThat(column.name()).isEqualTo(name);
assertThat(column.typeName()).isEqualTo(typeName);
assertThat(column.jdbcType()).isEqualTo(jdbcType);
assertThat(column.length()).isEqualTo(length);
assertThat(column.scale()).isEqualTo(scale);
assertThat(column.isOptional()).isEqualTo(optional);
assertThat(column.isGenerated()).isEqualTo(generated);
assertThat(column.isAutoIncremented()).isEqualTo(autoIncremented);
}
}

View File

@ -0,0 +1,184 @@
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.mysql.ingest;
import java.sql.SQLException;
import java.sql.Types;
import org.junit.Test;
import static org.fest.assertions.Assertions.assertThat;
import io.debezium.jdbc.TestDatabase;
import io.debezium.mysql.MySQLConnection;
import io.debezium.relational.Column;
import io.debezium.relational.Table;
import io.debezium.relational.Tables;
public class MetadataIT {
@Test
public void shouldLoadMetadata() throws SQLException {
try (MySQLConnection conn = new MySQLConnection(TestDatabase.testConfig("readbinlog_test"));) {
conn.connect();
// Set up the table as one transaction and wait to see the events ...
conn.execute("DROP TABLE IF EXISTS person",
"DROP TABLE IF EXISTS product",
"DROP TABLE IF EXISTS purchased");
conn.execute("CREATE TABLE person ("
+ " name VARCHAR(255) primary key,"
+ " birthdate DATE NULL,"
+ " age INTEGER NULL DEFAULT 10,"
+ " salary DECIMAL(5,2),"
+ " bitStr BIT(18)"
+ ")");
conn.execute("SELECT * FROM person");
Tables tables = new Tables();
conn.readSchema(tables, "readbinlog_test", null, null, null);
//System.out.println(tables);
assertThat(tables.size()).isEqualTo(1);
Table person = tables.forTable("readbinlog_test", null, "person");
assertThat(person).isNotNull();
assertThat(person.filterColumns(col->col.isAutoIncremented())).isEmpty();
assertThat(person.primaryKeyColumnNames()).containsOnly("name");
assertThat(person.columnNames()).containsExactly("name","birthdate","age","salary","bitStr");
assertThat(person.columnWithName("name").name()).isEqualTo("name");
assertThat(person.columnWithName("name").typeName()).isEqualTo("VARCHAR");
assertThat(person.columnWithName("name").jdbcType()).isEqualTo(Types.VARCHAR);
assertThat(person.columnWithName("name").length()).isEqualTo(255);
assertThat(person.columnWithName("name").scale()).isEqualTo(0);
assertThat(person.columnWithName("name").position()).isEqualTo(1);
assertThat(person.columnWithName("name").isAutoIncremented()).isFalse();
assertThat(person.columnWithName("name").isGenerated()).isFalse();
assertThat(person.columnWithName("name").isOptional()).isFalse();
assertThat(person.columnWithName("birthdate").name()).isEqualTo("birthdate");
assertThat(person.columnWithName("birthdate").typeName()).isEqualTo("DATE");
assertThat(person.columnWithName("birthdate").jdbcType()).isEqualTo(Types.DATE);
assertThat(person.columnWithName("birthdate").length()).isEqualTo(10);
assertThat(person.columnWithName("birthdate").scale()).isEqualTo(0);
assertThat(person.columnWithName("birthdate").position()).isEqualTo(2);
assertThat(person.columnWithName("birthdate").isAutoIncremented()).isFalse();
assertThat(person.columnWithName("birthdate").isGenerated()).isFalse();
assertThat(person.columnWithName("birthdate").isOptional()).isTrue();
assertThat(person.columnWithName("age").name()).isEqualTo("age");
assertThat(person.columnWithName("age").typeName()).isEqualTo("INT");
assertThat(person.columnWithName("age").jdbcType()).isEqualTo(Types.INTEGER);
assertThat(person.columnWithName("age").length()).isEqualTo(10);
assertThat(person.columnWithName("age").scale()).isEqualTo(0);
assertThat(person.columnWithName("age").position()).isEqualTo(3);
assertThat(person.columnWithName("age").isAutoIncremented()).isFalse();
assertThat(person.columnWithName("age").isGenerated()).isFalse();
assertThat(person.columnWithName("age").isOptional()).isTrue();
assertThat(person.columnWithName("salary").name()).isEqualTo("salary");
assertThat(person.columnWithName("salary").typeName()).isEqualTo("DECIMAL");
assertThat(person.columnWithName("salary").jdbcType()).isEqualTo(Types.DECIMAL);
assertThat(person.columnWithName("salary").length()).isEqualTo(5);
assertThat(person.columnWithName("salary").scale()).isEqualTo(2);
assertThat(person.columnWithName("salary").position()).isEqualTo(4);
assertThat(person.columnWithName("salary").isAutoIncremented()).isFalse();
assertThat(person.columnWithName("salary").isGenerated()).isFalse();
assertThat(person.columnWithName("salary").isOptional()).isTrue();
assertThat(person.columnWithName("bitStr").name()).isEqualTo("bitStr");
assertThat(person.columnWithName("bitStr").typeName()).isEqualTo("BIT");
assertThat(person.columnWithName("bitStr").jdbcType()).isEqualTo(Types.BIT);
assertThat(person.columnWithName("bitStr").length()).isEqualTo(18);
assertThat(person.columnWithName("bitStr").scale()).isEqualTo(0);
assertThat(person.columnWithName("bitStr").position()).isEqualTo(5);
assertThat(person.columnWithName("bitStr").isAutoIncremented()).isFalse();
assertThat(person.columnWithName("bitStr").isGenerated()).isFalse();
assertThat(person.columnWithName("bitStr").isOptional()).isTrue();
conn.execute("CREATE TABLE product ("
+ " id INT NOT NULL AUTO_INCREMENT,"
+ " createdByDate DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,"
+ " modifiedDate DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,"
+ " PRIMARY KEY(id)"
+ ")");
conn.execute("SELECT * FROM product");
tables = new Tables();
conn.readSchema(tables, "readbinlog_test", null, null, null);
// System.out.println(tables);
assertThat(tables.size()).isEqualTo(2);
Table product = tables.forTable("readbinlog_test", null, "product");
assertThat(product).isNotNull();
assertThat(product.filterColumnNames(Column::isAutoIncremented)).containsOnly("id");
assertThat(product.primaryKeyColumnNames()).containsOnly("id");
assertThat(product.columnNames()).containsExactly("id","createdByDate","modifiedDate");
assertThat(product.columnWithName("id").name()).isEqualTo("id");
assertThat(product.columnWithName("id").typeName()).isEqualTo("INT");
assertThat(product.columnWithName("id").jdbcType()).isEqualTo(Types.INTEGER);
assertThat(product.columnWithName("id").length()).isEqualTo(10);
assertThat(product.columnWithName("id").scale()).isEqualTo(0);
assertThat(product.columnWithName("id").position()).isEqualTo(1);
assertThat(product.columnWithName("id").isAutoIncremented()).isTrue();
assertThat(product.columnWithName("id").isGenerated()).isFalse();
assertThat(product.columnWithName("id").isOptional()).isFalse();
assertThat(product.columnWithName("createdByDate").name()).isEqualTo("createdByDate");
assertThat(product.columnWithName("createdByDate").typeName()).isEqualTo("DATETIME");
assertThat(product.columnWithName("createdByDate").jdbcType()).isEqualTo(Types.TIMESTAMP);
assertThat(product.columnWithName("createdByDate").length()).isEqualTo(19);
assertThat(product.columnWithName("createdByDate").scale()).isEqualTo(0);
assertThat(product.columnWithName("createdByDate").position()).isEqualTo(2);
assertThat(product.columnWithName("createdByDate").isAutoIncremented()).isFalse();
assertThat(product.columnWithName("createdByDate").isGenerated()).isFalse();
assertThat(product.columnWithName("createdByDate").isOptional()).isFalse();
assertThat(product.columnWithName("modifiedDate").name()).isEqualTo("modifiedDate");
assertThat(product.columnWithName("modifiedDate").typeName()).isEqualTo("DATETIME");
assertThat(product.columnWithName("modifiedDate").jdbcType()).isEqualTo(Types.TIMESTAMP);
assertThat(product.columnWithName("modifiedDate").length()).isEqualTo(19);
assertThat(product.columnWithName("modifiedDate").scale()).isEqualTo(0);
assertThat(product.columnWithName("modifiedDate").position()).isEqualTo(3);
assertThat(product.columnWithName("modifiedDate").isAutoIncremented()).isFalse();
assertThat(product.columnWithName("modifiedDate").isGenerated()).isFalse();
assertThat(product.columnWithName("modifiedDate").isOptional()).isFalse();
conn.execute("CREATE TABLE purchased ("
+ " purchaser VARCHAR(255) NOT NULL,"
+ " productId INT NOT NULL,"
+ " purchaseDate DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,"
+ " PRIMARY KEY(productId,purchaser)"
+ ")");
conn.execute("SELECT * FROM purchased");
tables = new Tables();
conn.readSchema(tables, "readbinlog_test", null, null, null);
//System.out.println(tables);
assertThat(tables.size()).isEqualTo(3);
Table purchased = tables.forTable("readbinlog_test", null, "purchased");
assertThat(purchased).isNotNull();
assertThat(purchased.filterColumns(Column::isAutoIncremented)).isEmpty();
assertThat(purchased.primaryKeyColumnNames()).containsOnly("productId","purchaser");
assertThat(purchased.columnNames()).containsExactly("purchaser","productId","purchaseDate");
assertThat(purchased.columnWithName("purchaser").name()).isEqualTo("purchaser");
assertThat(purchased.columnWithName("purchaser").typeName()).isEqualTo("VARCHAR");
assertThat(purchased.columnWithName("purchaser").jdbcType()).isEqualTo(Types.VARCHAR);
assertThat(purchased.columnWithName("purchaser").length()).isEqualTo(255);
assertThat(purchased.columnWithName("purchaser").scale()).isEqualTo(0);
assertThat(purchased.columnWithName("purchaser").position()).isEqualTo(1);
assertThat(purchased.columnWithName("purchaser").isAutoIncremented()).isFalse();
assertThat(purchased.columnWithName("purchaser").isGenerated()).isFalse();
assertThat(purchased.columnWithName("purchaser").isOptional()).isFalse();
assertThat(purchased.columnWithName("productId").name()).isEqualTo("productId");
assertThat(purchased.columnWithName("productId").typeName()).isEqualTo("INT");
assertThat(purchased.columnWithName("productId").jdbcType()).isEqualTo(Types.INTEGER);
assertThat(purchased.columnWithName("productId").length()).isEqualTo(10);
assertThat(purchased.columnWithName("productId").scale()).isEqualTo(0);
assertThat(purchased.columnWithName("productId").position()).isEqualTo(2);
assertThat(purchased.columnWithName("productId").isAutoIncremented()).isFalse();
assertThat(purchased.columnWithName("productId").isGenerated()).isFalse();
assertThat(purchased.columnWithName("productId").isOptional()).isFalse();
assertThat(purchased.columnWithName("purchaseDate").name()).isEqualTo("purchaseDate");
assertThat(purchased.columnWithName("purchaseDate").typeName()).isEqualTo("DATETIME");
assertThat(purchased.columnWithName("purchaseDate").jdbcType()).isEqualTo(Types.TIMESTAMP);
assertThat(purchased.columnWithName("purchaseDate").length()).isEqualTo(19);
assertThat(purchased.columnWithName("purchaseDate").scale()).isEqualTo(0);
assertThat(purchased.columnWithName("purchaseDate").position()).isEqualTo(3);
assertThat(purchased.columnWithName("purchaseDate").isAutoIncremented()).isFalse();
assertThat(purchased.columnWithName("purchaseDate").isGenerated()).isFalse();
assertThat(purchased.columnWithName("purchaseDate").isOptional()).isFalse();
}
}
}

View File

@ -3,7 +3,7 @@
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.ingest.mysql;
package io.debezium.mysql.ingest;
import static org.junit.Assert.fail;
@ -46,12 +46,19 @@
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.jdbc.TestDatabase;
import io.debezium.mysql.MySQLConnection;
public class ReadBinLogIT {
protected static final Logger LOGGER = LoggerFactory.getLogger(ReadBinLogIT.class);
protected static final long DEFAULT_TIMEOUT = TimeUnit.SECONDS.toMillis(3);
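// Sentinel written into expected rows to mean "match any actual value"; see deepEquals(...) below.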
private static final class AnyValue implements Serializable {
private static final long serialVersionUID = 1L;
}
private static final Serializable ANY_OBJECT = new AnyValue();
private JdbcConfiguration config;
private EventCounters counters;
private BinaryLogClient client;
@ -81,7 +88,13 @@ public void beforeEach() throws TimeoutException, IOException, SQLException, Int
// Set up the table as one transaction and wait to see the events ...
conn.execute("DROP TABLE IF EXISTS person",
"CREATE TABLE person (name VARCHAR(255) primary key)");
"CREATE TABLE person (" +
" name VARCHAR(255) primary key," +
" age INTEGER NULL DEFAULT 10," +
" createdAt DATETIME NULL DEFAULT CURRENT_TIMESTAMP," +
" updatedAt DATETIME NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" +
")");
counters.waitFor(2, EventType.QUERY, DEFAULT_TIMEOUT);
counters.reset();
}
@ -104,36 +117,37 @@ public void afterEach() throws IOException, SQLException {
@Test
public void shouldCaptureSingleWriteUpdateDeleteEvents() throws Exception {
// write/insert
conn.execute("INSERT INTO person VALUES ('Georgia')");
conn.execute("INSERT INTO person(name,age) VALUES ('Georgia',30)");
counters.waitFor(1, WriteRowsEventData.class, DEFAULT_TIMEOUT);
List<WriteRowsEventData> writeRowEvents = recordedEventData(WriteRowsEventData.class, 1);
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia"));
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia", 30, any(), any()));
// update
conn.execute("UPDATE person SET name = 'Maggie' WHERE name = 'Georgia'");
counters.waitFor(1, UpdateRowsEventData.class, DEFAULT_TIMEOUT);
List<UpdateRowsEventData> updateRowEvents = recordedEventData(UpdateRowsEventData.class, 1);
assertRows(updateRowEvents.get(0), rows().changeRow("Georgia").to("Maggie"));
assertRows(updateRowEvents.get(0),
rows().changeRow("Georgia", 30, any(), any()).to("Maggie", 30, any(), any()));
// delete
conn.execute("DELETE FROM person WHERE name = 'Maggie'");
counters.waitFor(1, DeleteRowsEventData.class, DEFAULT_TIMEOUT);
List<DeleteRowsEventData> deleteRowEvents = recordedEventData(DeleteRowsEventData.class, 1);
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie"));
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie", 30, any(), any()));
}
@Test
public void shouldCaptureMultipleWriteUpdateDeleteEvents() throws Exception {
// write/insert as a single transaction
conn.execute("INSERT INTO person VALUES ('Georgia')",
"INSERT INTO person VALUES ('Janice')");
conn.execute("INSERT INTO person(name,age) VALUES ('Georgia',30)",
"INSERT INTO person(name,age) VALUES ('Janice',19)");
counters.waitFor(1, QueryEventData.class, DEFAULT_TIMEOUT); // BEGIN
counters.waitFor(1, TableMapEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(2, WriteRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<WriteRowsEventData> writeRowEvents = recordedEventData(WriteRowsEventData.class, 2);
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia"));
assertRows(writeRowEvents.get(1), rows().insertedRow("Janice"));
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia", 30, any(), any()));
assertRows(writeRowEvents.get(1), rows().insertedRow("Janice", 19, any(), any()));
counters.reset();
// update as a single transaction
@ -144,8 +158,8 @@ public void shouldCaptureMultipleWriteUpdateDeleteEvents() throws Exception {
counters.waitFor(2, UpdateRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<UpdateRowsEventData> updateRowEvents = recordedEventData(UpdateRowsEventData.class, 2);
assertRows(updateRowEvents.get(0), rows().changeRow("Georgia").to("Maggie"));
assertRows(updateRowEvents.get(1), rows().changeRow("Janice").to("Jamie"));
assertRows(updateRowEvents.get(0), rows().changeRow("Georgia", 30, any(), any()).to("Maggie", 30, any(), any()));
assertRows(updateRowEvents.get(1), rows().changeRow("Janice", 19, any(), any()).to("Jamie", 19, any(), any()));
counters.reset();
// delete as a single transaction
@ -156,20 +170,21 @@ public void shouldCaptureMultipleWriteUpdateDeleteEvents() throws Exception {
counters.waitFor(2, DeleteRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<DeleteRowsEventData> deleteRowEvents = recordedEventData(DeleteRowsEventData.class, 2);
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie"));
assertRows(deleteRowEvents.get(1), rows().removedRow("Jamie"));
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie", 30, any(), any()));
assertRows(deleteRowEvents.get(1), rows().removedRow("Jamie", 19, any(), any()));
}
@Test
public void shouldCaptureMultipleWriteUpdateDeletesInSingleEvents() throws Exception {
// write/insert as a single statement/transaction
conn.execute("INSERT INTO person VALUES ('Georgia'),('Janice')");
conn.execute("INSERT INTO person(name,age) VALUES ('Georgia',30),('Janice',19)");
counters.waitFor(1, QueryEventData.class, DEFAULT_TIMEOUT); // BEGIN
counters.waitFor(1, TableMapEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, WriteRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<WriteRowsEventData> writeRowEvents = recordedEventData(WriteRowsEventData.class, 1);
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia").insertedRow("Janice"));
assertRows(writeRowEvents.get(0), rows().insertedRow("Georgia", 30, any(), any())
.insertedRow("Janice", 19, any(), any()));
counters.reset();
// update as a single statement/transaction
@ -183,7 +198,8 @@ public void shouldCaptureMultipleWriteUpdateDeletesInSingleEvents() throws Excep
counters.waitFor(1, UpdateRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<UpdateRowsEventData> updateRowEvents = recordedEventData(UpdateRowsEventData.class, 1);
assertRows(updateRowEvents.get(0), rows().changeRow("Georgia").to("Maggie").changeRow("Janice").to("Jamie"));
assertRows(updateRowEvents.get(0), rows().changeRow("Georgia", 30, any(), any()).to("Maggie", 30, any(), any())
.changeRow("Janice", 19, any(), any()).to("Jamie", 19, any(), any()));
counters.reset();
// delete as a single statement/transaction
@ -193,22 +209,24 @@ public void shouldCaptureMultipleWriteUpdateDeletesInSingleEvents() throws Excep
counters.waitFor(1, DeleteRowsEventData.class, DEFAULT_TIMEOUT);
counters.waitFor(1, XidEventData.class, DEFAULT_TIMEOUT); // COMMIT
List<DeleteRowsEventData> deleteRowEvents = recordedEventData(DeleteRowsEventData.class, 1);
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie").removedRow("Jamie"));
assertRows(deleteRowEvents.get(0), rows().removedRow("Maggie", 30, any(), any())
.removedRow("Jamie", 19, any(), any()));
}
@Test
public void shouldQueryInformationSchema() throws Exception {
// long tableId = writeRows.getTableId();
// BitSet columnIds = writeRows.getIncludedColumns();
//
// conn.query("select TABLE_NAME, ROW_FORMAT, TABLE_ROWS, AVG_ROW_LENGTH, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, " +
// "AUTO_INCREMENT, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, CHECKSUM, CREATE_OPTIONS, TABLE_COMMENT " +
// "from INFORMATION_SCHEMA.TABLES " +
// "where TABLE_SCHEMA like 'readbinlog_test' and TABLE_NAME like 'person'", conn::print);
// conn.query("select TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, " +
// "DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, " +
// "CHARACTER_SET_NAME, COLLATION_NAME from INFORMATION_SCHEMA.COLUMNS " +
// "where TABLE_SCHEMA like 'readbinlog_test' and TABLE_NAME like 'person'", conn::print);
// long tableId = writeRows.getTableId();
// BitSet columnIds = writeRows.getIncludedColumns();
//
// conn.query("select TABLE_NAME, ROW_FORMAT, TABLE_ROWS, AVG_ROW_LENGTH, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH,
// DATA_FREE, " +
// "AUTO_INCREMENT, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, CHECKSUM, CREATE_OPTIONS, TABLE_COMMENT " +
// "from INFORMATION_SCHEMA.TABLES " +
// "where TABLE_SCHEMA like 'readbinlog_test' and TABLE_NAME like 'person'", conn::print);
// conn.query("select TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, " +
// "DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, " +
// "CHARACTER_SET_NAME, COLLATION_NAME from INFORMATION_SCHEMA.COLUMNS " +
// "where TABLE_SCHEMA like 'readbinlog_test' and TABLE_NAME like 'person'", conn::print);
}
@ -238,16 +256,20 @@ protected void assertRow(Serializable[] data, Serializable... expected) {
assertThat(data).contains((Object[]) expected);
}
protected void assertRows( WriteRowsEventData eventData, int numRowsInEvent, Serializable... expectedValuesInRows ) {
protected void assertRows(WriteRowsEventData eventData, int numRowsInEvent, Serializable... expectedValuesInRows) {
assertThat(eventData.getRows().size()).isEqualTo(numRowsInEvent);
int valuePosition = 0;
for (Serializable[] row : eventData.getRows() ) {
for ( Serializable value : row ) {
for (Serializable[] row : eventData.getRows()) {
for (Serializable value : row) {
assertThat(value).isEqualTo(expectedValuesInRows[valuePosition++]);
}
}
}
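// Returns a wildcard placeholder that matches any actual value during row comparison,
// e.g. for DATETIME columns populated by DEFAULT CURRENT_TIMESTAMP whose exact values
// cannot be predicted by the test.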
protected Serializable any() {
return ANY_OBJECT;
}
public static class Row {
public Serializable[] fromValues;
public Serializable[] toValues;
@ -255,21 +277,24 @@ public static class Row {
}
public static interface UpdateBuilder {
RowBuilder to( Serializable...values );
RowBuilder to(Serializable... values);
}
public static class RowBuilder {
private List<Row> rows = new ArrayList<>();
private Row nextRow = null;
public RowBuilder insertedRow( Serializable...values ) {
public RowBuilder insertedRow(Serializable... values) {
maybeAddRow();
return changeRow().to(values);
}
public RowBuilder removedRow( Serializable...values ) {
public RowBuilder removedRow(Serializable... values) {
maybeAddRow();
return changeRow(values).to(values);
}
public UpdateBuilder changeRow( Serializable...values ) {
public UpdateBuilder changeRow(Serializable... values) {
maybeAddRow();
nextRow = new Row();
nextRow.fromValues = values;
@ -281,78 +306,95 @@ public RowBuilder to(Serializable... values) {
}
};
}
protected void maybeAddRow() {
if ( nextRow != null ) {
if (nextRow != null) {
rows.add(nextRow);
nextRow = null;
}
}
protected List<Row> rows() {
maybeAddRow();
return rows;
}
protected boolean findInsertedRow( Serializable[] values ) {
protected boolean findInsertedRow(Serializable[] values) {
maybeAddRow();
for ( Iterator<Row> iter = rows.iterator(); iter.hasNext(); ) {
for (Iterator<Row> iter = rows.iterator(); iter.hasNext();) {
Row expectedRow = iter.next();
if ( Arrays.deepEquals(expectedRow.toValues,values)) {
if (deepEquals(expectedRow.toValues, values)) {
iter.remove();
return true;
}
}
return false;
}
protected boolean findDeletedRow( Serializable[] values ) {
protected boolean findDeletedRow(Serializable[] values) {
maybeAddRow();
for ( Iterator<Row> iter = rows.iterator(); iter.hasNext(); ) {
for (Iterator<Row> iter = rows.iterator(); iter.hasNext();) {
Row expectedRow = iter.next();
if ( Arrays.deepEquals(expectedRow.fromValues,values)) {
if (deepEquals(expectedRow.fromValues, values)) {
iter.remove();
return true;
}
}
return false;
}
protected boolean findUpdatedRow( Serializable[] oldValues, Serializable[] newValues ) {
protected boolean findUpdatedRow(Serializable[] oldValues, Serializable[] newValues) {
maybeAddRow();
for ( Iterator<Row> iter = rows.iterator(); iter.hasNext(); ) {
for (Iterator<Row> iter = rows.iterator(); iter.hasNext();) {
Row expectedRow = iter.next();
if ( Arrays.deepEquals(expectedRow.fromValues,oldValues) && Arrays.deepEquals(expectedRow.toValues,newValues)) {
if (deepEquals(expectedRow.fromValues, oldValues) && deepEquals(expectedRow.toValues, newValues)) {
iter.remove();
return true;
}
}
return false;
}
protected boolean deepEquals(Serializable[] expectedValues, Serializable[] actualValues) {
if (expectedValues.length != actualValues.length) return false;
// Make a copy of the actual values, and find all 'AnyValue' instances in the expected values and replace
// their counterpart in the copy of the actual values ...
Serializable[] actualValuesCopy = Arrays.copyOf(actualValues, actualValues.length);
for (int i = 0; i != actualValuesCopy.length; ++i) {
if (expectedValues[i] instanceof AnyValue) actualValuesCopy[i] = expectedValues[i];
}
// Now compare the arrays ...
return Arrays.deepEquals(expectedValues, actualValuesCopy);
}
}
protected RowBuilder rows() {
return new RowBuilder();
}
protected void assertRows( UpdateRowsEventData eventData, RowBuilder rows ) {
protected void assertRows(UpdateRowsEventData eventData, RowBuilder rows) {
assertThat(eventData.getRows().size()).isEqualTo(rows.rows().size());
for (Map.Entry<Serializable[], Serializable[]> row : eventData.getRows() ) {
if ( !rows.findUpdatedRow(row.getKey(), row.getValue()) ) {
fail("Failed to find updated row: " + eventData );
for (Map.Entry<Serializable[], Serializable[]> row : eventData.getRows()) {
if (!rows.findUpdatedRow(row.getKey(), row.getValue())) {
fail("Failed to find updated row: " + eventData);
}
}
}
protected void assertRows( WriteRowsEventData eventData, RowBuilder rows ) {
protected void assertRows(WriteRowsEventData eventData, RowBuilder rows) {
assertThat(eventData.getRows().size()).isEqualTo(rows.rows().size());
for (Serializable[] removedRow : eventData.getRows() ) {
if ( !rows.findInsertedRow(removedRow) ) {
fail("Failed to find inserted row: " + eventData );
for (Serializable[] removedRow : eventData.getRows()) {
if (!rows.findInsertedRow(removedRow)) {
fail("Failed to find inserted row: " + eventData);
}
}
}
protected void assertRows( DeleteRowsEventData eventData, RowBuilder rows ) {
protected void assertRows(DeleteRowsEventData eventData, RowBuilder rows) {
assertThat(eventData.getRows().size()).isEqualTo(rows.rows().size());
for (Serializable[] removedRow : eventData.getRows() ) {
if ( !rows.findDeletedRow(removedRow) ) {
fail("Failed to find removed row: " + eventData );
for (Serializable[] removedRow : eventData.getRows()) {
if (!rows.findDeletedRow(removedRow)) {
fail("Failed to find removed row: " + eventData);
}
}
}

View File

@ -0,0 +1,1131 @@
--
-- BUILD SCRIPT
-- RDBMS: MYSQL 5.0
--
CREATE TABLE AUDITENTRIES
(
TIMESTAMP VARCHAR(50) NOT NULL,
CONTEXT VARCHAR(64) NOT NULL,
ACTIVITY VARCHAR(64) NOT NULL,
RESOURCES VARCHAR(4000) NOT NULL,
PRINCIPAL VARCHAR(255) NOT NULL,
HOSTNAME VARCHAR(64) NOT NULL,
VMID VARCHAR(64) NOT NULL
);
CREATE TABLE AUTHPERMTYPES
(
PERMTYPEUID NUMERIC(10) NOT NULL PRIMARY KEY,
DISPLAYNAME VARCHAR(250) NOT NULL,
FACTORYCLASSNAME VARCHAR(80) NOT NULL
);
CREATE TABLE AUTHPOLICIES
(
POLICYUID NUMERIC(10) NOT NULL PRIMARY KEY,
DESCRIPTION VARCHAR(250),
POLICYNAME VARCHAR(250) NOT NULL
);
CREATE TABLE AUTHPRINCIPALS
(
PRINCIPALTYPE NUMERIC(10) NOT NULL,
PRINCIPALNAME VARCHAR(255) NOT NULL,
POLICYUID NUMERIC(10) NOT NULL REFERENCES AUTHPOLICIES (POLICYUID) ,
GRANTOR VARCHAR(255) NOT NULL,
CONSTRAINT PK_AUTHPOLICYPRINCIPALS UNIQUE (PRINCIPALNAME, POLICYUID)
);
CREATE TABLE AUTHREALMS
(
REALMUID NUMERIC(10) NOT NULL PRIMARY KEY,
REALMNAME VARCHAR(250) NOT NULL UNIQUE,
DESCRIPTION VARCHAR(550)
);
CREATE TABLE CFG_STARTUP_STATE
(STATE INTEGER DEFAULT 0 ,
LASTCHANGED VARCHAR(50) );
CREATE TABLE IDTABLE
(
IDCONTEXT VARCHAR(20) NOT NULL PRIMARY KEY,
NEXTID NUMERIC
);
CREATE TABLE LOGMESSAGETYPES
(
MESSAGELEVEL NUMERIC(10) NOT NULL PRIMARY KEY,
NAME VARCHAR(64) NOT NULL,
DISPLAYNAME VARCHAR(64)
);
CREATE TABLE MM_PRODUCTS
(
PRODUCT_UID NUMERIC NOT NULL PRIMARY KEY,
PRODUCT_NAME VARCHAR(50) NOT NULL,
PRODUCT_DISPLAY_NM VARCHAR(100)
);
CREATE TABLE PRINCIPALTYPES
(
PRINCIPALTYPEUID NUMERIC(10) NOT NULL PRIMARY KEY,
PRINCIPALTYPE VARCHAR(60) NOT NULL,
DISPLAYNAME VARCHAR(80) NOT NULL,
LASTCHANGEDBY VARCHAR(255) NOT NULL,
LASTCHANGED VARCHAR(50)
);
-- ========= STATEMENT 10 ============
CREATE TABLE RT_MDLS
(
MDL_UID NUMERIC(10) NOT NULL PRIMARY KEY,
MDL_UUID VARCHAR(64) NOT NULL,
MDL_NM VARCHAR(255) NOT NULL,
MDL_VERSION VARCHAR(50),
DESCRIPTION VARCHAR(255),
MDL_URI VARCHAR(255),
MDL_TYPE NUMERIC(3),
IS_PHYSICAL CHAR(1) NOT NULL,
MULTI_SOURCED CHAR(1) DEFAULT '0',
VISIBILITY NUMERIC(10)
);
CREATE TABLE RT_MDL_PRP_NMS
(
PRP_UID NUMERIC(10) NOT NULL PRIMARY KEY,
MDL_UID NUMERIC(10) NOT NULL ,
PRP_NM VARCHAR(255) NOT NULL
);
CREATE TABLE RT_MDL_PRP_VLS
(
PRP_UID NUMERIC(10) NOT NULL ,
PART_ID NUMERIC(10) NOT NULL,
PRP_VL VARCHAR(255) NOT NULL,
CONSTRAINT PK_MDL_PRP_VLS UNIQUE (PRP_UID, PART_ID)
);
CREATE TABLE RT_VIRTUAL_DBS
(
VDB_UID NUMERIC(10) NOT NULL PRIMARY KEY,
VDB_VERSION VARCHAR(50) NOT NULL,
VDB_NM VARCHAR(255) NOT NULL,
DESCRIPTION VARCHAR(255),
PROJECT_GUID VARCHAR(64),
VDB_STATUS NUMERIC NOT NULL,
WSDL_DEFINED CHAR(1) DEFAULT '0',
VERSION_BY VARCHAR(100),
VERSION_DATE VARCHAR(50) NOT NULL,
CREATED_BY VARCHAR(100),
CREATION_DATE VARCHAR(50),
UPDATED_BY VARCHAR(100),
UPDATED_DATE VARCHAR(50),
VDB_FILE_NM VARCHAR(2048)
);
CREATE TABLE SERVICESESSIONS
(
SESSIONUID NUMERIC(10) NOT NULL PRIMARY KEY,
PRINCIPAL VARCHAR(255) NOT NULL,
APPLICATION VARCHAR(128) NOT NULL,
CREATIONTIME VARCHAR(50),
CLIENTCOUNT NUMERIC(10) NOT NULL,
STATE NUMERIC(10) NOT NULL,
STATETIME VARCHAR(50),
USESSUBSCRIBER CHAR(1) NOT NULL,
PRODUCTINFO1 VARCHAR(255),
PRODUCTINFO2 VARCHAR(255),
PRODUCTINFO3 VARCHAR(255),
PRODUCTINFO4 VARCHAR(255)
);
-- ========= STATEMENT 15 ============
CREATE INDEX RTMDLS_NM_IX ON RT_MDLS (MDL_NM);
CREATE INDEX RTVIRTUALDBS_NM_IX ON RT_VIRTUAL_DBS (VDB_NM);
CREATE INDEX RTVIRTUALDBS_VRSN_IX ON RT_VIRTUAL_DBS (VDB_VERSION);
CREATE UNIQUE INDEX MDL_PRP_NMS_UIX ON RT_MDL_PRP_NMS (MDL_UID, PRP_NM);
CREATE UNIQUE INDEX PRNCIPALTYP_UIX ON PRINCIPALTYPES (PRINCIPALTYPE);
-- ========= STATEMENT 20 ============
CREATE UNIQUE INDEX AUTHPOLICIES_NAM_UIX ON AUTHPOLICIES (POLICYNAME);
CREATE TABLE AUTHPERMISSIONS
(
PERMISSIONUID NUMERIC(10) NOT NULL PRIMARY KEY,
RESOURCENAME VARCHAR(250) NOT NULL,
ACTIONS NUMERIC(10) NOT NULL,
CONTENTMODIFIER VARCHAR(250),
PERMTYPEUID NUMERIC(10) NOT NULL REFERENCES AUTHPERMTYPES (PERMTYPEUID) ,
REALMUID NUMERIC(10) NOT NULL REFERENCES AUTHREALMS (REALMUID) ,
POLICYUID NUMERIC(10) NOT NULL REFERENCES AUTHPOLICIES (POLICYUID)
);
CREATE TABLE LOGENTRIES
(
TIMESTAMP VARCHAR(50) NOT NULL,
CONTEXT VARCHAR(64) NOT NULL,
MSGLEVEL NUMERIC(10) NOT NULL REFERENCES LOGMESSAGETYPES (MESSAGELEVEL) ,
EXCEPTION VARCHAR(4000),
MESSAGE VARCHAR(2000) NOT NULL,
HOSTNAME VARCHAR(64) NOT NULL,
VMID VARCHAR(64) NOT NULL,
THREADNAME VARCHAR(64) NOT NULL,
VMSEQNUM NUMERIC(7) NOT NULL
);
CREATE TABLE PRODUCTSSESSIONS
(
PRODUCT_UID NUMERIC NOT NULL,
SESSION_UID NUMERIC NOT NULL,
PRIMARY KEY (PRODUCT_UID, SESSION_UID)
);
ALTER TABLE PRODUCTSSESSIONS
ADD CONSTRAINT FK_PRODSESS_PRODS
FOREIGN KEY (PRODUCT_UID)
REFERENCES MM_PRODUCTS (PRODUCT_UID);
ALTER TABLE PRODUCTSSESSIONS
ADD CONSTRAINT FK_PRODSESS_SVCSES
FOREIGN KEY (SESSION_UID)
REFERENCES SERVICESESSIONS (SESSIONUID);
CREATE TABLE RT_VDB_MDLS
(
VDB_UID NUMERIC(10) NOT NULL ,
MDL_UID NUMERIC(10) NOT NULL ,
CNCTR_BNDNG_NM VARCHAR(255)
);
CREATE INDEX AWA_SYS_MSGLEVEL_1E6F845E ON LOGENTRIES (MSGLEVEL);
CREATE UNIQUE INDEX AUTHPERM_UIX ON AUTHPERMISSIONS ( POLICYUID, RESOURCENAME);
CREATE TABLE CS_EXT_FILES (
FILE_UID INTEGER NOT NULL,
CHKSUM NUMERIC(20),
FILE_NAME VARCHAR(255) NOT NULL,
FILE_CONTENTS LONGBLOB,
CONFIG_CONTENTS LONGTEXT,
SEARCH_POS INTEGER,
IS_ENABLED CHAR(1),
FILE_DESC VARCHAR(4000),
CREATED_BY VARCHAR(100),
CREATION_DATE VARCHAR(50),
UPDATED_BY VARCHAR(100),
UPDATE_DATE VARCHAR(50),
FILE_TYPE VARCHAR(30),
CONSTRAINT PK_CS_EXT_FILES PRIMARY KEY (FILE_UID)
)
;
-- ========= STATEMENT 30 ============
ALTER TABLE CS_EXT_FILES ADD CONSTRAINT CSEXFILS_FIL_NA_UK UNIQUE (FILE_NAME);
CREATE TABLE MMSCHEMAINFO_CA
(
SCRIPTNAME VARCHAR(50),
SCRIPTEXECUTEDBY VARCHAR(50),
SCRIPTREV VARCHAR(50),
RELEASEDATE VARCHAR(50),
DATECREATED DATE,
DATEUPDATED DATE,
UPDATEID VARCHAR(50),
METAMATRIXSERVERURL VARCHAR(100)
)
;
CREATE TABLE CS_SYSTEM_PROPS (
PROPERTY_NAME VARCHAR(255),
PROPERTY_VALUE VARCHAR(255)
);
CREATE UNIQUE INDEX SYSPROPS_KEY ON CS_SYSTEM_PROPS (PROPERTY_NAME);
CREATE TABLE CFG_LOCK (
USER_NAME VARCHAR(50) NOT NULL,
DATETIME_ACQUIRED VARCHAR(50) NOT NULL,
DATETIME_EXPIRE VARCHAR(50) NOT NULL,
HOST VARCHAR(100),
LOCK_TYPE NUMERIC (1) );
CREATE TABLE TX_MMXCMDLOG
(REQUESTID VARCHAR(255) NOT NULL,
TXNUID VARCHAR(50) NULL,
CMDPOINT NUMERIC(10) NOT NULL,
SESSIONUID VARCHAR(255) NOT NULL,
APP_NAME VARCHAR(255) NULL,
PRINCIPAL_NA VARCHAR(255) NOT NULL,
VDBNAME VARCHAR(255) NOT NULL,
VDBVERSION VARCHAR(50) NOT NULL,
CREATED_TS VARCHAR(50) NULL,
ENDED_TS VARCHAR(50) NULL,
CMD_STATUS NUMERIC(10) NOT NULL,
SQL_ID NUMERIC(10),
FINL_ROWCNT NUMERIC(10)
)
;
CREATE TABLE TX_SRCCMDLOG
(REQUESTID VARCHAR(255) NOT NULL,
NODEID NUMERIC(10) NOT NULL,
SUBTXNUID VARCHAR(50) NULL,
CMD_STATUS NUMERIC(10) NOT NULL,
MDL_NM VARCHAR(255) NOT NULL,
CNCTRNAME VARCHAR(255) NOT NULL,
CMDPOINT NUMERIC(10) NOT NULL,
SESSIONUID VARCHAR(255) NOT NULL,
PRINCIPAL_NA VARCHAR(255) NOT NULL,
CREATED_TS VARCHAR(50) NULL,
ENDED_TS VARCHAR(50) NULL,
SQL_ID NUMERIC(10) NULL,
FINL_ROWCNT NUMERIC(10) NULL
)
;
CREATE TABLE TX_SQL ( SQL_ID NUMERIC(10) NOT NULL,
SQL_VL TEXT )
;
ALTER TABLE TX_SQL
ADD CONSTRAINT TX_SQL_PK
PRIMARY KEY (SQL_ID)
;
-- ========= STATEMENT 39 ============
--
-- The ITEMS table stores the raw, structure-independent information about the items contained by the Repository. This table is capable of persisting multiple versions of an item.
--
CREATE TABLE MBR_ITEMS
(
ITEM_ID_P1 NUMERIC(20) NOT NULL,
ITEM_ID_P2 NUMERIC(20) NOT NULL,
ITEM_VERSION VARCHAR(80) NOT NULL,
ITEM_NAME VARCHAR(255) NOT NULL,
UPPER_ITEM_NAME VARCHAR(255) NOT NULL,
COMMENT_FLD VARCHAR(2000),
LOCK_HOLDER VARCHAR(100),
LOCK_DATE VARCHAR(50),
CREATED_BY VARCHAR(100) NOT NULL,
CREATION_DATE VARCHAR(50) NOT NULL,
ITEM_TYPE NUMERIC(10) NOT NULL
);
--
-- The ITEM_CONTENTS table stores the contents for items (files) stored in the repository. This table is capable of persisting multiple versions of the contents for an item.
--
CREATE TABLE MBR_ITEM_CONTENTS
(
ITEM_ID_P1 NUMERIC(20) NOT NULL,
ITEM_ID_P2 NUMERIC(20) NOT NULL,
ITEM_VERSION VARCHAR(80) NOT NULL,
ITEM_CONTENT LONGBLOB NOT NULL
);
--
-- The ENTRIES table stores the structure information for all the objects stored in the Repository. This includes both folders and items.
--
CREATE TABLE MBR_ENTRIES
(
ENTRY_ID_P1 NUMERIC(20) NOT NULL,
ENTRY_ID_P2 NUMERIC(20) NOT NULL,
ENTRY_NAME VARCHAR(255) NOT NULL,
UPPER_ENTRY_NAME VARCHAR(255) NOT NULL,
ITEM_ID_P1 NUMERIC(20),
ITEM_ID_P2 NUMERIC(20),
ITEM_VERSION VARCHAR(80),
PARENT_ENTRY_ID_P1 NUMERIC(20),
PARENT_ENTRY_ID_P2 NUMERIC(20),
DELETED NUMERIC(1) NOT NULL
);
--
-- The LABELS table stores the various labels that have been defined.
--
CREATE TABLE MBR_LABELS
(
LABEL_ID_P1 NUMERIC(20) NOT NULL,
LABEL_ID_P2 NUMERIC(20) NOT NULL,
LABEL_FLD VARCHAR(255) NOT NULL,
COMMENT_FLD VARCHAR(2000),
CREATED_BY VARCHAR(100) NOT NULL,
CREATION_DATE VARCHAR(50) NOT NULL
);
--
-- The ITEM_LABELS table maintains the relationships between the ITEMS and the LABELs; that is, the labels that have been applied to each of the item versions. (This is a simple intersect table.)
--
CREATE TABLE MBR_ITEM_LABELS
(
ITEM_ID_P1 NUMERIC(20) NOT NULL,
ITEM_ID_P2 NUMERIC(20) NOT NULL,
ITEM_VERSION VARCHAR(80) NOT NULL,
LABEL_ID_P1 NUMERIC(20) NOT NULL,
LABEL_ID_P2 NUMERIC(20) NOT NULL
);
--
-- The FOLDER_LABELS table maintains the relationships between the ENTRIES (folders) and the LABELS; that is, the labels that have been applied to each of the folders. (This is a simple intersect table.)
--
CREATE TABLE MBR_FOLDER_LABELS
(
ENTRY_ID_P1 NUMERIC(20) NOT NULL,
ENTRY_ID_P2 NUMERIC(20) NOT NULL,
LABEL_ID_P1 NUMERIC(20) NOT NULL,
LABEL_ID_P2 NUMERIC(20) NOT NULL
);
CREATE TABLE MBR_ITEM_TYPES
(
ITEM_TYPE_CODE NUMERIC(10) NOT NULL,
ITEM_TYPE_NM VARCHAR(20) NOT NULL
);
CREATE TABLE MBR_POLICIES
(
POLICY_NAME VARCHAR(250) NOT NULL,
CREATION_DATE VARCHAR(50),
CHANGE_DATE VARCHAR(50),
GRANTOR VARCHAR(32)
);
CREATE TABLE MBR_POL_PERMS
(
ENTRY_ID_P1 NUMERIC(20) NOT NULL,
ENTRY_ID_P2 NUMERIC(20) NOT NULL,
POLICY_NAME VARCHAR(250) NOT NULL,
CREATE_BIT CHAR(1) NOT NULL,
READ_BIT CHAR(1) NOT NULL,
UPDATE_BIT CHAR(1) NOT NULL,
DELETE_BIT CHAR(1) NOT NULL
);
CREATE TABLE MBR_POL_USERS
(
POLICY_NAME VARCHAR(250) NOT NULL,
USER_NAME VARCHAR(80) NOT NULL
);
CREATE UNIQUE INDEX MBR_ENT_NM_PNT_IX ON MBR_ENTRIES (UPPER_ENTRY_NAME,PARENT_ENTRY_ID_P1,PARENT_ENTRY_ID_P2);
-- ========= STATEMENT 50 ============
CREATE INDEX MBR_ITEMS_ID_IX ON MBR_ITEMS (ITEM_ID_P1,ITEM_ID_P2);
CREATE INDEX MBR_ENT_PARNT_IX ON MBR_ENTRIES (PARENT_ENTRY_ID_P1);
CREATE INDEX MBR_ENT_NM_IX ON MBR_ENTRIES (UPPER_ENTRY_NAME);
ALTER TABLE MBR_ITEMS
ADD CONSTRAINT PK_ITEMS
PRIMARY KEY (ITEM_ID_P1,ITEM_ID_P2,ITEM_VERSION);
ALTER TABLE MBR_ITEM_CONTENTS
ADD CONSTRAINT PK_ITEM_CONTENTS
PRIMARY KEY (ITEM_ID_P1,ITEM_ID_P2,ITEM_VERSION);
ALTER TABLE MBR_ENTRIES
ADD CONSTRAINT PK_ENTRIES
PRIMARY KEY (ENTRY_ID_P1,ENTRY_ID_P2);
ALTER TABLE MBR_LABELS
ADD CONSTRAINT PK_LABELS
PRIMARY KEY (LABEL_ID_P1,LABEL_ID_P2);
ALTER TABLE MBR_ITEM_LABELS
ADD CONSTRAINT PK_ITEM_LABELS
PRIMARY KEY (ITEM_ID_P1,ITEM_ID_P2,ITEM_VERSION,LABEL_ID_P1,LABEL_ID_P2);
ALTER TABLE MBR_FOLDER_LABELS
ADD CONSTRAINT PK_FOLDER_LABELS
PRIMARY KEY (ENTRY_ID_P1,ENTRY_ID_P2,LABEL_ID_P1,LABEL_ID_P2);
ALTER TABLE MBR_POLICIES
ADD CONSTRAINT PK_POLICIES
PRIMARY KEY (POLICY_NAME);
-- ========= STATEMENT 60 ============
ALTER TABLE MBR_POL_PERMS
ADD CONSTRAINT PK_POL_PERMS
PRIMARY KEY (ENTRY_ID_P1,ENTRY_ID_P2,POLICY_NAME);
ALTER TABLE MBR_POL_USERS
ADD CONSTRAINT PK_POL_USERS
PRIMARY KEY (POLICY_NAME,USER_NAME);
-- (generated from DtcBase/ObjectIndex)
CREATE OR REPLACE VIEW MBR_READ_ENTRIES (ENTRY_ID_P1,ENTRY_ID_P2,USER_NAME) AS
SELECT MBR_POL_PERMS.ENTRY_ID_P1, MBR_POL_PERMS.ENTRY_ID_P2,
MBR_POL_USERS.USER_NAME
FROM MBR_POL_PERMS, MBR_POL_USERS , CS_SYSTEM_PROPS
where MBR_POL_PERMS.POLICY_NAME=MBR_POL_USERS.POLICY_NAME
AND (CS_SYSTEM_PROPS.PROPERTY_NAME='metamatrix.authorization.metabase.CheckingEnabled'
AND CS_SYSTEM_PROPS.PROPERTY_VALUE ='true'
AND MBR_POL_PERMS.READ_BIT='1')
UNION ALL
SELECT ENTRY_ID_P1, ENTRY_ID_P2, NULL
FROM MBR_ENTRIES ,CS_SYSTEM_PROPS
WHERE CS_SYSTEM_PROPS.PROPERTY_NAME='metamatrix.authorization.metabase.CheckingEnabled'
AND CS_SYSTEM_PROPS.PROPERTY_VALUE ='false'
;
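-- Illustrative query only (not executed by this script): list the entries readable by a
-- hypothetical user 'jdoe'; the NULL-user rows make every entry readable when checking is disabled.
--   SELECT ENTRY_ID_P1, ENTRY_ID_P2 FROM MBR_READ_ENTRIES
--   WHERE USER_NAME = 'jdoe' OR USER_NAME IS NULL;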
CREATE INDEX MBR_POL_PERMS_IX1 ON MBR_POL_PERMS (POLICY_NAME, READ_BIT);
CREATE INDEX LOGENTRIES_TMSTMP_IX ON LOGENTRIES (TIMESTAMP);
CREATE TABLE DD_TXN_STATES
(
ID INTEGER NOT NULL,
STATE VARCHAR(128) NOT NULL
);
CREATE TABLE DD_TXN_LOG
(
ID BIGINT NOT NULL,
USER_NME VARCHAR(128),
BEGIN_TXN VARCHAR(50),
END_TXN VARCHAR(50),
ACTION VARCHAR(128),
TXN_STATE INTEGER
);
CREATE TABLE DD_SHREDQUEUE
(
QUEUE_ID NUMERIC(19) NOT NULL,
UUID1 NUMERIC(20) NOT NULL,
UUID2 NUMERIC(20) NOT NULL,
OBJECT_ID VARCHAR(44) NOT NULL,
NAME VARCHAR(128) NOT NULL,
VERSION VARCHAR(20),
MDL_PATH VARCHAR(2000),
CMD_ACTION NUMERIC(1) NOT NULL,
TXN_ID NUMERIC(19) ,
SUB_BY_NME VARCHAR(100),
SUB_BY_DATE VARCHAR(50)
);
CREATE UNIQUE INDEX DDSQ_QUE_IX ON DD_SHREDQUEUE (QUEUE_ID)
;
CREATE UNIQUE INDEX DDSQ_TXN_IX ON DD_SHREDQUEUE (TXN_ID)
;
-- ========= STATEMENT 70 ============
CREATE INDEX DDSQ_UUID_IX ON DD_SHREDQUEUE (OBJECT_ID)
;
-- == new DTC start ==
-- (generated from Models)
CREATE TABLE MMR_MODELS
(
ID BIGINT NOT NULL,
NAME VARCHAR(256),
PATH VARCHAR(1024),
NAMESPACE VARCHAR(1024),
IS_METAMODEL SMALLINT,
VERSION VARCHAR(64),
IS_INCOMPLETE SMALLINT,
SHRED_TIME DATETIME
);
-- (generated from Resources)
CREATE TABLE MMR_RESOURCES
(
MODEL_ID BIGINT NOT NULL,
CONTENT LONGTEXT NOT NULL
);
-- (generated from Objects)
CREATE TABLE MMR_OBJECTS
(
ID BIGINT NOT NULL,
MODEL_ID BIGINT NOT NULL,
NAME VARCHAR(256),
PATH VARCHAR(1024),
CLASS_NAME VARCHAR(256),
UUID VARCHAR(64),
NDX_PATH VARCHAR(256),
IS_UNRESOLVED SMALLINT
);
-- (generated from ResolvedObjects)
CREATE TABLE MMR_RESOLVED_OBJECTS
(
OBJ_ID BIGINT NOT NULL,
MODEL_ID BIGINT NOT NULL,
CLASS_ID BIGINT NOT NULL,
CONTAINER_ID BIGINT
);
-- (generated from ReferenceFeatures)
CREATE TABLE MMR_REF_FEATURES
(
MODEL_ID BIGINT NOT NULL,
OBJ_ID BIGINT NOT NULL,
NDX INT,
DATATYPE_ID BIGINT,
LOWER_BOUND INT,
UPPER_BOUND INT,
IS_CHANGEABLE SMALLINT,
IS_UNSETTABLE SMALLINT,
IS_CONTAINMENT SMALLINT,
OPPOSITE_ID BIGINT
);
-- (generated from AttributeFeatures)
CREATE TABLE MMR_ATTR_FEATURES
(
MODEL_ID BIGINT NOT NULL,
OBJ_ID BIGINT NOT NULL,
NDX INT,
DATATYPE_ID BIGINT,
LOWER_BOUND INT,
UPPER_BOUND INT,
IS_CHANGEABLE SMALLINT,
IS_UNSETTABLE SMALLINT
);
-- (generated from References)
CREATE TABLE MMR_REFS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
TO_ID BIGINT NOT NULL
);
-- (generated from BooleanAttributes)
CREATE TABLE MMR_BOOLEAN_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE SMALLINT NOT NULL
);
-- (generated from ByteAttributes)
CREATE TABLE MMR_BYTE_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE CHAR(1) NOT NULL
);
-- ========= STATEMENT 80 ============
-- (generated from CharAttributes)
CREATE TABLE MMR_CHAR_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE CHAR(1)
);
-- (generated from ClobAttributes)
CREATE TABLE MMR_CLOB_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE LONGTEXT
);
-- (generated from DoubleAttributes)
CREATE TABLE MMR_DOUBLE_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE DOUBLE NOT NULL
);
-- (generated from EnumeratedAttributes)
CREATE TABLE MMR_ENUM_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE INT NOT NULL
);
-- (generated from FloatAttributes)
CREATE TABLE MMR_FLOAT_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE FLOAT NOT NULL
);
-- (generated from IntAttributes)
CREATE TABLE MMR_INT_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE INT NOT NULL
);
-- (generated from LongAttributes)
CREATE TABLE MMR_LONG_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE BIGINT NOT NULL
);
-- (generated from ShortAttributes)
CREATE TABLE MMR_SHORT_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE SMALLINT NOT NULL
);
-- (generated from StringAttributes)
CREATE TABLE MMR_STRING_ATTRS
(
MODEL_ID BIGINT NOT NULL,
OBJECT_ID BIGINT NOT NULL,
FEATURE_ID BIGINT NOT NULL,
NDX INT NOT NULL,
VALUE VARCHAR(4000)
);
-- Index length too long for MMR_MODELS(NAME,PATH)
CREATE INDEX MOD_PATH_NDX ON MMR_MODELS (NAME);
-- ========= STATEMENT 90 ============
CREATE INDEX MOD_PATH2_NDX ON MMR_MODELS (PATH);
CREATE INDEX MOD_NAMESPACE_NDX ON MMR_MODELS (NAMESPACE);
CREATE INDEX OBJ_UUID_NDX ON MMR_OBJECTS (UUID);
CREATE INDEX RES_OBJ_MODEL_NDX ON MMR_RESOLVED_OBJECTS (MODEL_ID);
CREATE INDEX RES_OBJ_CLASS_NDX ON MMR_RESOLVED_OBJECTS (CLASS_ID);
CREATE INDEX RF_DATATYPE_NDX ON MMR_REF_FEATURES (DATATYPE_ID);
CREATE INDEX RF_MODEL_NDX ON MMR_REF_FEATURES (MODEL_ID);
CREATE INDEX AF_DATATYPE_NDX ON MMR_ATTR_FEATURES (DATATYPE_ID);
CREATE INDEX AF_MODEL_NDX ON MMR_ATTR_FEATURES (MODEL_ID);
CREATE INDEX BOL_FEATURE_NDX ON MMR_BOOLEAN_ATTRS (FEATURE_ID);
-- ========= STATEMENT 100 ============
CREATE INDEX BOL_MODEL_NDX ON MMR_BOOLEAN_ATTRS (MODEL_ID);
CREATE INDEX BYT_FEATURE_NDX ON MMR_BYTE_ATTRS (FEATURE_ID);
CREATE INDEX BYT_MODEL_NDX ON MMR_BYTE_ATTRS (MODEL_ID);
CREATE INDEX CHR_FEATURE_NDX ON MMR_CHAR_ATTRS (FEATURE_ID);
CREATE INDEX CHR_MODEL_NDX ON MMR_CHAR_ATTRS (MODEL_ID);
CREATE INDEX CLOB_FEATURE_NDX ON MMR_CLOB_ATTRS (FEATURE_ID);
CREATE INDEX CLOB_MODEL_NDX ON MMR_CLOB_ATTRS (MODEL_ID);
CREATE INDEX DBL_FEATURE_NDX ON MMR_DOUBLE_ATTRS (FEATURE_ID);
CREATE INDEX DBL_MODEL_NDX ON MMR_DOUBLE_ATTRS (MODEL_ID);
CREATE INDEX ENUM_FEATURE_NDX ON MMR_ENUM_ATTRS (FEATURE_ID);
-- ========= STATEMENT 110 ============
CREATE INDEX ENUM_MODEL_NDX ON MMR_ENUM_ATTRS (MODEL_ID);
CREATE INDEX FLT_FEATURE_NDX ON MMR_FLOAT_ATTRS (FEATURE_ID);
CREATE INDEX FLT_MODEL_NDX ON MMR_FLOAT_ATTRS (MODEL_ID);
CREATE INDEX INT_FEATURE_NDX ON MMR_INT_ATTRS (FEATURE_ID);
CREATE INDEX INT_MODEL_NDX ON MMR_INT_ATTRS (MODEL_ID);
CREATE INDEX LNG_FEATURE_NDX ON MMR_LONG_ATTRS (FEATURE_ID);
CREATE INDEX LNG_MODEL_NDX ON MMR_LONG_ATTRS (MODEL_ID);
CREATE INDEX REF_FEATURE_NDX ON MMR_REFS (FEATURE_ID);
CREATE INDEX REF_TO_NDX ON MMR_REFS (TO_ID);
CREATE INDEX REF_MODEL_NDX ON MMR_REFS (MODEL_ID);
-- ========= STATEMENT 120 ============
CREATE INDEX SHR_FEATURE_NDX ON MMR_SHORT_ATTRS (FEATURE_ID);
CREATE INDEX SHR_MODEL_NDX ON MMR_SHORT_ATTRS (MODEL_ID);
CREATE INDEX STR_FEATURE_NDX ON MMR_STRING_ATTRS (FEATURE_ID);
CREATE INDEX STR_MODEL_NDX ON MMR_STRING_ATTRS (MODEL_ID);
ALTER TABLE MMR_MODELS
ADD CONSTRAINT MOD_PK
PRIMARY KEY (ID);
ALTER TABLE MMR_RESOURCES
ADD CONSTRAINT RSRC_PK
PRIMARY KEY (MODEL_ID);
ALTER TABLE MMR_OBJECTS
ADD CONSTRAINT OBJ_PK
PRIMARY KEY (ID);
ALTER TABLE MMR_RESOLVED_OBJECTS
ADD CONSTRAINT RES_OBJ_PK
PRIMARY KEY (OBJ_ID);
ALTER TABLE MMR_REF_FEATURES
ADD CONSTRAINT RF_PK
PRIMARY KEY (OBJ_ID);
ALTER TABLE MMR_ATTR_FEATURES
ADD CONSTRAINT AF_PK
PRIMARY KEY (OBJ_ID);
-- ========= STATEMENT 130 ============
ALTER TABLE MMR_REFS
ADD CONSTRAINT REF_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_BOOLEAN_ATTRS
ADD CONSTRAINT BOL_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_BYTE_ATTRS
ADD CONSTRAINT BYT_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_CHAR_ATTRS
ADD CONSTRAINT CHR_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_CLOB_ATTRS
ADD CONSTRAINT CLOB_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_DOUBLE_ATTRS
ADD CONSTRAINT DBL_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_ENUM_ATTRS
ADD CONSTRAINT ENUM_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_FLOAT_ATTRS
ADD CONSTRAINT FLT_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_INT_ATTRS
ADD CONSTRAINT INT_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_LONG_ATTRS
ADD CONSTRAINT LNG_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
-- ========= STATEMENT 140 ============
ALTER TABLE MMR_SHORT_ATTRS
ADD CONSTRAINT SHR_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
ALTER TABLE MMR_STRING_ATTRS
ADD CONSTRAINT STR_PK
PRIMARY KEY (OBJECT_ID,FEATURE_ID,NDX);
-- View for obtaining the features by metaclass
-- (don't use parenthesis)
CREATE OR REPLACE VIEW MMR_FEATURES AS
SELECT MMR_MODELS.NAMESPACE AS NAMESPACE,
PARENTS.NAME AS CLASS_NAME,
MMR_OBJECTS.NAME AS FEATURE_NAME,
MMR_ATTR_FEATURES.OBJ_ID AS FEATURE_ID,
'Attribute' AS FEATURE_TYPE
FROM MMR_MODELS JOIN MMR_OBJECTS ON MMR_MODELS.ID=MMR_OBJECTS.MODEL_ID
JOIN MMR_ATTR_FEATURES ON MMR_OBJECTS.ID = MMR_ATTR_FEATURES.OBJ_ID
JOIN MMR_RESOLVED_OBJECTS ON MMR_OBJECTS.ID = MMR_RESOLVED_OBJECTS.OBJ_ID
JOIN MMR_OBJECTS PARENTS ON MMR_RESOLVED_OBJECTS.CONTAINER_ID = PARENTS.ID
UNION ALL
SELECT MMR_MODELS.NAMESPACE AS NAMESPACE,
PARENTS.NAME AS CLASS_NAME,
MMR_OBJECTS.NAME AS FEATURE_NAME,
MMR_REF_FEATURES.OBJ_ID AS FEATURE_ID,
'Reference' AS FEATURE_TYPE
FROM MMR_MODELS JOIN MMR_OBJECTS ON MMR_MODELS.ID=MMR_OBJECTS.MODEL_ID
JOIN MMR_REF_FEATURES ON MMR_OBJECTS.ID = MMR_REF_FEATURES.OBJ_ID
JOIN MMR_RESOLVED_OBJECTS ON MMR_OBJECTS.ID = MMR_RESOLVED_OBJECTS.OBJ_ID
JOIN MMR_OBJECTS PARENTS ON MMR_RESOLVED_OBJECTS.CONTAINER_ID = PARENTS.ID
;
-- View for obtaining the feature values
-- (don't use parenthesis)
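-- Each branch of the UNION below selects one typed attribute table, exposing its VALUE in the
-- matching *_VALUE column and NULL in all others, so a single query can fetch a feature value
-- regardless of its type.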
CREATE OR REPLACE VIEW MMR_FEATURE_VALUES AS
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
VALUE AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_BOOLEAN_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
VALUE AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_BYTE_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
VALUE AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_CHAR_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
VALUE AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_DOUBLE_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
VALUE AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_FLOAT_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
VALUE AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_INT_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
VALUE AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_LONG_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
VALUE AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_SHORT_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
VALUE AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_STRING_ATTRS
UNION ALL
SELECT OBJECT_ID, MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
VALUE AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_CLOB_ATTRS
UNION ALL
SELECT MMR_ENUM_ATTRS.OBJECT_ID, MMR_ENUM_ATTRS.MODEL_ID, MMR_ENUM_ATTRS.FEATURE_ID, MMR_ENUM_ATTRS.NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
MMR_OBJECTS.ID AS ENUM_ID,
MMR_REFS.NDX AS ENUM_VALUE,
MMR_OBJECTS.NAME AS ENUM_NAME,
NULL AS REF_OBJ_ID,
NULL AS REF_OBJ_NAME
FROM MMR_ENUM_ATTRS JOIN MMR_OBJECTS ON MMR_ENUM_ATTRS.VALUE = MMR_OBJECTS.ID
JOIN MMR_RESOLVED_OBJECTS ON MMR_OBJECTS.ID = MMR_RESOLVED_OBJECTS.OBJ_ID
JOIN MMR_REFS ON MMR_RESOLVED_OBJECTS.CONTAINER_ID = MMR_REFS.OBJECT_ID
AND MMR_RESOLVED_OBJECTS.OBJ_ID = MMR_REFS.TO_ID
UNION ALL
SELECT OBJECT_ID, MMR_REFS.MODEL_ID AS MODEL_ID, FEATURE_ID, NDX,
NULL AS BOOLEAN_VALUE,
NULL AS BYTE_VALUE,
NULL AS CHAR_VALUE,
NULL AS DOUBLE_VALUE,
NULL AS FLOAT_VALUE,
NULL AS INT_VALUE,
NULL AS LONG_VALUE,
NULL AS SHORT_VALUE,
NULL AS STRING_VALUE,
NULL AS CLOB_VALUE,
NULL AS ENUM_ID,
NULL AS ENUM_VALUE,
NULL AS ENUM_NAME,
MMR_OBJECTS.ID AS REF_OBJ_ID,
MMR_OBJECTS.NAME AS REF_OBJ_NAME
FROM MMR_REFS JOIN MMR_OBJECTS ON MMR_REFS.TO_ID = MMR_OBJECTS.ID;
-- == new DTC end ==
INSERT INTO MMSCHEMAINFO_CA (SCRIPTNAME,SCRIPTEXECUTEDBY,SCRIPTREV,
RELEASEDATE, DATECREATED,DATEUPDATED, UPDATEID,METAMATRIXSERVERURL)
SELECT 'MM_CREATE.SQL',USER(),'Seneca.3117', '10/03/2008 12:01 AM',SYSDATE(),SYSDATE(),'','';
-- ========= STATEMENT 145 ============

View File

@ -0,0 +1,430 @@
--
-- BUILD SCRIPT
-- RDBMS: MYSQL 5.0
--
RENAME TABLE blue_table TO red_table,
orange_table TO green_table,
black_table TO white_table;
RENAME DATABASE blue_db TO red_db;
RENAME SCHEMA blue_schema TO red_schema;
CREATE TABLE RT_VDB_MDLS
(
VDB_UID NUMERIC(10) NOT NULL ,
MDL_UID NUMERIC(10) NOT NULL ,
CNCTR_BNDNG_NM VARCHAR(255)
);
CREATE INDEX AWA_SYS_MSGLEVEL_1E6F845E ON LOGENTRIES (MSGLEVEL);
CREATE UNIQUE INDEX AUTHPERM_UIX ON AUTHPERMISSIONS ( POLICYUID, RESOURCENAME);
CREATE TABLE CS_EXT_FILES (
FILE_UID INTEGER NOT NULL,
CHKSUM NUMERIC(20),
FILE_NAME VARCHAR(255) NOT NULL,
FILE_CONTENTS LONGBLOB,
CONFIG_CONTENTS LONGTEXT,
SEARCH_POS INTEGER,
IS_ENABLED CHAR(1),
FILE_DESC VARCHAR(4000),
CONSTRAINT PK_CS_EXT_FILES PRIMARY KEY (FILE_UID)
)
;
ALTER TABLE CS_EXT_FILES ADD CONSTRAINT CSEXFILS_FIL_NA_UK UNIQUE (FILE_NAME);
CREATE TABLE MMSCHEMAINFO_CA
(
SCRIPTNAME VARCHAR(50),
SCRIPTEXECUTEDBY VARCHAR(50),
SCRIPTREV VARCHAR(50),
RELEASEDATE VARCHAR(50),
DATECREATED DATE,
DATEUPDATED DATE,
UPDATEID VARCHAR(50),
METAMATRIXSERVERURL VARCHAR(100)
)
;
CREATE INDEX MOD_PATH_NDX ON MMR_MODELS (NAME);
-- ============ 10 STATEMENTS ====================
ALTER TABLE MMR_MODELS
ADD CONSTRAINT MOD_PK
PRIMARY KEY (ID);
ALTER TABLE MMR_RESOURCES
ADD CONSTRAINT RSRC_PK
PRIMARY KEY (MODEL_ID);
-- View for obtaining the features by metaclass
-- (don't use parenthesis)
CREATE OR REPLACE VIEW MMR_FEATURES AS
SELECT MMR_MODELS.NAMESPACE AS NAMESPACE,
PARENTS.NAME AS CLASS_NAME,
MMR_OBJECTS.NAME AS FEATURE_NAME,
MMR_ATTR_FEATURES.OBJ_ID AS FEATURE_ID,
'Attribute' AS FEATURE_TYPE
FROM MMR_MODELS JOIN MMR_OBJECTS ON MMR_MODELS.ID=MMR_OBJECTS.MODEL_ID
JOIN MMR_ATTR_FEATURES ON MMR_OBJECTS.ID = MMR_ATTR_FEATURES.OBJ_ID
JOIN MMR_RESOLVED_OBJECTS ON MMR_OBJECTS.ID = MMR_RESOLVED_OBJECTS.OBJ_ID
JOIN MMR_OBJECTS PARENTS ON MMR_RESOLVED_OBJECTS.CONTAINER_ID = PARENTS.ID
UNION ALL
SELECT MMR_MODELS.NAMESPACE AS NAMESPACE,
PARENTS.NAME AS CLASS_NAME,
MMR_OBJECTS.NAME AS FEATURE_NAME,
MMR_REF_FEATURES.OBJ_ID AS FEATURE_ID,
'Reference' AS FEATURE_TYPE
FROM MMR_MODELS JOIN MMR_OBJECTS ON MMR_MODELS.ID=MMR_OBJECTS.MODEL_ID
JOIN MMR_REF_FEATURES ON MMR_OBJECTS.ID = MMR_REF_FEATURES.OBJ_ID
JOIN MMR_RESOLVED_OBJECTS ON MMR_OBJECTS.ID = MMR_RESOLVED_OBJECTS.OBJ_ID
JOIN MMR_OBJECTS PARENTS ON MMR_RESOLVED_OBJECTS.CONTAINER_ID = PARENTS.ID
;
INSERT INTO MMSCHEMAINFO_CA (SCRIPTNAME,SCRIPTEXECUTEDBY,SCRIPTREV,
RELEASEDATE, DATECREATED,DATEUPDATED, UPDATEID,METAMATRIXSERVERURL)
SELECT 'MM_CREATE.SQL',USER(),'Seneca.3117', '10/03/2008 12:01 AM',SYSDATE(),SYSDATE(),'','';
ALTER TABLE t MODIFY latin1_text_col TEXT CHARACTER SET utf8;
ALTER TABLE t MODIFY latin1_varchar_col VARCHAR(M) CHARACTER SET utf8;
ALTER TABLE t1 CHANGE c1 c1 BLOB;
ALTER TABLE t1 CHANGE c1 c1 TEXT CHARACTER SET utf8;
ALTER TABLE t1
PARTITION BY HASH(id)
PARTITIONS 8;
CREATE TABLE t1 (
id INT,
year_col INT
)
PARTITION BY RANGE (year_col) (
PARTITION p0 VALUES LESS THAN (1991),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (1999)
);
CREATE TABLE t2 (
name VARCHAR (30),
started DATE,
a BIT,
b VARCHAR(20) NOT NULL
)
PARTITION BY HASH( YEAR(started) )
PARTITIONS 6;
ALTER TABLE t1 DROP PARTITION p0, p1;
-- ============ 20 STATEMENTS ====================
ALTER TABLE t1 RENAME t2;
ALTER TABLE t2 MODIFY a TINYINT NOT NULL, CHANGE b c CHAR(20);
ALTER TABLE t2 ADD d TIMESTAMP;
ALTER TABLE t2 ADD INDEX (d), ADD UNIQUE (a);
ALTER TABLE t2 DROP COLUMN c;
ALTER TABLE t2 ADD c INT UNSIGNED NOT NULL AUTO_INCREMENT,
ADD PRIMARY KEY (c);
ALTER TABLE t1 TABLESPACE ts_1 STORAGE DISK;
ALTER TABLE t2 STORAGE DISK;
ALTER TABLE t3 MODIFY c2 INT STORAGE MEMORY;
ALTER TABLE T2 ADD id INT AUTO_INCREMENT PRIMARY KEY;
-- ============ 30 STATEMENTS ====================
ALTER DATABASE `#mysql50#a-b-c` UPGRADE DATA DIRECTORY NAME;
CREATE EVENT myevent
ON SCHEDULE
EVERY 6 HOUR
COMMENT 'A sample comment.'
DO
UPDATE myschema.mytable SET mycol = mycol + 1;
--ALTER
-- [DEFINER = { user | CURRENT_USER }]
-- EVENT event_name
-- [ON SCHEDULE schedule]
-- [ON COMPLETION [NOT] PRESERVE]
-- [RENAME TO new_event_name]
-- [ENABLE | DISABLE | DISABLE ON SLAVE]
-- [COMMENT 'comment']
-- [DO sql_statement]
ALTER EVENT myevent
ON SCHEDULE
EVERY 12 HOUR
STARTS CURRENT_TIMESTAMP + INTERVAL 4 HOUR;
ALTER EVENT myevent
ON SCHEDULE
AT CURRENT_TIMESTAMP + INTERVAL 1 DAY
DO
TRUNCATE TABLE myschema.mytable;
ALTER EVENT myevent
DISABLE;
ALTER EVENT myevent
RENAME TO yourevent;
ALTER EVENT olddb.myevent
RENAME TO newdb.myevent;
--ALTER LOGFILE GROUP logfile_group
-- ADD UNDOFILE 'file_name'
-- [INITIAL_SIZE [=] size]
-- [WAIT]
-- ENGINE [=] engine_name
ALTER LOGFILE GROUP lg_3
ADD UNDOFILE 'undo_10.dat'
INITIAL_SIZE=32M
ENGINE=NDBCLUSTER;
--ALTER FUNCTION func_name [characteristic ...]
--
--characteristic:
-- { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
-- | SQL SECURITY { DEFINER | INVOKER }
-- | COMMENT 'string'
ALTER FUNCTION break_wind MODIFIES SQL DATA;
ALTER FUNCTION break_wind SQL SECURITY INVOKER;
-- ============ 40 STATEMENTS ====================
ALTER FUNCTION break_wind COMMENT 'no more wind please';
ALTER PROCEDURE fall_back MODIFIES SQL DATA;
ALTER PROCEDURE fall_back SQL SECURITY INVOKER;
ALTER PROCEDURE fall_back COMMENT 'no more wind please';
ALTER SERVER s OPTIONS (USER 'sally');
--ALTER TABLESPACE tablespace_name
-- {ADD|DROP} DATAFILE 'file_name'
-- [INITIAL_SIZE [=] size]
-- [WAIT]
-- ENGINE [=] engine_name
ALTER TABLESPACE tspace_name ADD DATAFILE 'file_name' INITIAL_SIZE = 9999 WAIT;
--ALTER
-- [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}]
-- [DEFINER = { user | CURRENT_USER }]
-- [SQL SECURITY { DEFINER | INVOKER }]
-- VIEW view_name [(column_list)]
-- AS select_statement
-- [WITH [CASCADED | LOCAL] CHECK OPTION]
ALTER VIEW great_view (c1, c2) AS SELECT * FROM table_a;
ALTER VIEW great_view (c1, c2) AS SELECT * FROM table_a WITH LOCAL CHECK OPTION;
ALTER VIEW ALGORITHM = MERGE great_view AS SELECT * FROM table_a;
ALTER VIEW DEFINER = 'joe'@'there.com' great_view AS SELECT * FROM table_a;
-- ============ 50 STATEMENTS ====================
ALTER VIEW SQL SECURITY INVOKER great_view AS SELECT * FROM table_a;
ALTER VIEW ALGORITHM = MERGE DEFINER = 'joe'@'there.com' SQL SECURITY INVOKER great_view AS SELECT * FROM table_a;
--CREATE {DATABASE | SCHEMA} [IF NOT EXISTS] db_name
-- [create_specification] ...
--
--create_specification:
-- [DEFAULT] CHARACTER SET [=] charset_name
-- | [DEFAULT] COLLATE [=] collation_name
CREATE DATABASE db_1;
CREATE DATABASE db_2 DEFAULT CHARACTER SET = utf8;
CREATE DATABASE db_3 CHARACTER SET utf10;
CREATE DATABASE IF NOT EXISTS db_4 DEFAULT CHARACTER SET = utf8;
CREATE SCHEMA schema_1;
CREATE SCHEMA schema_2 DEFAULT CHARACTER SET = utf8;
CREATE SCHEMA schema_3 CHARACTER SET utf10;
CREATE SCHEMA IF NOT EXISTS schema_4 DEFAULT CHARACTER SET = utf8;
-- ============ 60 STATEMENTS ====================
CREATE TABLE lookup (id INT) ENGINE = MEMORY;
CREATE INDEX id_index USING BTREE ON lookup (id);
-- CREATE [ONLINE|OFFLINE] [UNIQUE|FULLTEXT|SPATIAL] INDEX index_name [USING index_type] ON tbl_name (index_col_name,...)
CREATE ONLINE INDEX index_1 ON lookup (id);
CREATE OFFLINE INDEX index_2 ON lookup (id);
CREATE ONLINE UNIQUE INDEX index_3 ON lookup (id);
CREATE OFFLINE FULLTEXT INDEX index_4 ON lookup (id);
CREATE UNIQUE INDEX index_5 ON lookup (id);
CREATE FULLTEXT INDEX index_6 ON lookup (id);
CREATE SPATIAL INDEX index_7 ON lookup (id);
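-- Commented-out variant combining a uniqueness modifier with an explicit index type,
-- reusing the MEMORY table defined above:
--CREATE UNIQUE INDEX index_8 USING HASH ON lookup (id);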
--CREATE LOGFILE GROUP lf_group_name
-- ADD UNDOFILE 'undo_file'
-- [INITIAL_SIZE [=] initial_size]
-- [UNDO_BUFFER_SIZE [=] undo_buffer_size]
-- [REDO_BUFFER_SIZE [=] redo_buffer_size]
-- [NODEGROUP [=] nodegroup_id]
-- [WAIT]
-- [COMMENT [=] comment_text]
-- ENGINE [=] engine_name
CREATE LOGFILE GROUP lf_group_name_1 ADD UNDOFILE 'my_undo_file'
ENGINE some_engine_name;
-- ============ 70 STATEMENTS ====================
CREATE LOGFILE GROUP lf_group_name_2 ADD UNDOFILE 'my_undo_file'
INITIAL_SIZE = 9999 WAIT COMMENT = 'some bogus comment'
ENGINE some_engine_name;
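-- Commented-out variant exercising the buffer-size and node-group options from the grammar above:
--CREATE LOGFILE GROUP lf_group_name_3 ADD UNDOFILE 'my_undo_file'
--    UNDO_BUFFER_SIZE = 8M REDO_BUFFER_SIZE = 8M NODEGROUP = 1
--    ENGINE = some_engine_name;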
CREATE DEFINER = 'admin'@'localhost' PROCEDURE account_count()
BEGIN
SELECT 'Number of accounts:', COUNT(*) FROM mysql.user;
END;
CREATE DEFINER = 'admin'@'localhost' FUNCTION account_count() RETURNS INT
    SQL SECURITY INVOKER
BEGIN
  RETURN (SELECT COUNT(*) FROM mysql.user);
END;
CREATE SERVER server_1
FOREIGN DATA WRAPPER mysql
OPTIONS (USER 'Remote', HOST '192.168.1.106', DATABASE 'test');
--CREATE TABLESPACE tablespace_name
-- ADD DATAFILE 'file_name'
-- USE LOGFILE GROUP logfile_group
-- [EXTENT_SIZE [=] extent_size]
-- [INITIAL_SIZE [=] initial_size]
-- [AUTOEXTEND_SIZE [=] autoextend_size]
-- [MAX_SIZE [=] max_size]
-- [NODEGROUP [=] nodegroup_id]
-- [WAIT]
-- [COMMENT [=] comment_text]
-- ENGINE [=] engine_name
CREATE TABLESPACE tbl_space_1 ADD DATAFILE 'my_data_file' USE LOGFILE GROUP my_lf_group
ENGINE = my_engine_1;
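-- Commented-out variant exercising the size-related options from the CREATE TABLESPACE grammar above:
--CREATE TABLESPACE tbl_space_2 ADD DATAFILE 'my_data_file_2' USE LOGFILE GROUP my_lf_group
--    EXTENT_SIZE = 4M INITIAL_SIZE = 128M AUTOEXTEND_SIZE = 4M MAX_SIZE = 512M WAIT
--    ENGINE = my_engine_1;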
--CREATE
-- [DEFINER = { user | CURRENT_USER }]
-- TRIGGER trigger_name trigger_time trigger_event
-- ON tbl_name FOR EACH ROW trigger_stmt
--
-- trigger_time: BEFORE | AFTER
-- trigger_event: INSERT | UPDATE | DELETE
CREATE TRIGGER testref BEFORE INSERT ON test1
FOR EACH ROW BEGIN
INSERT INTO test2 SET a2 = NEW.a1;
DELETE FROM test3 WHERE a3 = NEW.a1;
UPDATE test4 SET b4 = b4 + 1 WHERE a4 = NEW.a1;
END;
CREATE DEFINER = 'user'@'hostname' TRIGGER my_trigger_1 BEFORE INSERT ON test1
FOR EACH ROW BEGIN
INSERT INTO test2 SET a2 = NEW.a1;
DELETE FROM test3 WHERE a3 = NEW.a1;
UPDATE test4 SET b4 = b4 + 1 WHERE a4 = NEW.a1;
END;
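-- Commented-out variant with an AFTER trigger_time, a DELETE trigger_event, and a
-- single-statement body (no BEGIN ... END block):
--CREATE TRIGGER my_trigger_2 AFTER DELETE ON test1
--    FOR EACH ROW
--    UPDATE test4 SET b4 = b4 - 1 WHERE a4 = OLD.a1;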
--CREATE
-- [OR REPLACE]
-- [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}]
-- [DEFINER = { user | CURRENT_USER }]
-- [SQL SECURITY { DEFINER | INVOKER }]
-- VIEW view_name [(column_list)]
-- AS select_statement
-- [WITH [CASCADED | LOCAL] CHECK OPTION]
CREATE VIEW my_view_1 AS SELECT * FROM table_a;
CREATE ALGORITHM = UNDEFINED VIEW my_view_2 AS SELECT * FROM table_a;
CREATE ALGORITHM = UNDEFINED DEFINER = CURRENT_USER VIEW my_view_3 AS SELECT * FROM table_a;
-- ============ 80 STATEMENTS ====================
CREATE DEFINER = CURRENT_USER SQL SECURITY INVOKER VIEW my_view_4 AS SELECT * FROM table_a;
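-- Commented-out variant exercising the [OR REPLACE] option of CREATE VIEW
-- (view name is illustrative):
--CREATE OR REPLACE SQL SECURITY DEFINER VIEW my_view_5 AS SELECT * FROM table_a;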
DROP DATABASE db_name_1;
DROP DATABASE IF EXISTS db_name_2;
DROP SCHEMA schema_name_1;
DROP SCHEMA IF EXISTS schema_name_2;
DROP EVENT my_event_1;
DROP EVENT IF EXISTS my_event_2;
DROP PROCEDURE my_proc_1;
DROP PROCEDURE IF EXISTS my_proc_2;
DROP FUNCTION my_funct_1;
-- ============ 90 STATEMENTS ====================
DROP FUNCTION IF EXISTS my_funct_2;
DROP SERVER my_server_1;
DROP SERVER IF EXISTS my_server_2;
--DROP [TEMPORARY] TABLE [IF EXISTS]
-- tbl_name [, tbl_name] ...
-- [RESTRICT | CASCADE]
DROP TABLE table_1;
DROP TEMPORARY TABLE table_2 CASCADE;
DROP TEMPORARY TABLE IF EXISTS table_3;
DROP TABLE IF EXISTS table_4, table_5, table_6;
DROP TABLE IF EXISTS table_7, table_8 RESTRICT;
--DROP TABLESPACE tablespace_name
-- ENGINE [=] engine_name
DROP TABLESPACE my_tbl_space_1 ENGINE = my_eng;
DROP TABLESPACE my_tbl_space_2 ENGINE my_eng;
-- ============ 100 STATEMENTS ====================
DROP TRIGGER my_schema_1.blue_trigger;
DROP TRIGGER IF EXISTS my_schema_2.red_trigger;
DROP VIEW view_1;
DROP VIEW IF EXISTS view_2 CASCADE;
DROP VIEW IF EXISTS view_3, view_4, view_5;
DROP VIEW IF EXISTS view_6, view_7 RESTRICT;
-- ============ 106 STATEMENTS ====================