Search in sources:

Example 1 with TopicConfigs

Use of io.connect.scylladb.topictotable.TopicConfigs in the project kafka-connect-scylladb by scylladb.

From the class ScyllaDbSinkTaskHelper, the method getBoundStatementForRecord:

public BoundStatement getBoundStatementForRecord(SinkRecord record) {
    final String tableName = record.topic().replaceAll("\\.", "_").replaceAll("-", "_");
    BoundStatement boundStatement = null;
    TopicConfigs topicConfigs = null;
    if (scyllaDbSinkConnectorConfig.topicWiseConfigs.containsKey(tableName)) {
        topicConfigs = scyllaDbSinkConnectorConfig.topicWiseConfigs.get(tableName);
        if (topicConfigs.getMappingStringForTopic() != null && !topicConfigs.isScyllaColumnsMapped()) {
            topicConfigs.setTablePartitionAndColumnValues(record);
        }
        topicConfigs.setTtlAndTimeStampIfAvailable(record);
    }
    if (null == record.value()) {
        // A null value is a tombstone record: treat it as a delete when deletes are enabled.
        boolean deletionEnabled = topicConfigs != null
            ? topicConfigs.isDeletesEnabled()
            : scyllaDbSinkConnectorConfig.deletesEnabled;
        if (deletionEnabled) {
            if (this.session.tableExists(tableName)) {
                final RecordToBoundStatementConverter boundStatementConverter = this.session.delete(tableName);
                final RecordToBoundStatementConverter.State state =
                    boundStatementConverter.convert(record, null, ScyllaDbConstants.DELETE_OPERATION);
                Preconditions.checkState(state.parameters > 0, "key must contain the columns in the primary key.");
                boundStatement = state.statement;
            } else {
                log.warn("put() - table '{}' does not exist. Skipping delete.", tableName);
            }
        } else {
            throw new DataException(String.format("Record with null value found for the key '%s'. If you are trying to delete the record set " + "scylladb.deletes.enabled = true or topic.my_topic.my_ks.my_table.deletesEnabled = true in " + "your connector configuration.", record.key()));
        }
    } else {
        this.session.createOrAlterTable(tableName, record, topicConfigs);
        final RecordToBoundStatementConverter boundStatementConverter = this.session.insert(tableName, topicConfigs);
        final RecordToBoundStatementConverter.State state =
            boundStatementConverter.convert(record, topicConfigs, ScyllaDbConstants.INSERT_OPERATION);
        boundStatement = state.statement;
    }
    if (topicConfigs != null) {
        log.trace("Topic mapped Consistency level : " + topicConfigs.getConsistencyLevel() + ", Record/Topic mapped timestamp : " + topicConfigs.getTimeStamp());
        boundStatement.setConsistencyLevel(topicConfigs.getConsistencyLevel());
        boundStatement.setDefaultTimestamp(topicConfigs.getTimeStamp());
    } else {
        boundStatement.setConsistencyLevel(this.scyllaDbSinkConnectorConfig.consistencyLevel);
        // Timestamps in Kafka (record.timestamp()) are in millisecond precision,
        // while Scylla expects a microsecond precision: 1 ms = 1000 us.
        boundStatement.setDefaultTimestamp(record.timestamp() * 1000);
    }
    return boundStatement;
}
Also used: DataException (org.apache.kafka.connect.errors.DataException), TopicConfigs (io.connect.scylladb.topictotable.TopicConfigs), BoundStatement (com.datastax.driver.core.BoundStatement)
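
The bound statement returned above already carries its consistency level and write timestamp, so a caller only has to execute it. Below is a minimal sketch of a put() loop that could drive this method; the helper and session fields are assumptions for illustration, not the project's exact wiring:

import java.util.Collection;
import com.datastax.driver.core.BoundStatement;
import org.apache.kafka.connect.sink.SinkRecord;

// Hypothetical driver loop for the method above. "helper" and "session" stand in
// for the sink task's ScyllaDbSinkTaskHelper and session fields; this is
// illustrative wiring, not the project's actual put() implementation.
public void put(Collection<SinkRecord> records) {
    for (SinkRecord record : records) {
        BoundStatement statement = helper.getBoundStatementForRecord(record);
        // The statement can be null when a delete was skipped because the table is missing.
        if (statement != null) {
            session.executeStatement(statement);
        }
    }
}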

Example 2 with TopicConfigs

Use of io.connect.scylladb.topictotable.TopicConfigs in the project kafka-connect-scylladb by scylladb.

From the class ScyllaDbSchemaBuilder, the method alter:

void alter(final ScyllaDbSchemaKey key, String tableName, SinkRecord record,
        TableMetadata.Table tableMetadata, TopicConfigs topicConfigs) {
    Preconditions.checkNotNull(tableMetadata, "tableMetadata cannot be null.");
    Preconditions.checkNotNull(record.valueSchema(), "valueSchema cannot be null.");
    log.trace("alter() - tableMetadata = '{}' ", tableMetadata);
    Map<String, DataType> addedColumns = new LinkedHashMap<>();
    if (topicConfigs != null && topicConfigs.isScyllaColumnsMapped()) {
        if (topicConfigs.getTablePartitionKeyMap().keySet().size() != tableMetadata.primaryKey().size()) {
            throw new DataException(String.format("Cannot alter primary key of a ScyllaDb Table. Existing primary key: '%s', " + "Primary key mapped in 'topic.my_topic.my_ks.my_table.mapping' config: '%s", Joiner.on("', '").join(tableMetadata.primaryKey()), Joiner.on("', '").join(topicConfigs.getTablePartitionKeyMap().keySet())));
        }
        for (Map.Entry<String, TopicConfigs.KafkaScyllaColumnMapper> entry : topicConfigs.getTableColumnMap().entrySet()) {
            String columnName = entry.getValue().getScyllaColumnName();
            log.trace("alter for mapping() - Checking if table has '{}' column.", columnName);
            final TableMetadata.Column columnMetadata = tableMetadata.columnMetadata(columnName);
            if (null == columnMetadata) {
                log.debug("alter for mapping() - Adding column '{}'", columnName);
                final DataType dataType = dataType(entry.getValue().getKafkaRecordField().schema());
                addedColumns.put(Metadata.quoteIfNecessary(columnName), dataType);
            } else {
                log.trace("alter for mapping() - Table already has '{}' column.", columnName);
            }
        }
    } else {
        for (final Field field : record.valueSchema().fields()) {
            log.trace("alter() - Checking if table has '{}' column.", field.name());
            final TableMetadata.Column columnMetadata = tableMetadata.columnMetadata(field.name());
            if (null == columnMetadata) {
                log.debug("alter() - Adding column '{}'", field.name());
                DataType dataType = dataType(field.schema());
                addedColumns.put(Metadata.quoteIfNecessary(field.name()), dataType);
            } else {
                log.trace("alter() - Table already has '{}' column.", field.name());
            }
        }
    }
    if (!addedColumns.isEmpty()) {
        final Alter alterTable = SchemaBuilder.alterTable(this.config.keyspace, tableName);
        if (!this.config.tableManageEnabled) {
            List<String> requiredAlterStatements = addedColumns.entrySet().stream().map(e -> alterTable.addColumn(e.getKey()).type(e.getValue()).toString()).collect(Collectors.toList());
            throw new DataException(String.format("Alter statement(s) needed. Missing column(s): '%s'\n%s;", Joiner.on("', '").join(addedColumns.keySet()), Joiner.on(';').join(requiredAlterStatements)));
        } else {
            String query = alterTable.withOptions()
                .compressionOptions(config.tableCompressionAlgorithm)
                .buildInternal();
            this.session.executeQuery(query);
            for (Map.Entry<String, DataType> e : addedColumns.entrySet()) {
                final String columnName = e.getKey();
                final DataType dataType = e.getValue();
                final Statement alterStatement = alterTable.addColumn(columnName).type(dataType);
                this.session.executeStatement(alterStatement);
            }
            this.session.onTableChanged(this.config.keyspace, tableName);
        }
    }
    this.schemaLookup.put(key, DEFAULT);
}
Also used: Date (org.apache.kafka.connect.data.Date), DataException (org.apache.kafka.connect.errors.DataException), LoggerFactory (org.slf4j.LoggerFactory), SchemaChangeListenerBase (com.datastax.driver.core.SchemaChangeListenerBase), HashSet (java.util.HashSet), LinkedHashMap (java.util.LinkedHashMap), Strings (com.google.common.base.Strings), Map (java.util.Map), Alter (com.datastax.driver.core.schemabuilder.Alter), org.apache.kafka.connect.data (org.apache.kafka.connect.data), Create (com.datastax.driver.core.schemabuilder.Create), TopicConfig (org.apache.kafka.common.config.TopicConfig), SchemaBuilder (com.datastax.driver.core.schemabuilder.SchemaBuilder), TopicConfigs (io.connect.scylladb.topictotable.TopicConfigs), Logger (org.slf4j.Logger), MoreObjects (com.google.common.base.MoreObjects), Set (java.util.Set), TableOptions (com.datastax.driver.core.schemabuilder.TableOptions), ComparisonChain (com.google.common.collect.ComparisonChain), Collectors (java.util.stream.Collectors), Objects (java.util.Objects), TimeUnit (java.util.concurrent.TimeUnit), Metadata (com.datastax.driver.core.Metadata), List (java.util.List), DataType (com.datastax.driver.core.DataType), SinkRecord (org.apache.kafka.connect.sink.SinkRecord), Preconditions (com.google.common.base.Preconditions), CacheBuilder (com.google.common.cache.CacheBuilder), Cache (com.google.common.cache.Cache), Statement (com.datastax.driver.core.Statement), Joiner (com.google.common.base.Joiner)
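
When table management is disabled (config.tableManageEnabled is false), alter() refuses to touch the schema and instead reports the DDL it would have run. A small sketch of how that per-column DDL is built with the driver's SchemaBuilder, mirroring the stream in the method above; the keyspace, table, and column names are invented:

import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.Alter;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class AlterDdlSketch {
    public static void main(String[] args) {
        // Same builder chain the alter() method uses per missing column;
        // "my_ks", "addresses", and "city" are placeholder names.
        Alter alterTable = SchemaBuilder.alterTable("my_ks", "addresses");
        String ddl = alterTable.addColumn("city").type(DataType.text()).toString();
        // Prints something like: ALTER TABLE my_ks.addresses ADD city text;
        System.out.println(ddl);
    }
}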

Example 3 with TopicConfigs

Use of io.connect.scylladb.topictotable.TopicConfigs in the project kafka-connect-scylladb by scylladb.

From the class RecordConverter, the method convert:

public T convert(SinkRecord record, TopicConfigs topicConfigs, String operationType) {
    Object recordObject = ScyllaDbConstants.DELETE_OPERATION.equals(operationType) ? record.key() : record.value();
    T result = this.newValue();
    Map<String, TopicConfigs.KafkaScyllaColumnMapper> columnDetailsMap = null;
    Preconditions.checkNotNull(recordObject,
        (ScyllaDbConstants.DELETE_OPERATION.equals(operationType) ? "key " : "value ") + "cannot be null.");
    if (topicConfigs != null && topicConfigs.isScyllaColumnsMapped()) {
        columnDetailsMap = topicConfigs.getTableColumnMap();
        Preconditions.checkNotNull(record.key(), "key cannot be null.");
        findRecordTypeAndConvert(result, record.key(), topicConfigs.getTablePartitionKeyMap());
        for (Header header : record.headers()) {
            if (topicConfigs.getTableColumnMap().containsKey(header.key())) {
                TopicConfigs.KafkaScyllaColumnMapper headerKafkaScyllaColumnMapper =
                    topicConfigs.getTableColumnMap().get(header.key());
                parseStructAndSetInStatement(result, header.schema(),
                    headerKafkaScyllaColumnMapper.getKafkaRecordField(), header.value(),
                    headerKafkaScyllaColumnMapper.getScyllaColumnName());
            }
        }
    }
    findRecordTypeAndConvert(result, recordObject, columnDetailsMap);
    return result;
}
Also used: Header (org.apache.kafka.connect.header.Header), TopicConfigs (io.connect.scylladb.topictotable.TopicConfigs)
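
The header loop above only consults headers whose keys appear in the topic's column map. A hedged sketch of building a SinkRecord that carries such a header; the topic, header key, and values are all invented for illustration:

import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.sink.SinkRecord;

// A record whose "trace_id" header can be mapped to a Scylla column, assuming the
// topic's mapping config lists a "trace_id" entry; all names here are hypothetical.
ConnectHeaders headers = new ConnectHeaders();
headers.addString("trace_id", "abc-123");
SinkRecord record = new SinkRecord(
    "my_topic", 0,
    Schema.STRING_SCHEMA, "key-1",
    Schema.STRING_SCHEMA, "value-1",
    42L, System.currentTimeMillis(), TimestampType.CREATE_TIME, headers);
// convert(record, topicConfigs, ScyllaDbConstants.INSERT_OPERATION) would then bind
// the key, the mapped header, and the value fields into the statement.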

Aggregations

TopicConfigs (io.connect.scylladb.topictotable.TopicConfigs): 3
DataException (org.apache.kafka.connect.errors.DataException): 2
BoundStatement (com.datastax.driver.core.BoundStatement): 1
DataType (com.datastax.driver.core.DataType): 1
Metadata (com.datastax.driver.core.Metadata): 1
SchemaChangeListenerBase (com.datastax.driver.core.SchemaChangeListenerBase): 1
Statement (com.datastax.driver.core.Statement): 1
Alter (com.datastax.driver.core.schemabuilder.Alter): 1
Create (com.datastax.driver.core.schemabuilder.Create): 1
SchemaBuilder (com.datastax.driver.core.schemabuilder.SchemaBuilder): 1
TableOptions (com.datastax.driver.core.schemabuilder.TableOptions): 1
Joiner (com.google.common.base.Joiner): 1
MoreObjects (com.google.common.base.MoreObjects): 1
Preconditions (com.google.common.base.Preconditions): 1
Strings (com.google.common.base.Strings): 1
Cache (com.google.common.cache.Cache): 1
CacheBuilder (com.google.common.cache.CacheBuilder): 1
ComparisonChain (com.google.common.collect.ComparisonChain): 1
HashSet (java.util.HashSet): 1
LinkedHashMap (java.util.LinkedHashMap): 1