Example 11 with EventCreationException

use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.

the class OracleAvroGenericEventFactory method getJdbcArray.

private static Array getJdbcArray(ResultSet rs, Schema schema) throws EventCreationException {
    // getElementType() throws if the schema is not an ARRAY type
    Schema elementSchema = schema.getElementType();
    String dbFieldName = SchemaHelper.getMetaField(elementSchema, "dbFieldName");
    if (dbFieldName == null) {
        throw new EventCreationException("array field is missing required metadata dbFieldName. " + schema.getName());
    }
    Array array;
    try {
        array = rs.getArray(dbFieldName);
    } catch (SQLException e) {
        throw new EventCreationException("unable to read array field: " + dbFieldName + ": " + e.getMessage(), e);
    }
    return array;
}
Also used : GenericArray(org.apache.avro.generic.GenericArray) Array(java.sql.Array) SQLException(java.sql.SQLException) EventCreationException(com.linkedin.databus2.producers.EventCreationException) Schema(org.apache.avro.Schema)
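
The java.sql.Array returned above is only a handle; the caller still has to unpack it. A minimal consumption sketch, assuming Oracle's convention of returning each element as a Struct in column 2 of the array's ResultSet (Example 14 below relies on the same convention):

// Hedged usage sketch; getJdbcArray, rs, and schema are from the example above.
Array array = getJdbcArray(rs, schema); // may throw EventCreationException
ResultSet arrayRs = array.getResultSet();
while (arrayRs.next()) {
    // Oracle returns the element payload in column 2 of the array's ResultSet
    Struct element = (Struct) arrayRs.getObject(2);
    Object[] attributes = element.getAttributes();
    // map attributes onto the Avro element schema, e.g. via dbFieldPosition metadata
}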

Example 12 with EventCreationException

use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.

the class OpenReplicatorAvroEventFactory method createAndAppendEvent.

public int createAndAppendEvent(DbChangeEntry changeEntry, DbusEventBufferAppendable eventBuffer, boolean enableTracing, DbusEventsStatisticsCollector dbusEventsStatisticsCollector) throws EventCreationException, UnsupportedKeyException, DatabusException {
    Object keyObj = obtainKey(changeEntry);
    // Construct the Databus event key; DbusEventKey determines the key type from the key object
    DbusEventKey eventKey = new DbusEventKey(keyObj);
    short lPartitionId = _partitionFunction.getPartition(eventKey);
    // Compute the MD5-based schema ID for the change entry's schema
    SchemaId schemaId = SchemaId.createWithMd5(changeEntry.getSchema());
    byte[] payload = serializeEvent(changeEntry.getRecord());
    DbusEventInfo eventInfo = new DbusEventInfo(changeEntry.getOpCode(), changeEntry.getScn(), (short) _pSourceId, lPartitionId, changeEntry.getTimestampInNanos(), (short) _sourceId, schemaId.getByteArray(), payload, enableTracing, false);
    boolean success = eventBuffer.appendEvent(eventKey, eventInfo, dbusEventsStatisticsCollector);
    return success ? payload.length : -1;
}
Also used : DbusEventInfo(com.linkedin.databus.core.DbusEventInfo) SchemaId(com.linkedin.databus2.schemas.SchemaId) DbusEventKey(com.linkedin.databus.core.DbusEventKey)
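
A hedged call-site sketch for createAndAppendEvent; the eventFactory, eventBuffer, and statsCollector names are illustrative wiring, not from the databus source. The method returns the serialized payload length on success and -1 when appendEvent rejects the event:

// Illustrative call site (assumed names): append one change entry and check the result.
int bytesWritten = eventFactory.createAndAppendEvent(changeEntry, eventBuffer,
        false /* enableTracing */, statsCollector);
if (bytesWritten < 0) {
    // appendEvent() returned false: the buffer rejected the event
    throw new DatabusException("append failed for SCN " + changeEntry.getScn());
}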

Example 13 with EventCreationException

use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.

the class BootstrapSrcDBEventReader method readEventsForSource.

private EventReaderSummary readEventsForSource(OracleTriggerMonitoredSourceInfo sourceInfo, long maxScn) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, IOException {
    int retryMax = _numRetries;
    int numRetry = 0;
    Connection conn = null;
    PreparedStatement pstmt = null;
    ResultSet rs = null;
    KeyType keyType = _pKeyTypeMap.get(sourceInfo.getEventView());
    String keyName = _pKeyNameMap.get(sourceInfo.getEventView());
    String sql = _eventQueryMap.get(sourceInfo.getEventView());
    String endSrcKey = _endSrcKeyMap.get(sourceInfo.getEventView());
    if (sql == null) {
        sql = generateEventQuery2(sourceInfo, keyName, keyType, getPKIndex(sourceInfo), getQueryHint(sourceInfo));
    }
    LOG.info("Chunked  Query for Source (" + sourceInfo + ") is :" + sql);
    LOG.info("EndSrcKey for source (" + sourceInfo + ") is :" + endSrcKey);
    PrimaryKeyTxn endKeyTxn = null;
    if ((null != endSrcKey) && (!endSrcKey.trim().isEmpty())) {
        if (KeyType.LONG == keyType)
            endKeyTxn = new PrimaryKeyTxn(Long.valueOf(endSrcKey));
        else
            endKeyTxn = new PrimaryKeyTxn(endSrcKey);
    }
    long timestamp = System.currentTimeMillis();
    int numRowsFetched = 0;
    long totalEventSize = 0;
    long timeStart = System.currentTimeMillis();
    long checkpointInterval = _commitInterval;
    boolean done = false;
    long lastTime = timeStart;
    long numRows = 0;
    PrimaryKeyTxn pKey = null;
    String minKeySQL = generateMinKeyQuery(sourceInfo, keyName);
    String srcName = sourceInfo.getEventView();
    LOG.info("Bootstrapping for Source :" + srcName);
    String lastKey = _lastKeys.get(sourceInfo.getEventView());
    File f = _keyTxnFilesMap.get(srcName);
    FileWriter oStream = new FileWriter(f, f.exists());
    BufferedWriter keyTxnWriter = new BufferedWriter(oStream, _keyTxnBufferSizeMap.get(srcName));
    _bootstrapSeedWriter.startEvents();
    RateMonitor seedingRate = new RateMonitor("Seeding Rate");
    RateMonitor queryRate = new RateMonitor("Query Rate");
    seedingRate.start();
    seedingRate.suspend();
    queryRate.start();
    queryRate.suspend();
    boolean isException = false;
    long totProcessTime = 0;
    try {
        conn = _dataSource.getConnection();
        pstmt = conn.prepareStatement(sql);
        if (_enableNumRowsQuery)
            numRows = getNumRows(conn, getTableName(sourceInfo));
        else
            numRows = -1;
        long currRowId = _lastRows.get(sourceInfo.getEventView());
        /**
         * The first key to be seeded is decided in the following order:
         * 1. Use bootstrap_seeder_state's last srcKey as the key for the first chunk.
         * 2. If (1) is empty, use the passed-in begin srcKey.
         * 3. If (2) is also empty, use Oracle's minKey as the first chunk key.
         */
        if (null == lastKey) {
            lastKey = _beginSrcKeyMap.get(sourceInfo.getEventView());
            LOG.info("No last Src Key available in bootstrap_seeder_state for source (" + sourceInfo + ". Trying beginSrc Key from config :" + lastKey);
        }
        if ((null == lastKey) || (lastKey.trim().isEmpty())) {
            if (KeyType.LONG == keyType)
                pKey = new PrimaryKeyTxn(executeAndGetLong(minKeySQL));
            else
                pKey = new PrimaryKeyTxn(executeAndGetString(minKeySQL));
        } else {
            if (KeyType.LONG == keyType)
                pKey = new PrimaryKeyTxn(Long.parseLong(lastKey));
            else
                pKey = new PrimaryKeyTxn(lastKey);
        }
        PrimaryKeyTxn lastRoundKeyTxn = new PrimaryKeyTxn(pKey);
        PrimaryKeyTxn lastKeyTxn = new PrimaryKeyTxn(pKey);
        long numUniqueKeysThisRound = 0;
        boolean first = true;
        _rate.resume();
        while (!done) {
            LOG.info("MinKey being used for this round:" + pKey);
            numUniqueKeysThisRound = 0;
            try {
                lastRoundKeyTxn.copyFrom(pKey);
                if (KeyType.LONG == keyType) {
                    pstmt.setLong(1, pKey.getKey());
                } else {
                    String key = pKey.getKeyStr();
                    pstmt.setString(1, key);
                }
                pstmt.setLong(2, _numRowsPerQuery);
                pstmt.setFetchSize(_numRowsPrefetch);
                if (_oraclePreparedStatementClass.isInstance(pstmt)) {
                    try {
                        _setLobPrefetchSizeMethod.invoke(pstmt, _LOBPrefetchSize);
                    } catch (Exception e) {
                        throw new EventCreationException("Unable to set Lob Prefetch size" + e.getMessage());
                    }
                }
                LOG.info("Executing Oracle Query :" + sql + ". Key: " + pKey + ",NumRows: " + _numRowsPerQuery);
                queryRate.resume();
                rs = pstmt.executeQuery();
                queryRate.suspend();
                LOG.info("Total Query Latency :" + queryRate.getDuration() / 1000000000L);
                long totLatency = 0;
                long txnId = 0;
                int numRowsThisRound = 0;
                seedingRate.resume();
                while (rs.next()) {
                    _rate.tick();
                    seedingRate.tick();
                    currRowId++;
                    txnId = rs.getLong(2);
                    if (KeyType.LONG == keyType) {
                        pKey.setKeyTxn(rs.getLong(1), txnId);
                    } else {
                        String key = rs.getString(1);
                        pKey.setKeyStrTxn(key, txnId);
                    }
                    //Write TXN to file
                    pKey.writeTo(keyTxnWriter);
                    //LOG.info("TXNId is :" + txnId + ",RowId is :" + currRowId);
                    long start = System.nanoTime();
                    long eventSize = sourceInfo.getFactory().createAndAppendEvent(maxScn, timestamp, rs, _bootstrapSeedWriter, false, null);
                    long latency = System.nanoTime() - start;
                    totLatency += latency;
                    totalEventSize += eventSize;
                    // accumulate per-event processing time (nanoseconds -> milliseconds)
                    totProcessTime += latency / (1000 * 1000);
                    numRowsFetched++;
                    numRowsThisRound++;
                    if (lastKeyTxn.compareKey(pKey) != 0) {
                        numUniqueKeysThisRound++;
                        lastKeyTxn.copyFrom(pKey);
                    }
                    if (numRowsFetched % checkpointInterval == 0) {
                        // Commit this batch and reinit
                        _bootstrapSeedWriter.endEvents(currRowId, timestamp, null);
                        keyTxnWriter.flush();
                        _bootstrapSeedWriter.startEvents();
                        long procTime = totLatency / 1000000000;
                        long currTime = System.currentTimeMillis();
                        long diff = (currTime - lastTime) / 1000;
                        long timeSinceStart = (currTime - timeStart) / 1000;
                        double currRate = _rate.getRate();
                        currRate = (currRate <= 0) ? 1 : currRate;
                        if (_enableNumRowsQuery) {
                            double remTime = (numRows - currRowId) / (currRate);
                            LOG.info("Processed " + checkpointInterval + " rows in " + diff + " seconds, Processing Time (seconds) so far :" + (procTime) + ",Seconds elapsed since start :" + (timeSinceStart) + ",Approx Seconds remaining :" + remTime + ",Overall Row Rate:" + _rate.getRate() + "(" + seedingRate.getRate() + ")" + ",NumRows Fetched so far:" + numRowsFetched + ". TotalEventSize :" + totalEventSize);
                        } else {
                            LOG.info("Processed " + checkpointInterval + " rows in " + diff + " seconds, Processing Time (seconds) so far :" + (procTime) + ",Seconds elapsed since start :" + (timeSinceStart) + ",Overall Row Rate:" + _rate.getRate() + "(" + seedingRate.getRate() + ")" + ",NumRows Fetched so far:" + numRowsFetched + ". TotalEventSize :" + totalEventSize);
                        }
                        lastTime = currTime;
                    }
                    if ((null != endKeyTxn) && (endKeyTxn.compareKey(lastKeyTxn) < 0)) {
                        LOG.info("Seeding to be stopped for current source as it has completed seeding upto endSrckey :" + endKeyTxn + ", Current SrcKey :" + lastKeyTxn);
                        break;
                    }
                }
                seedingRate.suspend();
                if ((numRowsThisRound <= 1) || ((numRowsThisRound < _numRowsPerQuery) && (numUniqueKeysThisRound <= 1))) {
                    LOG.info("Seeding Done for source :" + sourceInfo.getEventView() + ", numRowsThisRound :" + numRowsThisRound + ", _numRowsPerQuery :" + _numRowsPerQuery + ", numUniqueKeys :" + numUniqueKeysThisRound);
                    done = true;
                } else if ((numRowsThisRound == _numRowsPerQuery) && (numUniqueKeysThisRound <= 1)) {
                    String msg = "Seeding stuck at infinte loop for source : " + sourceInfo.getEventView() + ", numRowsThisRound :" + numRowsThisRound + ", _numRowsPerQuery :" + _numRowsPerQuery + ", numUniqueKeys :" + numUniqueKeysThisRound + ", lastChunkKey :" + lastRoundKeyTxn;
                    LOG.error(msg);
                    throw new DatabusException(msg);
                } else if (null != endKeyTxn) {
                    if (endKeyTxn.compareKey(lastKeyTxn) < 0) {
                        LOG.info("Seeding stopped for source :" + sourceInfo.getEventView() + ", as it has completed seeding upto the endSrckey :" + endKeyTxn + ", numRowsThisRound :" + numRowsThisRound + ", _numRowsPerQuery :" + _numRowsPerQuery + ", numUniqueKeys :" + numUniqueKeysThisRound + " , Current SrcKey :" + lastKeyTxn);
                        done = true;
                    }
                }
                if (currRowId > 0 && (!first || done)) {
                    // Back up one row: the next chunk re-reads the last seen record
                    currRowId--;
                }
                LOG.info("about to call end events with currRowId = " + currRowId);
                first = false;
                _bootstrapSeedWriter.endEvents(currRowId, timestamp, null);
                isException = false;
            } catch (SQLException ex) {
                LOG.error("Got SQLException for source (" + sourceInfo + ")", ex);
                _bootstrapSeedWriter.rollbackEvents();
                numRetry++;
                isException = true;
                if (numRetry >= retryMax) {
                    throw new DatabusException("Error: Reached max retries for reading/processing bootstrap", ex);
                }
            } finally {
                DBHelper.close(rs);
                rs = null;
            }
        }
    } catch (DatabusException ex) {
        isException = true;
        throw ex;
    } finally {
        DBHelper.close(rs, pstmt, conn);
        keyTxnWriter.close();
        rs = null;
        _rate.suspend();
        if (!isException) {
            dedupeKeyTxnFile(_keyTxnFilesMap.get(srcName), keyType);
        }
    }
    long timeEnd = System.currentTimeMillis();
    long elapsedMin = (timeEnd - timeStart) / (MILLISEC_TO_MIN);
    LOG.info("Processed " + numRowsFetched + " rows of Source: " + sourceInfo.getSourceName() + " in " + elapsedMin + " minutes");
    return new EventReaderSummary(sourceInfo.getSourceId(), sourceInfo.getSourceName(), -1, numRowsFetched, totalEventSize, (timeEnd - timeStart), totProcessTime, 0, 0, 0);
}
Also used : KeyType(com.linkedin.databus.core.DbusEventKey.KeyType) SQLException(java.sql.SQLException) EventCreationException(com.linkedin.databus2.producers.EventCreationException) FileWriter(java.io.FileWriter) Connection(java.sql.Connection) PreparedStatement(java.sql.PreparedStatement) RateMonitor(com.linkedin.databus.core.util.RateMonitor) UnsupportedKeyException(com.linkedin.databus.core.UnsupportedKeyException) DatabusException(com.linkedin.databus2.core.DatabusException) InvalidConfigException(com.linkedin.databus.core.util.InvalidConfigException) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) EventReaderSummary(com.linkedin.databus2.producers.db.EventReaderSummary) ResultSet(java.sql.ResultSet) File(java.io.File)
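
The first-chunk-key decision documented in the comment inside readEventsForSource reads more easily as a standalone helper. This is a sketch only, restating the same order (seeder state, then the configured begin key, then Oracle's min key); the method name and parameters are illustrative, and in the source the logic is inlined:

// Hedged restatement of the first-key selection; mirrors the inlined logic above.
private PrimaryKeyTxn resolveFirstChunkKey(String lastKey, String beginKeyFromConfig,
        KeyType keyType, String minKeySQL) throws SQLException {
    // 1. last srcKey from bootstrap_seeder_state, else 2. configured begin srcKey
    String candidate = (null != lastKey) ? lastKey : beginKeyFromConfig;
    if ((null == candidate) || candidate.trim().isEmpty()) {
        // 3. fall back to Oracle's minimum key for the source
        return (KeyType.LONG == keyType)
                ? new PrimaryKeyTxn(executeAndGetLong(minKeySQL))
                : new PrimaryKeyTxn(executeAndGetString(minKeySQL));
    }
    return (KeyType.LONG == keyType)
            ? new PrimaryKeyTxn(Long.parseLong(candidate))
            : new PrimaryKeyTxn(candidate);
}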

Example 14 with EventCreationException

use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.

the class BootstrapAuditTester method compareField.

private boolean compareField(Field f, Object databaseFieldValue, Object avroField) {
    // NULL condition handled
    if (databaseFieldValue == avroField) {
        return true;
    }
    if (databaseFieldValue == null) {
        // avroField cannot also be null or first conditional would have triggered
        LOG.error("compareField error: " + " field=" + f.name() + " null databaseFieldValue but non-null avroField ");
        return false;
    }
    if (avroField == null) {
        // databaseFieldValue cannot also be null or first conditional would have triggered
        LOG.error("compareField error: " + " field=" + f.name() + " non-null databaseFieldValue but null avroField ");
        return false;
    }
    try {
        // == f.schema() if f is not a union
        Schema fieldSchema = SchemaHelper.unwindUnionSchema(f);
        Type avroFieldType = fieldSchema.getType();
        if (_sDebug) {
            LOG.debug("Checking for type:" + avroFieldType + ", Field:" + f.name() + ", Exp:" + databaseFieldValue + ", Got:" + avroField);
        }
        switch(avroFieldType) {
            case BOOLEAN:
                assertEquals(f.name(), databaseFieldValue, avroField);
                break;
            case BYTES:
                byte[] byteArr = null;
                if (databaseFieldValue instanceof Blob) {
                    Blob b = (Blob) databaseFieldValue;
                    byteArr = b.getBytes(1, (int) b.length());
                } else {
                    byteArr = (byte[]) databaseFieldValue;
                }
                assertEquals(f.name(), byteArr, avroField);
                break;
            case DOUBLE:
                assertEquals(f.name(), Double.valueOf(((Number) databaseFieldValue).doubleValue()), avroField);
                break;
            case FLOAT:
                assertEquals(f.name(), Float.valueOf(((Number) databaseFieldValue).floatValue()), avroField);
                break;
            case INT:
                assertEquals(f.name(), Integer.valueOf(((Number) databaseFieldValue).intValue()), (avroField));
                break;
            case LONG:
                if (databaseFieldValue instanceof Number) {
                    long lvalue = ((Number) databaseFieldValue).longValue();
                    assertEquals(f.name(), lvalue, ((Long) avroField).longValue());
                } else if (databaseFieldValue instanceof Timestamp) {
                    long time = ((Timestamp) databaseFieldValue).getTime();
                    assertEquals(f.name(), time, ((Long) avroField).longValue());
                } else if (databaseFieldValue instanceof Date) {
                    long time = ((Date) databaseFieldValue).getTime();
                    assertEquals(f.name(), time, ((Long) avroField).longValue());
                } else {
                    Class<?> timestampClass = null, dateClass = null;
                    try {
                        timestampClass = OracleJarUtils.loadClass("oracle.sql.TIMESTAMP");
                        dateClass = OracleJarUtils.loadClass("oracle.sql.DATE");
                    } catch (Exception e) {
                        String errMsg = "Cannot convert " + databaseFieldValue.getClass() + " to long. Unable to get Oracle datatypes " + e.getMessage();
                        LOG.error(errMsg);
                        throw new EventCreationException(errMsg, e);
                    }
                    if (timestampClass.isInstance(databaseFieldValue)) {
                        try {
                            Object tsc = timestampClass.cast(databaseFieldValue);
                            Method dateValueMethod = timestampClass.getMethod("dateValue");
                            Date dateValue = (Date) dateValueMethod.invoke(tsc);
                            long time = dateValue.getTime();
                            assertEquals(f.name(), time, ((Long) avroField).longValue());
                        } catch (Exception ex) {
                            String errMsg = "SQLException reading oracle.sql.TIMESTAMP value for field " + f.name();
                            LOG.error(errMsg);
                            throw new RuntimeException(errMsg, ex);
                        }
                    } else if (dateClass.isInstance(databaseFieldValue)) {
                        try {
                            Object dsc = dateClass.cast(databaseFieldValue);
                            Method dateValueMethod = dateClass.getMethod("dateValue");
                            Date dateValue = (Date) dateValueMethod.invoke(dsc);
                            long time = dateValue.getTime();
                            assertEquals(f.name(), time, ((Long) avroField).longValue());
                        } catch (Exception ex) {
                            String errMsg = "SQLException reading oracle.sql.DATE value for field " + f.name();
                            LOG.error(errMsg);
                            throw new RuntimeException(errMsg, ex);
                        }
                    } else {
                        String errMsg = "Cannot convert " + databaseFieldValue.getClass() + " to long for field " + f.name();
                        LOG.error(errMsg);
                        throw new RuntimeException(errMsg);
                    }
                }
                break;
            case STRING:
                if (databaseFieldValue instanceof Clob) {
                    String text = null;
                    try {
                        text = OracleAvroGenericEventFactory.extractClobText((Clob) databaseFieldValue, f.name());
                    } catch (EventCreationException ex) {
                        LOG.error("compareField error: " + ex.getMessage(), ex);
                    }
                    assertEquals(f.name(), text, ((Utf8) avroField).toString());
                } else {
                    String text = databaseFieldValue.toString();
                    assertEquals(f.name(), text, ((Utf8) avroField).toString());
                }
                break;
            case NULL:
                assertNull(f.name(), databaseFieldValue);
                assertNull(f.name(), avroField);
                break;
            case ARRAY:
                GenericArray<GenericRecord> avroArray = (GenericArray<GenericRecord>) avroField;
                Schema elementSchema = fieldSchema.getElementType();
                Array array = (Array) databaseFieldValue;
                ResultSet arrayResultSet = array.getResultSet();
                int i = 0;
                while (arrayResultSet.next()) {
                    // Get the underlying structure from the database. Oracle returns the structure in the
                    // second column of the array's ResultSet
                    Struct struct = (Struct) arrayResultSet.getObject(2);
                    Object[] attributes = struct.getAttributes();
                    GenericRecord avroElement = avroArray.get(i++);
                    // Struct attribute order need not match Avro field order, so we
                    // have to use the dbFieldPosition recorded in the schema definition.
                    for (Field field : elementSchema.getFields()) {
                        int dbFieldPosition = Integer.valueOf(SchemaHelper.getMetaField(field, "dbFieldPosition"));
                        Object dbFieldValue = attributes[dbFieldPosition];
                        Object avroFieldValue = avroElement.get(field.name());
                        if (!compareField(field, dbFieldValue, avroFieldValue)) {
                            return false;
                        }
                    }
                }
                break;
            case RECORD:
                assert (compareRecord(fieldSchema, (Struct) databaseFieldValue, (GenericRecord) avroField)) : "comparison of Avro 'record' type failed";
                break;
            case ENUM:
            case FIXED:
            case MAP:
            case UNION:
            default:
                String msg = "Audit for these fields not yet implemented for: " + fieldSchema.getName() + ", Avro type: " + avroFieldType;
                LOG.error(msg);
                throw new RuntimeException(msg);
        }
    } catch (AssertionError err) {
        LOG.error("compareField error: " + err.getMessage() + " field= " + f.name());
        return false;
    } catch (ClassCastException ce) {
        LOG.error("compareField error: " + ce.getMessage() + " field=" + f.name(), ce);
        return false;
    } catch (Exception ex) {
        LOG.error("compareField error: " + ex.getMessage() + " field=" + f.name(), ex);
        return false;
    }
    return true;
}
Also used : Schema(org.apache.avro.Schema) Timestamp(java.sql.Timestamp) Struct(java.sql.Struct) Field(org.apache.avro.Schema.Field) ResultSet(java.sql.ResultSet) GenericArray(org.apache.avro.generic.GenericArray) GenericRecord(org.apache.avro.generic.GenericRecord) Blob(java.sql.Blob) EventCreationException(com.linkedin.databus2.producers.EventCreationException) Method(java.lang.reflect.Method) Date(java.sql.Date) SQLException(java.sql.SQLException) Array(java.sql.Array) Type(org.apache.avro.Schema.Type) Clob(java.sql.Clob)
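
compareField is invoked once per Avro field of a row. A hedged driver sketch; fetchDatabaseValue is a hypothetical accessor standing in for however the caller reads the corresponding column (in the source, compareRecord plays this role for nested records):

// Illustrative per-row driver; fetchDatabaseValue(...) is hypothetical.
boolean rowMatches = true;
for (Field field : avroRecord.getSchema().getFields()) {
    Object dbValue = fetchDatabaseValue(rs, field); // hypothetical column read
    Object avroValue = avroRecord.get(field.name());
    rowMatches &= compareField(field, dbValue, avroValue);
}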

Example 15 with EventCreationException

use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.

the class OracleAvroGenericEventFactory method put.

private void put(GenericRecord record, Field field, Object databaseFieldValue) throws EventCreationException {
    // Get the field name and type from the event schema
    String schemaFieldName = field.name();
    // == field.schema() if not a union
    Schema fieldSchema = SchemaHelper.unwindUnionSchema(field);
    Type avroFieldType = fieldSchema.getType();
    if (databaseFieldValue == null) {
        // The field value was null. If the field is nullable then we do nothing. Otherwise this is an error.
        boolean isNullAllowedInSchema = SchemaHelper.isNullable(field);
        if (!isNullAllowedInSchema) {
            throw new EventCreationException("Null value not allowed for field " + schemaFieldName);
        }
    } else {
        if (_log.isTraceEnabled()) {
            _log.trace("record.put(\"" + schemaFieldName + "\", (" + avroFieldType + ") \"" + databaseFieldValue + "\"");
        }
        try {
            switch(avroFieldType) {
                case BOOLEAN:
                case BYTES:
                case DOUBLE:
                case FLOAT:
                case INT:
                case LONG:
                case STRING:
                case NULL:
                    putSimpleValue(record, schemaFieldName, avroFieldType, databaseFieldValue);
                    break;
                case RECORD:
                    addOracleRecordToParent(record, schemaFieldName, fieldSchema, (Struct) databaseFieldValue);
                    break;
                case ARRAY:
                    putArray(record, schemaFieldName, fieldSchema, (Array) databaseFieldValue);
                    break;
                // exists in some Espresso schemas:  don't blindly cut and paste!
                case ENUM:
                // ditto
                case MAP:
                case FIXED:
                // shouldn't be possible, given unwindUnionSchema() call above
                case UNION:
                default:
                    throw new EventCreationException("Don't know how to populate this type of field: " + avroFieldType);
            }
        } catch (ClassCastException ex) {
            throw new EventCreationException("Type conversion error for field name (" + field.name() + ") in source " + _sourceId + ". Value was: " + databaseFieldValue + " avro field was: " + avroFieldType, ex);
        }
    }
}
Also used : Type(org.apache.avro.Schema.Type) SourceType(com.linkedin.databus2.relay.config.ReplicationBitSetterStaticConfig.SourceType) EventCreationException(com.linkedin.databus2.producers.EventCreationException) Schema(org.apache.avro.Schema)
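
put() is typically driven by a loop that walks the event schema and pulls each column by its dbFieldName metadata. A minimal sketch, assuming the column can be fetched generically with ResultSet.getObject (the real factory performs per-type extraction); error handling is trimmed:

// Hedged row-to-record sketch built around the put() method above.
GenericRecord record = new GenericData.Record(eventSchema);
for (Field field : eventSchema.getFields()) {
    String dbFieldName = SchemaHelper.getMetaField(field, "dbFieldName");
    Object dbValue = (null != dbFieldName) ? rs.getObject(dbFieldName) : null;
    put(record, field, dbValue); // throws EventCreationException on type mismatch
}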

Aggregations

EventCreationException (com.linkedin.databus2.producers.EventCreationException)14 SQLException (java.sql.SQLException)11 UnsupportedKeyException (com.linkedin.databus.core.UnsupportedKeyException)6 InvalidConfigException (com.linkedin.databus.core.util.InvalidConfigException)6 DatabusException (com.linkedin.databus2.core.DatabusException)6 IOException (java.io.IOException)6 Schema (org.apache.avro.Schema)6 ArrayList (java.util.ArrayList)5 OracleTriggerMonitoredSourceInfo (com.linkedin.databus2.producers.db.OracleTriggerMonitoredSourceInfo)4 SourceType (com.linkedin.databus2.relay.config.ReplicationBitSetterStaticConfig.SourceType)4 Type (org.apache.avro.Schema.Type)4 GenericRecord (org.apache.avro.generic.GenericRecord)4 EventReaderSummary (com.linkedin.databus2.producers.db.EventReaderSummary)3 NoSuchSchemaException (com.linkedin.databus2.schemas.NoSuchSchemaException)3 Array (java.sql.Array)3 ResultSet (java.sql.ResultSet)3 Field (org.apache.avro.Schema.Field)3 GenericArray (org.apache.avro.generic.GenericArray)3 EventProducer (com.linkedin.databus2.producers.EventProducer)2 PartitionFunction (com.linkedin.databus2.producers.PartitionFunction)2