Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleAvroGenericEventFactory, method serializeEvent.
protected byte[] serializeEvent(GenericRecord record, long scn, long timestamp, ResultSet row,
                                DbusEventBufferAppendable eventBuffer, boolean enableTracing,
                                DbusEventsStatisticsCollector dbusEventsStatisticsCollector)
    throws EventCreationException, UnsupportedKeyException {
  // Serialize the row
  byte[] serializedValue;
  try {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Encoder encoder = new BinaryEncoder(bos);
    GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(record.getSchema());
    writer.write(record, encoder);
    serializedValue = bos.toByteArray();
  } catch (IOException ex) {
    throw new EventCreationException("Failed to serialize the Avro GenericRecord. ResultSet was: (" + row + ")", ex);
  } catch (RuntimeException ex) {
    // Avro tends to throw RuntimeExceptions instead of checked exceptions when serialization fails.
    throw new EventCreationException("Failed to serialize the Avro GenericRecord. ResultSet was: (" + row + ")", ex);
  }
  return serializedValue;
}
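For context, the direct new BinaryEncoder(OutputStream) constructor used above comes from older Avro releases. A minimal standalone sketch of the same binary-serialization step written against the newer Avro EncoderFactory API (the class name AvroSerializer is illustrative, not from the databus source):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;

public final class AvroSerializer {
  // Serializes a GenericRecord to Avro binary, mirroring serializeEvent() above.
  public static byte[] toBytes(GenericRecord record) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    // EncoderFactory replaces the direct BinaryEncoder constructor in Avro 1.5+.
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(bos, null);
    GenericDatumWriter<GenericRecord> writer =
        new GenericDatumWriter<GenericRecord>(record.getSchema());
    writer.write(record, encoder);
    encoder.flush();  // binaryEncoder() is buffered; flush before reading the bytes
    return bos.toByteArray();
  }
}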
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleAvroGenericEventFactory, method putSimpleValue.
/**
 * Copies the value of a simple-type event field from the DB field value to an Avro record
 * @param record the Avro record to populate
 * @param schemaFieldName the name of the Avro field
 * @param avroFieldType the type of the Avro field
 * @param databaseFieldValue the JDBC field value from the ResultSet (cannot be null)
 * @throws EventCreationException if the conversion from the JDBC type to the Avro type failed
 */
private void putSimpleValue(GenericRecord record, String schemaFieldName, Type avroFieldType,
                            Object databaseFieldValue) throws EventCreationException {
  assert null != databaseFieldValue;
  switch (avroFieldType) {
    case BOOLEAN:
      record.put(schemaFieldName, ((Boolean) databaseFieldValue).booleanValue());
      break;
    case BYTES:
      if (databaseFieldValue instanceof byte[]) {
        record.put(schemaFieldName, ByteBuffer.wrap((byte[]) databaseFieldValue));
      } else {
        record.put(schemaFieldName, extractBlobBytes((Blob) databaseFieldValue, schemaFieldName));
      }
      break;
    case DOUBLE:
      record.put(schemaFieldName, ((Number) databaseFieldValue).doubleValue());
      break;
    case FLOAT:
      record.put(schemaFieldName, ((Number) databaseFieldValue).floatValue());
      break;
    case INT:
      record.put(schemaFieldName, ((Number) databaseFieldValue).intValue());
      break;
    case LONG:
      Class<?> timestampClass = null, dateClass = null;
      Method timestampValueMethod = null;
      try {
        timestampClass = OracleJarUtils.loadClass("oracle.sql.TIMESTAMP");
        dateClass = OracleJarUtils.loadClass("oracle.sql.DATE");
        timestampValueMethod = timestampClass.getMethod("timestampValue");
      } catch (Exception e) {
        String errMsg = "Cannot convert " + databaseFieldValue.getClass() + " to long for field "
            + schemaFieldName + ": unable to load the Oracle datatypes. " + e.getMessage();
        throw new EventCreationException(errMsg);
      }
      if (databaseFieldValue instanceof Timestamp) {
        long time = ((Timestamp) databaseFieldValue).getTime();
        record.put(schemaFieldName, time);
      } else if (databaseFieldValue instanceof Date) {
        long time = ((Date) databaseFieldValue).getTime();
        record.put(schemaFieldName, time);
      } else if (timestampClass.isInstance(databaseFieldValue)) {
        try {
          Object tsc = timestampClass.cast(databaseFieldValue);
          Timestamp tsValue = (Timestamp) timestampValueMethod.invoke(tsc);
          record.put(schemaFieldName, tsValue.getTime());
        } catch (Exception ex) {
          throw new EventCreationException("SQLException reading oracle.sql.TIMESTAMP value for field " + schemaFieldName, ex);
        }
      } else if (dateClass.isInstance(databaseFieldValue)) {
        try {
          Object dsc = dateClass.cast(databaseFieldValue);
          Timestamp tsValue = (Timestamp) timestampValueMethod.invoke(dsc);
          record.put(schemaFieldName, tsValue.getTime());
        } catch (Exception ex) {
          throw new EventCreationException("SQLException reading oracle.sql.DATE value for field " + schemaFieldName, ex);
        }
      }
      // This check must stay after the oracle.sql.TIMESTAMP/DATE checks: the Oracle timestamp
      // class extends/implements Number/BigDecimal, so it would otherwise match the
      // instanceof Number check first. To avoid this we stick to this order.
      else if (databaseFieldValue instanceof Number) {
        long lvalue = ((Number) databaseFieldValue).longValue();
        record.put(schemaFieldName, lvalue);
      } else {
        throw new EventCreationException("Cannot convert " + databaseFieldValue.getClass() + " to long for field " + schemaFieldName);
      }
      break;
    case STRING:
      if (databaseFieldValue instanceof Clob) {
        String text = extractClobText((Clob) databaseFieldValue, schemaFieldName);
        record.put(schemaFieldName, text);
      } else if (databaseFieldValue instanceof SQLXML) {
        SQLXML xmlInst = (SQLXML) databaseFieldValue;
        try {
          record.put(schemaFieldName, xmlInst.getString());
        } catch (SQLException e) {
          throw new EventCreationException("Cannot convert " + databaseFieldValue.getClass()
              + " to string for field " + schemaFieldName + "; cause: " + e);
        }
      } else {
        record.put(schemaFieldName, databaseFieldValue.toString());
      }
      break;
    case NULL:
      record.put(schemaFieldName, null);
      break;
    default:
      throw new EventCreationException("Unknown simple type " + avroFieldType.toString() + " for field " + schemaFieldName);
  }
}
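A hypothetical in-class caller sketch showing where this mapping would typically sit; the field and column names (last_modified, LAST_MODIFIED) and the local rs are illustrative, and a plain (non-union) Avro field type is assumed:

// Fetch the column as a plain Object and let the Avro field type drive the conversion.
Schema.Field avroField = record.getSchema().getField("last_modified");
Object dbValue = rs.getObject("LAST_MODIFIED");  // e.g. java.sql.Timestamp or oracle.sql.TIMESTAMP
if (dbValue == null) {
  record.put(avroField.name(), null);  // null values bypass putSimpleValue (it asserts non-null)
} else {
  putSimpleValue(record, avroField.name(), avroField.schema().getType(), dbValue);
}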
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleAvroGenericEventFactory, method extractClobText.
/**
* Read the CLOB passed and return the value as a String.
*/
public static String extractClobText(Clob clob, String fieldName) throws EventCreationException {
  if (clob == null)
    return null;
  try {
    long length = clob.length();
    if (length <= Integer.MAX_VALUE) {
      String s = clob.getSubString(1, (int) length);
      return s;
    } else {
      Reader reader = null;
      try {
        reader = clob.getCharacterStream();
        StringWriter writer = new StringWriter();
        char[] buffer = new char[1024];
        int n;
        while ((n = reader.read(buffer)) != -1) {
          writer.write(buffer, 0, n);
        }
        return writer.toString();
      } catch (IOException ex) {
        throw new SQLException("IOException reading from CLOB column.", ex);
      } finally {
        if (reader != null) {
          try {
            reader.close();
          } catch (IOException ex) {
            // ignore this
          }
        }
      }
    }
  } catch (SQLException ex) {
    throw new EventCreationException("SQLException reading CLOB value for field " + fieldName, ex);
  }
}
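A short, hypothetical usage sketch (the connection, query, and column names are illustrative) showing where the public static extractClobText() would typically be called from a ResultSet loop:

static String readBodyColumn(Connection conn) throws SQLException, EventCreationException {
  try (PreparedStatement stmt = conn.prepareStatement("SELECT BODY FROM DOCS WHERE ID = 1");
       ResultSet rs = stmt.executeQuery()) {
    if (!rs.next()) {
      return null;
    }
    Clob bodyClob = rs.getClob("BODY");
    // extractClobText handles both the small-CLOB and the streaming case shown above.
    return OracleAvroGenericEventFactory.extractClobText(bodyClob, "body");
  }
}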
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleTxlogEventReader, method readEventsFromAllSources.
@Override
public ReadEventCycleSummary readEventsFromAllSources(long sinceSCN)
    throws DatabusException, EventCreationException, UnsupportedKeyException {
  boolean eventBufferNeedsRollback = true;
  boolean debugEnabled = _log.isDebugEnabled();
  List<EventReaderSummary> summaries = new ArrayList<EventReaderSummary>();
  try {
    long cycleStartTS = System.currentTimeMillis();
    _eventBuffer.startEvents();
    // Open the database connection if it is closed (at start or after an SQLException)
    if (_eventSelectConnection == null || _eventSelectConnection.isClosed()) {
      resetConnections();
    }
    /**
     * Chunking in the relay:
     * ======================
     *
     * Variables used:
     * ===============
     *
     * 1. _inChunking         : flag indicating whether the relay is in chunking mode
     * 2. _chunkingType       : type of chunking supported
     * 3. _chunkedScnThreshold: the SCN difference that triggers chunking. If the relay's
     *                          max SCN is older than the DB's max SCN by this threshold,
     *                          chunking is enabled.
     * 4. _txnsPerChunk       : chunk size, in transactions, for txn-based chunking
     * 5. _scnChunkSize       : chunk size for SCN-based chunking
     * 6. _catchupTargetMaxScn: cached copy of the DB's max SCN, used as chunking's target SCN
     *
     * ======================================
     * Behavior of chunking for slow sources:
     * ======================================
     *
     * The slow-source case illustrated here is when all the sources in the sourcesList
     * (fetched by the relay) are slow. In this case the endOfPeriodSCN will not increase
     * on its own, whereas in all other cases it will.
     *
     * At startup, if _catchupTargetMaxScn - currScn > _chunkedScnThreshold, chunking is enabled.
     *
     * 1. Txn-based chunking:
     *    a) If chunking is on at startup, the txn-based chunking query is used; otherwise
     *       the regular query is used.
     *    b) For a period of up to SLOW_SOURCE_QUERY_THRESHOLD msec, endOfPeriodSCN/sinceSCN
     *       do not increase.
     *    c) After SLOW_SOURCE_QUERY_THRESHOLD msec, sinceSCN/endOfPeriodSCN are advanced to
     *       the current max SCN. If chunking was enabled at this time, it is disabled for up
     *       to MAX_SCN_DELAY_MS msec, after which _catchupTargetMaxScn is refreshed.
     *    d) If the new _catchupTargetMaxScn - currScn > _chunkedScnThreshold, chunking is
     *       enabled again.
     *    e) Go to (b).
     *
     * 2. SCN-based chunking:
     *    a) If chunking is on at startup, the SCN-based chunking query is used; otherwise
     *       the regular query is used.
     *    b) For a period of up to SLOW_SOURCE_QUERY_THRESHOLD msec, endOfPeriodSCN/sinceSCN
     *       keep increasing by _scnChunkSize with no rows fetched.
     *    c) When _catchupTargetMaxScn - endOfPeriodSCN < _chunkedScnThreshold, chunking is
     *       disabled and the regular query kicks in; in this phase sinceSCN/endOfPeriodSCN
     *       do not increase. After the MAX_SCN_DELAY_MS interval, _catchupTargetMaxScn is
     *       refreshed.
     *    d) If the new _catchupTargetMaxScn - currScn > _chunkedScnThreshold, SCN chunking
     *       is enabled again.
     *    e) Go to (b).
     */
    if (sinceSCN <= 0) {
      _catchupTargetMaxScn = sinceSCN = getMaxTxlogSCN(_eventSelectConnection);
      _log.debug("sinceSCN was <= 0. Overriding with the current max SCN=" + sinceSCN);
      _eventBuffer.setStartSCN(sinceSCN);
      try {
        DBHelper.commit(_eventSelectConnection);
      } catch (SQLException s) {
        DBHelper.rollback(_eventSelectConnection);
      }
    } else if (_chunkingType.isChunkingEnabled() && (_catchupTargetMaxScn <= 0)) {
      _catchupTargetMaxScn = getMaxTxlogSCN(_eventSelectConnection);
      _log.debug("catchupTargetMaxScn was <= 0. Overriding with the current max SCN=" + _catchupTargetMaxScn);
    }
    if (_catchupTargetMaxScn <= 0)
      _inChunkingMode = false;
    // Get events for each source
    List<OracleTriggerMonitoredSourceInfo> filteredSources = filterSources(sinceSCN);
    long endOfPeriodScn = EventReaderSummary.NO_EVENTS_SCN;
    for (OracleTriggerMonitoredSourceInfo source : _sources) {
      if (filteredSources.contains(source)) {
        long startTS = System.currentTimeMillis();
        EventReaderSummary summary = readEventsFromOneSource(_eventSelectConnection, source, sinceSCN);
        summaries.add(summary);
        endOfPeriodScn = Math.max(endOfPeriodScn, summary.getEndOfPeriodSCN());
        long endTS = System.currentTimeMillis();
        source.getStatisticsBean().addTimeOfLastDBAccess(endTS);
        if (_eventsLog.isDebugEnabled() || (_eventsLog.isInfoEnabled() && summary.getNumberOfEvents() > 0)) {
          _eventsLog.info(summary.toString());
        }
        // Update statistics for the source
        if (summary.getNumberOfEvents() > 0) {
          source.getStatisticsBean().addEventCycle(summary.getNumberOfEvents(), endTS - startTS,
                                                   summary.getSizeOfSerializedEvents(),
                                                   summary.getEndOfPeriodSCN());
        } else {
          source.getStatisticsBean().addEmptyEventCycle();
        }
      } else {
        source.getStatisticsBean().addEmptyEventCycle();
      }
    }
    _lastSeenEOP = Math.max(_lastSeenEOP, Math.max(endOfPeriodScn, sinceSCN));
    // If we did not read any events in this cycle, get the max SCN from the txlog. This
    // is for slow sources, so that the endOfPeriodScn never lags too far behind the max SCN
    // in the txlog table.
    long curtime = System.currentTimeMillis();
    if (endOfPeriodScn == EventReaderSummary.NO_EVENTS_SCN) {
      // In SCN chunking mode, it is possible to get empty batches for an SCN range.
      if ((sinceSCN + _scnChunkSize <= _catchupTargetMaxScn) && (ChunkingType.SCN_CHUNKING == _chunkingType)) {
        endOfPeriodScn = sinceSCN + _scnChunkSize;
        _lastquerytime = curtime;
      } else if (ChunkingType.TXN_CHUNKING == _chunkingType && _inChunkingMode) {
        long nextBatchScn = getMaxScnSkippedForTxnChunked(_eventSelectConnection, sinceSCN, _txnsPerChunk);
        _log.info("No events while in txn chunking. currScn: " + sinceSCN + ", jumping to: " + nextBatchScn);
        endOfPeriodScn = nextBatchScn;
        _lastquerytime = curtime;
      } else if ((curtime - _lastquerytime) > _slowQuerySourceThreshold) {
        _lastquerytime = curtime;
        // Get a new start SCN for subsequent calls.
        final long maxTxlogSCN = getMaxTxlogSCN(_eventSelectConnection);
        // For performance reasons, getMaxTxlogSCN() returns the max SCN only among txlog rows
        // which have their SCN rewritten (i.e. scn < infinity). This allows the getMaxTxlogSCN
        // query to be evaluated using only the SCN index. Getting the true max SCN requires
        // scanning the rows where scn == infinity, which is expensive.
        // On the other hand, readEventsFromOneSource will read the latter events, so it is
        // possible that maxTxlogSCN < the SCN of the last event in the buffer!
        // We use max() to guarantee that there are no SCN regressions.
        endOfPeriodScn = Math.max(maxTxlogSCN, sinceSCN);
        _log.info("SlowSourceQueryThreshold hit. currScn: " + sinceSCN + ". Advanced endOfPeriodScn to "
            + endOfPeriodScn + " and added the event to relay");
        if (debugEnabled) {
          _log.debug("No events processed. Read max SCN from txlog table for endOfPeriodScn. endOfPeriodScn=" + endOfPeriodScn);
        }
      }
      if (endOfPeriodScn != EventReaderSummary.NO_EVENTS_SCN && endOfPeriodScn > sinceSCN) {
        // The SCN moved forward in the if/else chain above, so end the event window.
        _log.info("The endOfPeriodScn has advanced to " + endOfPeriodScn);
        _eventBuffer.endEvents(endOfPeriodScn, _relayInboundStatsCollector);
        eventBufferNeedsRollback = false;
      } else {
        eventBufferNeedsRollback = true;
      }
    } else {
      // We have appended some events, and a new end of period has been found.
      _lastquerytime = curtime;
      _eventBuffer.endEvents(endOfPeriodScn, _relayInboundStatsCollector);
      if (debugEnabled) {
        _log.debug("End of events: " + endOfPeriodScn + " window range=" + _eventBuffer.getMinScn()
            + "," + _eventBuffer.lastWrittenScn());
      }
      // No need to roll back.
      eventBufferNeedsRollback = false;
    }
    // Save endOfPeriodScn if a new one has been discovered.
    if (endOfPeriodScn != EventReaderSummary.NO_EVENTS_SCN) {
      if (null != _maxScnWriter && (endOfPeriodScn != sinceSCN)) {
        _maxScnWriter.saveMaxScn(endOfPeriodScn);
      }
      for (OracleTriggerMonitoredSourceInfo source : _sources) {
        // Update maxDBScn here.
        source.getStatisticsBean().addMaxDBScn(endOfPeriodScn);
        source.getStatisticsBean().addTimeOfLastDBAccess(System.currentTimeMillis());
      }
    }
    long cycleEndTS = System.currentTimeMillis();
    // Check whether we should refresh _catchupTargetMaxScn.
    if (_chunkingType.isChunkingEnabled() && (_lastSeenEOP >= _catchupTargetMaxScn)
        && (curtime - _lastMaxScnTime >= _maxScnDelayMs)) {
      // Reset it to -1 so it gets refreshed next time around.
      _catchupTargetMaxScn = -1;
    }
    boolean chunkMode = _chunkingType.isChunkingEnabled() && (_catchupTargetMaxScn > 0)
        && (_lastSeenEOP < _catchupTargetMaxScn);
    if (!chunkMode && _inChunkingMode)
      _log.info("Disabling chunking for sources!");
    _inChunkingMode = chunkMode;
    if (_inChunkingMode && debugEnabled)
      _log.debug("_inChunkingMode = true, _catchupTargetMaxScn=" + _catchupTargetMaxScn
          + ", endOfPeriodScn=" + endOfPeriodScn + ", _lastSeenEOP=" + _lastSeenEOP);
    ReadEventCycleSummary summary = new ReadEventCycleSummary(_name, summaries,
        Math.max(endOfPeriodScn, sinceSCN), (cycleEndTS - cycleStartTS));
    // Have to commit the transaction since we are in the serializable isolation level.
    DBHelper.commit(_eventSelectConnection);
    // Return the event summaries.
    return summary;
  } catch (SQLException ex) {
    try {
      DBHelper.rollback(_eventSelectConnection);
    } catch (SQLException s) {
      throw new DatabusException(s.getMessage());
    }
    handleExceptionInReadEvents(ex);
    throw new DatabusException(ex);
  } catch (Exception e) {
    handleExceptionInReadEvents(e);
    throw new DatabusException(e);
  } finally {
    // If an error occurred above, roll back the event buffer.
    if (eventBufferNeedsRollback) {
      if (_log.isDebugEnabled()) {
        _log.debug("Rolling back the event buffer because eventBufferNeedsRollback is true.");
      }
      _eventBuffer.rollbackEvents();
    }
  }
}
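As an aside, the chunking-state transitions described in the comment above boil down to two small decisions. A simplified, illustrative restatement (the wrapper class ChunkingDecision and its method names are hypothetical, not part of databus; the field names follow the method above):

final class ChunkingDecision {
  // Chunking stays on only while a valid catch-up target exists and the last seen
  // end-of-period SCN has not yet reached it.
  static boolean shouldChunk(boolean chunkingEnabled, long catchupTargetMaxScn, long lastSeenEOP) {
    return chunkingEnabled && catchupTargetMaxScn > 0 && lastSeenEOP < catchupTargetMaxScn;
  }

  // The target is invalidated (reset to -1) once it has been reached and _maxScnDelayMs
  // has elapsed, forcing a refresh from the txlog on the next cycle.
  static long maybeInvalidateTarget(long catchupTargetMaxScn, long lastSeenEOP,
                                    long now, long lastMaxScnTime, long maxScnDelayMs) {
    if (lastSeenEOP >= catchupTargetMaxScn && now - lastMaxScnTime >= maxScnDelayMs) {
      return -1;  // refreshed on the next readEventsFromAllSources() call
    }
    return catchupTargetMaxScn;
  }
}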
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class DatabusRelayMain, method initProducers.
public void initProducers()
    throws InvalidConfigException, DatabusException, EventCreationException,
           UnsupportedKeyException, SQLException, ProcessorRegistrationConflictException {
  LOG.info("initializing producers");
  for (PhysicalSourceStaticConfig pConfig : _pConfigs) {
    addOneProducer(pConfig);
  }
  this.setDbPullerStart(_relayStaticConfig.getStartDbPuller());
  LOG.info("done initializing producers");
}