Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
From the class DbusEventAvroDecoder, method getTypedValue:
@Override
public <T extends SpecificRecord> T getTypedValue(DbusEvent e, T reuse, Class<T> targetClass) {
  if (null == reuse) {
    try {
      reuse = targetClass.newInstance();
    } catch (InstantiationException e1) {
      LOG.error("getTypedValue class instantiation error (" + e1.getMessage() + ") for event " + e, e1);
      return null;
    } catch (IllegalAccessException e1) {
      LOG.error("getTypedValue access error (" + e1.getMessage() + ") for event " + e, e1);
      return null;
    }
  }
  byte[] md5 = new byte[16];
  e.schemaId(md5);
  SchemaId schemaId = new SchemaId(md5);
  VersionedSchema writerSchema = _schemaSet.getById(schemaId);
  if (null == writerSchema) {
    LOG.error("Unable to find schema for id " + schemaId + "; event = " + e);
    throw new DatabusRuntimeException("No schema available to decode event " + e);
  }
  ByteBuffer valueBuffer = e.value();
  byte[] valueBytes = new byte[valueBuffer.remaining()];
  valueBuffer.get(valueBytes);
  try {
    //JsonDecoder jsonDec = new JsonDecoder(sourceSchema.getSchema(), new ByteArrayInputStream(valueBytes));
    binDecoder.set(DecoderFactory.defaultFactory().createBinaryDecoder(valueBytes, binDecoder.get()));
    SpecificDatumReader<SpecificRecord> reader = new SpecificDatumReader<SpecificRecord>(writerSchema.getSchema(), reuse.getSchema());
    return targetClass.cast(reader.read(reuse, binDecoder.get()));
  } catch (IOException e1) {
    LOG.error("getTypedValue IO error (" + e1.getMessage() + ") for event " + e, e1);
  }
  return reuse;
}
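A minimal usage sketch follows. The generated Avro class MemberChange and the decoder/event variables are hypothetical, for illustration only; the decoder is assumed to be a DbusEventAvroDecoder built over a schema set containing the event's writer schema.

// Decode an event into a strongly typed, Avro-compiler-generated record.
MemberChange record = decoder.getTypedValue(event, null, MemberChange.class);
if (null != record) {
  // Passing the previous object back in as "reuse" avoids a per-event allocation.
  record = decoder.getTypedValue(nextEvent, record, MemberChange.class);
}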
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
From the class DbusEventAvroDecoder, method getGenericRecord:
@Override
public GenericRecord getGenericRecord(DbusEvent e, GenericRecord reuse) {
  byte[] md5 = new byte[16];
  e.schemaId(md5);
  SchemaId schemaId = new SchemaId(md5);
  VersionedSchema writerSchema = _schemaSet.getById(schemaId);
  if (null == writerSchema) {
    LOG.error("Unable to find schema for id " + schemaId + "; event = " + e);
    throw new DatabusRuntimeException("No schema available to decode event " + e);
  }
  ByteBuffer valueBuffer = e.value();
  byte[] valueBytes = null;
  if (valueBuffer.hasArray()) {
    // Fast path: use the backing array directly. Note this assumes the payload
    // starts at array offset 0, since array() ignores the buffer's position.
    valueBytes = valueBuffer.array();
  } else {
    valueBytes = new byte[valueBuffer.remaining()];
    valueBuffer.get(valueBytes);
  }
  return getGenericRecord(valueBytes, writerSchema.getSchema(), reuse);
}
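A hedged sketch of calling the generic-record variant; the field name "memberId" is illustrative, and the decoder and event are assumed to be set up as above:

GenericRecord record = decoder.getGenericRecord(event, null);
if (null != record) {
  // Fields are resolved from the writer schema looked up via the event's schemaId.
  Object memberId = record.get("memberId");
}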
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
From the class DbusEventAvroDecoder, method getMetadata:
/**
* Deserializes the metadata (if any) of a Databus event to an Avro GenericRecord. This method
* is for INTERNAL USE ONLY (by Espresso and Databus). It is NOT a stable API and may change
* without warning!
*
* @param e the Databus event whose metadata is to be decoded
* @param reuse an existing {@link org.apache.avro.generic.GenericRecord} object where the
* deserialized values will be written to. The object can be <b>null</b>, in
* which case a new object will be allocated.
* @return {@link org.apache.avro.generic.GenericRecord} object with the deserialized data, or
* null if no metadata exists. Returned in <b>reuse</b> if provided, else in a newly
* allocated object.
* @throws DatabusRuntimeException if event contains metadata but schema to decode it is missing
*/
public GenericRecord getMetadata(DbusEvent e, GenericRecord reuse) {
  DbusEventPart metadataPart = e.getPayloadMetadataPart();
  ByteBuffer dataBuffer = null;
  if (null == metadataPart || null == (dataBuffer = metadataPart.getData()) || dataBuffer.remaining() <= 0) {
    LOG.debug("No metadata for event " + e);
    return null;
  }
  VersionedSchema schema = getMetadataSchema(metadataPart);
  if (null == schema) {
    throw new DatabusRuntimeException("No schema available to decode metadata for event " + e);
  }
  byte[] dataBytes = null;
  if (dataBuffer.hasArray()) {
    dataBytes = dataBuffer.array();
  } else {
    dataBytes = new byte[dataBuffer.remaining()];
    try {
      dataBuffer.get(dataBytes);
    } catch (BufferUnderflowException ex) {
      LOG.error("metadata buffer error (remaining = " + dataBuffer.remaining() + ") for event " + e, ex);
      return null;
    }
  }
  return getGenericRecord(dataBytes, schema.getSchema(), reuse);
}
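Because getMetadata returns null for "no metadata" but throws for "metadata present without a registered schema", callers typically separate the two cases. A sketch, with the decoder and event assumed as before:

try {
  GenericRecord meta = decoder.getMetadata(event, null);
  if (null == meta) {
    // Event carries no payload metadata; nothing to do.
  }
} catch (DatabusRuntimeException ex) {
  // Metadata exists but its schema is missing from the metadata schema set.
}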
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
From the class CheckpointSerializerMain, method updateCheckpoint:
private static Checkpoint updateCheckpoint(Checkpoint cpOld) throws JsonParseException, JsonMappingException, IOException {
  Checkpoint cpNew = null != cpOld ? new Checkpoint(cpOld.toString()) : new Checkpoint();
  if (null != _scn) {
    if (-1L != _scn) {
      cpNew.setWindowScn(_scn);
      cpNew.setWindowOffset(0);
    } else {
      cpNew.setFlexible();
    }
  }
  if (null != _startScn) {
    cpNew.setBootstrapStartScn(_startScn);
  }
  if (null != _targetScn) {
    cpNew.setBootstrapTargetScn(_targetScn);
  }
  if (null != _cpType) {
    cpNew.setConsumptionMode(_cpType);
    switch (_cpType) {
      case ONLINE_CONSUMPTION:
        cpNew.setWindowOffset(0);
        break;
      /*
       * TODO Disabled because the bootstrap checkpoint creation leaves important
       * information (e.g. the catchup/snapshot source index) out of the checkpoint
       * and thus is incorrect. We have to figure out what types of bootstrap
       * checkpoints it makes sense to create.
      case BOOTSTRAP_CATCHUP:
      {
        if (null != _bootstrapSource) cpNew.setCatchupSource(_bootstrapSource);
        cpNew.setCatchupOffset(-1);
        break;
      }*/
      case BOOTSTRAP_SNAPSHOT:
      {
        BootstrapCheckpointHandler handler = new BootstrapCheckpointHandler(_sources);
        cpNew = handler.createInitialBootstrapCheckpoint(cpNew, _sinceScn);
        //if (null != _bootstrapSource) cpNew.setSnapshotSource(_bootstrapSource);
        cpNew.setSnapshotOffset(-1);
        break;
      }
      default:
        throw new DatabusRuntimeException("unsupported checkpoint type: " + _cpType);
    }
  }
  return cpNew;
}
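For instance, passing an SCN of -1 with the ONLINE_CONSUMPTION type yields a "flexible" online checkpoint that lets the relay pick the starting point. A sketch of the resulting object (assuming Databus's DbusClientMode enum for the consumption mode):

Checkpoint cp = new Checkpoint();
cp.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
cp.setFlexible(); // equivalent to the -1 SCN branch above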
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
From the class ORListener, method endXtion:
/**
 * Per the open-replicator XidEvent
 * (http://code.google.com/p/open-replicator/source/browse/trunk/open-replicator/src/main/java/com/google/code/or/binlog/impl/event/XidEvent.java),
 * an XidEvent signals a commit.
 */
private void endXtion(AbstractBinlogEventV4 e) {
  _currTxnTimestamp = e.getHeader().getTimestamp() * 1000000L;
  long txnReadLatency = System.nanoTime() - _currTxnStartReadTimestamp;
  boolean em = ((e instanceof QueryEvent) || (e instanceof XidEvent));
  if (!em) {
    throw new DatabusRuntimeException("endXtion should be called with either QueryEvent or XidEvent");
  }
  _transaction.setSizeInBytes(_currTxnSizeInBytes);
  _transaction.setTxnNanoTimestamp(_currTxnTimestamp);
  _transaction.setTxnReadLatencyNanos(txnReadLatency);
  if (_ignoreSource) {
    long scn = scn(_currFileNum, (int) e.getHeader().getPosition());
    _transaction.setIgnoredSourceScn(scn);
  }
  try {
    _txnProcessor.onEndTransaction(_transaction);
  } catch (DatabusException e3) {
    _log.error("Got exception in the transaction handler ", e3);
    throw new DatabusRuntimeException(e3);
  } finally {
    reset();
    if (_log.isDebugEnabled()) {
      _log.debug("endXtion " + e);
    }
  }
}
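The scn(...) call above packs the binlog file number and the event's file offset into a single sequence number. The actual helper lives elsewhere in the Databus MySQL fetcher; the sketch below shows the commonly described layout (file number in the high 32 bits, offset in the low 32 bits) and should be read as an assumption, not the exact implementation:

// Assumed layout: SCN = (logical binlog file number << 32) | binlog offset.
private static long scn(int fileNum, int offset) {
  return (((long) fileNum) << 32) | (offset & 0xFFFFFFFFL);
}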