Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class BootstrapAvroFileEventReader, method readEventsFromAllSources.
@Override
public ReadEventCycleSummary readEventsFromAllSources(long sinceSCN) throws DatabusException, EventCreationException, UnsupportedKeyException {
    List<EventReaderSummary> summaries = new ArrayList<EventReaderSummary>();
    boolean error = false;
    long startTS = System.currentTimeMillis();
    long endScn = -1;
    long minScn = Long.MAX_VALUE;
    try {
        for (OracleTriggerMonitoredSourceInfo sourceInfo : _sources) {
            endScn = _config.getSeedWindowSCNMap().get(sourceInfo.getEventView());
            minScn = Math.min(endScn, minScn);
            LOG.info("Bootstrapping " + sourceInfo.getEventView());
            _bootstrapEventBuffer.start(endScn);
            String dir = _config.getAvroSeedInputDirMap().get(sourceInfo.getEventView());
            File d = new File(dir);
            EventReaderSummary summary = readEventsFromHadoopFiles(sourceInfo, d, endScn);
            // Script assumes seeding is done for one schema at a time
            _bootstrapEventBuffer.endEvents(BootstrapEventBuffer.END_OF_SOURCE, endScn, null);
            summaries.add(summary);
        }
    } catch (Exception ex) {
        error = true;
        throw new DatabusException(ex);
    } finally {
        // Notify writer that I am done
        if (error) {
            _bootstrapEventBuffer.endEvents(BootstrapEventBuffer.ERROR_CODE, endScn, null);
            LOG.error("Seeder stopping unexpectedly !!");
        } else {
            _bootstrapEventBuffer.endEvents(BootstrapEventBuffer.END_OF_FILE, endScn, null);
            LOG.info("Completed Seeding !!");
        }
    }
    LOG.info("Start SCN :" + minScn);
    long endTS = System.currentTimeMillis();
    ReadEventCycleSummary cycleSummary = new ReadEventCycleSummary("seeder", summaries, minScn, (endTS - startTS));
    return cycleSummary;
}
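The method declares three checked exceptions, so every caller must handle or propagate all of them. A minimal call-site sketch follows; the reader and startScn variables are illustrative assumptions, not names from the snippet above:

    // Hypothetical call site; 'reader' and 'startScn' are illustrative assumptions.
    try {
        ReadEventCycleSummary cycle = reader.readEventsFromAllSources(startScn);
        LOG.info("Seeding cycle finished: " + cycle);
    } catch (EventCreationException e) {
        // Thrown when a source row cannot be turned into a Databus event
        LOG.error("Event creation failed while seeding", e);
    } catch (UnsupportedKeyException e) {
        LOG.error("Unsupported key type while seeding", e);
    } catch (DatabusException e) {
        // The reader wraps all other failures in DatabusException (see the catch block above)
        LOG.error("Seeding aborted", e);
    }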
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class BootstrapSrcDBEventReader, method readEventsFromAllSources.
@Override
public ReadEventCycleSummary readEventsFromAllSources(long sinceSCN) throws DatabusException, EventCreationException, UnsupportedKeyException {
    List<EventReaderSummary> summaries = new ArrayList<EventReaderSummary>();
    long maxScn = EventReaderSummary.NO_EVENTS_SCN;
    long endScn = maxScn;
    boolean error = false;
    long startTS = System.currentTimeMillis();
    try {
        _rate.start();
        _rate.suspend();
        Connection conn = null;
        try {
            conn = _dataSource.getConnection();
            LOG.info("Oracle JDBC Version :" + conn.getMetaData().getDriverVersion());
        } finally {
            DBHelper.close(conn);
        }
        if (!_sources.isEmpty()) {
            // Script assumes seeding is done for one schema at a time;
            // just use one source to get the schema name for sy$txlog
            maxScn = getMaxScn(_sources.get(0));
        }
        for (OracleTriggerMonitoredSourceInfo sourceInfo : _sources) {
            LOG.info("Bootstrapping " + sourceInfo.getEventView());
            _bootstrapSeedWriter.start(maxScn);
            EventReaderSummary summary = readEventsForSource(sourceInfo, maxScn);
            // Script assumes seeding is done for one schema at a time;
            // just use one source to get the schema name for sy$txlog
            endScn = getMaxScn(_sources.get(0));
            _bootstrapSeedWriter.endEvents(BootstrapEventBuffer.END_OF_SOURCE, endScn, null);
            summaries.add(summary);
        }
    } catch (Exception ex) {
        error = true;
        throw new DatabusException(ex);
    } finally {
        // Notify writer that I am done
        if (error) {
            _bootstrapSeedWriter.endEvents(BootstrapEventBuffer.ERROR_CODE, endScn, null);
            LOG.error("Seeder stopping unexpectedly !!");
        } else {
            _bootstrapSeedWriter.endEvents(BootstrapEventBuffer.END_OF_FILE, endScn, null);
            LOG.info("Completed Seeding !!");
        }
        LOG.info("Start SCN :" + maxScn);
        LOG.info("End SCN :" + endScn);
    }
    long endTS = System.currentTimeMillis();
    ReadEventCycleSummary cycleSummary = new ReadEventCycleSummary("seeder", summaries, maxScn, (endTS - startTS));
    return cycleSummary;
}
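The connection probe near the top of this method opens a throwaway connection purely to log the JDBC driver version, and releases it in a finally block via DBHelper.close, so a failure in getMetaData() cannot leak the connection. On Java 7 and later the same probe could be written with try-with-resources; a sketch under that assumption:

    // Equivalent probe using try-with-resources (Java 7+); assumes _dataSource is a javax.sql.DataSource.
    try (Connection conn = _dataSource.getConnection()) {
        LOG.info("Oracle JDBC Version :" + conn.getMetaData().getDriverVersion());
    }
    // Any SQLException propagates to the enclosing catch, which wraps it in a DatabusException.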
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class DatabusRelayMain, method addOneProducer.
/**
 * Overrides the HTTP relay method.
 */
@Override
public void addOneProducer(PhysicalSourceStaticConfig pConfig) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, InvalidConfigException {
    // Register a command to allow start/stop/status of the relay
    List<EventProducer> plist = new ArrayList<EventProducer>();
    PhysicalPartition pPartition = pConfig.getPhysicalPartition();
    MaxSCNReaderWriter maxScnReaderWriters = _maxScnReaderWriters.getOrCreateHandler(pPartition);
    LOG.info("Starting server container with maxScnReaderWriter:" + maxScnReaderWriters);
    // Get the event buffer
    DbusEventBufferAppendable dbusEventBuffer = getEventBuffer().getDbusEventBufferAppendable(pPartition);
    // Get the schema registry service
    SchemaRegistryService schemaRegistryService = getSchemaRegistryService();
    // Get a stats collector per physical source
    addPhysicalPartitionCollectors(pPartition);
    String statsCollectorName = pPartition.toSimpleString();
    /*
     * _inBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".inbound", true, false, getMbeanServer()));
     *
     * _outBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".outbound", true, false, getMbeanServer()));
     */
    // Create the event producer
    String uri = pConfig.getUri();
    if (uri == null)
        throw new DatabusException("Uri is required to start the relay");
    uri = uri.trim();
    EventProducer producer = null;
    if (uri.startsWith("jdbc:")) {
        SourceType sourceType = pConfig.getReplBitSetter().getSourceType();
        if (SourceType.TOKEN.equals(sourceType))
            throw new DatabusException("Token Source-type for Replication bit setter config cannot be set for trigger-based Databus relay !!");
        producer = new OracleEventProducerFactory().buildEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, getMbeanServer(), _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mock")) {
        // Get all relevant pConfig attributes
        // TODO add real instantiation
        EventProducerServiceProvider mockProvider = _producersRegistry.getEventProducerServiceProvider("mock");
        if (null == mockProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + "mock");
        }
        producer = mockProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("gg:")) {
        producer = new GoldenGateEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mysql:")) {
        LOG.info("Adding OpenReplicatorEventProducer for uri :" + uri);
        final String serviceName = "or";
        EventProducerServiceProvider orProvider = _producersRegistry.getEventProducerServiceProvider(serviceName);
        if (null == orProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + serviceName);
        }
        producer = orProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else {
        // Get all relevant pConfig attributes and initialize the nettyThreadPool objects
        RelayEventProducer.DatabusClientNettyThreadPools nettyThreadPools = new RelayEventProducer.DatabusClientNettyThreadPools(0, getNetworkTimeoutTimer(), getBossExecutorService(), getIoExecutorService(), getHttpChannelGroup());
        producer = new RelayEventProducer(pConfig, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters, nettyThreadPools);
    }
    // If a buffer for this partition exists, we are overwriting it.
    _producers.put(pPartition, producer);
    plist.add(producer);
    // Append 'monitoring event producer'
    if (producer instanceof OracleEventProducer) {
        MonitoringEventProducer monitoringProducer = new MonitoringEventProducer("dbMonitor." + pPartition.toSimpleString(), pConfig.getName(), pConfig.getUri(), ((OracleEventProducer) producer).getMonitoredSourceInfos(), getMbeanServer());
        _monitoringProducers.put(pPartition, monitoringProducer);
        plist.add(monitoringProducer);
    }
    if (_csEventRequestProcessor == null)
        _csEventRequestProcessor = new ControlSourceEventsRequestProcessor(null, this, plist);
    else
        _csEventRequestProcessor.addEventProducers(plist);
    RequestProcessorRegistry processorRegistry = getProcessorRegistry();
    processorRegistry.reregister(ControlSourceEventsRequestProcessor.COMMAND_NAME, _csEventRequestProcessor);
}
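Producer selection is a plain prefix dispatch on the physical source URI: jdbc: picks the trigger-based Oracle producer, mock the registered test producer, gg: GoldenGate, mysql: Open Replicator, and anything else falls through to a chained RelayEventProducer. A reduced sketch of that decision structure; the createXxx helpers are hypothetical stand-ins for the constructor and factory calls above, not databus APIs:

    // Reduced sketch of the URI dispatch; all createXxx helpers are illustrative assumptions.
    EventProducer producerFor(String uri) {
        if (uri.startsWith("jdbc:"))  return createOracleProducer(uri);         // trigger-based Oracle
        if (uri.startsWith("mock"))   return createMockProducer(uri);           // testing hook
        if (uri.startsWith("gg:"))    return createGoldenGateProducer(uri);     // GoldenGate trail files
        if (uri.startsWith("mysql:")) return createOpenReplicatorProducer(uri); // MySQL binlog
        return createChainedRelayProducer(uri);                                 // default: relay chaining
    }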
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleAvroGenericEventFactory, method processRecordField.
private void processRecordField(GenericRecord fieldRecord, Field field, Object[] structAttribs) throws EventCreationException {
    String recordFieldName = field.name();
    String dbFieldPositionStr = SchemaHelper.getMetaField(field, "dbFieldPosition");
    int dbFieldPosition = 0;
    if (null != dbFieldPositionStr && !dbFieldPositionStr.isEmpty()) {
        dbFieldPosition = Integer.valueOf(dbFieldPositionStr);
    }
    Object structAttribValue = structAttribs[dbFieldPosition];
    if (structAttribValue == null) {
        // The field value was null. If the field is nullable then we do nothing. Otherwise this is an error.
        boolean isNullAllowedInSchema = SchemaHelper.isNullable(field.schema());
        if (!isNullAllowedInSchema) {
            throw new EventCreationException("Null value not allowed for field " + recordFieldName + ":" + field.schema());
        }
    } else {
        // == field.schema() if not a union
        Schema recordSchema = SchemaHelper.unwindUnionSchema(field);
        Type recordFieldType = recordSchema.getType();
        switch (recordFieldType) {
            case BOOLEAN:
            case BYTES:
            case DOUBLE:
            case FLOAT:
            case INT:
            case LONG:
            case STRING:
            case NULL:
                putSimpleValue(fieldRecord, recordFieldName, recordFieldType, structAttribValue);
                break;
            case RECORD:
                addOracleRecordToParent(fieldRecord, recordFieldName, recordSchema, (Struct) structAttribValue);
                break;
            case ARRAY:
                putArray(fieldRecord, recordFieldName, recordSchema, (Array) structAttribValue);
                break;
            case ENUM:
            case FIXED:
            case MAP:
            case UNION:
            default:
                throw new EventCreationException("unknown struct field type: " + recordFieldName + ":" + recordFieldType);
        }
    }
}
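The null branch is the usual origin of EventCreationException in this factory: a null column value is legal only if the Avro field schema permits null, i.e. it is the NULL type itself or a union containing it. SchemaHelper.isNullable wraps that test; a standalone equivalent using only the public Avro API, for illustration:

    import org.apache.avro.Schema;

    // Standalone equivalent of the nullability check above, using only public Avro APIs.
    static boolean isNullable(Schema fieldSchema) {
        if (fieldSchema.getType() == Schema.Type.NULL) {
            return true;
        }
        if (fieldSchema.getType() == Schema.Type.UNION) {
            for (Schema member : fieldSchema.getTypes()) {
                if (member.getType() == Schema.Type.NULL) {
                    return true; // e.g. ["null", "string"] allows null values
                }
            }
        }
        return false;
    }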
Use of com.linkedin.databus2.producers.EventCreationException in project databus by linkedin.
The class OracleAvroGenericEventFactory, method buildGenericRecord.
/**
 * Build a GenericRecord from the contents of the current ResultSet row.
 * @param rs the ResultSet, already positioned at the row to convert
 * @return the populated Avro record
 * @throws SQLException if a column cannot be read from the row
 * @throws EventCreationException if a value cannot be mapped onto the Avro schema
 */
protected GenericRecord buildGenericRecord(ResultSet rs) throws SQLException, EventCreationException {
    boolean traceEnabled = _log.isTraceEnabled();
    if (traceEnabled) {
        _log.trace("--- New Record ---");
    }
    // Initialize a new GenericData.Record from the event schema
    GenericRecord record = new GenericData.Record(_eventSchema);
    // Iterate over the array of fields defined in the Avro schema
    List<Field> fields = _eventSchema.getFields();
    for (Field field : fields) {
        // Get the Avro field type information
        String schemaFieldName = field.name();
        // This is just field.schema() if field is not a union; but if it IS one,
        // this is the schema of the first non-null type within the union:
        Schema fieldSchema = SchemaHelper.unwindUnionSchema(field);
        Type avroFieldType = fieldSchema.getType();
        if (avroFieldType == Type.ARRAY) {
            // Process as an array. Note that we're encoding to Avro's internal representation rather
            // than to Avro binary format, which is what allows us to directly encode one of the union's
            // inner types (here as well as in put()) instead of wrapping the inner type in a union.
            // (Avro's binary encoding for unions includes an additional long index value before the
            // encoding of the selected inner type.)
            putArray(record, schemaFieldName, fieldSchema, getJdbcArray(rs, fieldSchema));
        } else {
            String databaseFieldName = SchemaHelper.getMetaField(field, "dbFieldName");
            try {
                Object databaseFieldValue = rs.getObject(databaseFieldName);
                put(record, field, databaseFieldValue);
            } catch (SQLException ex) {
                _log.error("Failed to read column (" + databaseFieldName + ") for source (" + _sourceId + ")");
                throw ex;
            }
        }
    }
    // Return the Avro record.
    return record;
}
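buildGenericRecord converts exactly one row and never advances the cursor, so the caller drives the ResultSet. Because the method is protected, that loop would live in the factory itself or a subclass; a sketch, where conn, sql, and consume are illustrative assumptions:

    // Hypothetical driver loop inside a subclass; 'conn', 'sql', and 'consume' are assumptions.
    try (Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(sql)) {
        while (rs.next()) {
            GenericRecord record = buildGenericRecord(rs); // may throw SQLException or EventCreationException
            consume(record);
        }
    }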