Use of com.linkedin.databus2.core.DatabusException in project databus by linkedin.
The class OracleEventProducerFactory, method buildEventProducer.
public EventProducer buildEventProducer(PhysicalSourceStaticConfig physicalSourceConfig, SchemaRegistryService schemaRegistryService, DbusEventBufferAppendable dbusEventBuffer, MBeanServer mbeanServer, DbusEventsStatisticsCollector dbusEventsStatisticsCollector, MaxSCNReaderWriter _maxScnReaderWriter) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, InvalidConfigException {
    // Make sure the URI from the configuration file identifies an Oracle JDBC source.
    String uri = physicalSourceConfig.getUri();
    if (!uri.startsWith("jdbc:oracle")) {
        throw new InvalidConfigException("Invalid source URI (" + physicalSourceConfig.getUri() + "). Only jdbc:oracle: URIs are supported.");
    }
    // Parse each one of the logical sources
    List<OracleTriggerMonitoredSourceInfo> sources = new ArrayList<OracleTriggerMonitoredSourceInfo>();
    for (LogicalSourceStaticConfig sourceConfig : physicalSourceConfig.getSources()) {
        OracleTriggerMonitoredSourceInfo source = buildOracleMonitoredSourceInfo(sourceConfig, physicalSourceConfig, schemaRegistryService);
        sources.add(source);
    }
    DataSource ds = null;
    try {
        ds = OracleJarUtils.createOracleDataSource(uri);
    } catch (Exception e) {
        String errMsg = "Oracle URI likely not supported. Trouble creating OracleDataSource";
        _log.error(errMsg, e);
        throw new InvalidConfigException(errMsg + ": " + e.getMessage());
    }
    // Create the event producer
    EventProducer eventProducer = new OracleEventProducer(sources, ds, dbusEventBuffer, true, dbusEventsStatisticsCollector, _maxScnReaderWriter, physicalSourceConfig, ManagementFactory.getPlatformMBeanServer());
    _log.info("Created OracleEventProducer for config: " + physicalSourceConfig + " with slowSourceQueryThreshold = " + physicalSourceConfig.getSlowSourceQueryThreshold());
    return eventProducer;
}
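As a quick illustration of the guard at the top of this method, here is a minimal, self-contained sketch of the URI check. The class and method names are invented for the example, and InvalidConfigException is replaced by IllegalArgumentException so the snippet compiles on its own; the accepted prefix (jdbc:oracle) comes from the factory code above.

public class OracleUriCheckSketch {
    // Hypothetical stand-in for the jdbc:oracle guard in buildEventProducer.
    static void validateOracleUri(String uri) {
        if (uri == null || !uri.startsWith("jdbc:oracle")) {
            throw new IllegalArgumentException("Invalid source URI (" + uri + "). Only jdbc:oracle: URIs are supported.");
        }
    }

    public static void main(String[] args) {
        validateOracleUri("jdbc:oracle:thin:@localhost:1521:XE"); // accepted
        try {
            validateOracleUri("jdbc:mysql://localhost/db");       // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}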
Use of com.linkedin.databus2.core.DatabusException in project databus by linkedin.
The class OracleEventProducerFactory, method buildOracleMonitoredSourceInfo.
public OracleTriggerMonitoredSourceInfo buildOracleMonitoredSourceInfo(LogicalSourceStaticConfig sourceConfig, PhysicalSourceStaticConfig pConfig, SchemaRegistryService schemaRegistryService) throws DatabusException, EventCreationException, UnsupportedKeyException, InvalidConfigException {
    String schema = null;
    try {
        schema = schemaRegistryService.fetchLatestSchemaBySourceName(sourceConfig.getName());
    } catch (NoSuchSchemaException e) {
        throw new InvalidConfigException("Unable to load the schema for source (" + sourceConfig.getName() + ").");
    }
    if (schema == null) {
        throw new InvalidConfigException("Unable to load the schema for source (" + sourceConfig.getName() + ").");
    }
    _log.info("Loading schema for source id " + sourceConfig.getId() + ": " + schema);
    String eventViewSchema;
    String eventView;
    if (sourceConfig.getUri().indexOf('.') != -1) {
        String[] parts = sourceConfig.getUri().split("\\.");
        eventViewSchema = parts[0];
        eventView = parts[1];
    } else {
        eventViewSchema = null;
        eventView = sourceConfig.getUri();
    }
    if (eventView.toLowerCase().startsWith("sy$")) {
        eventView = eventView.substring(3);
    }
    PartitionFunction partitionFunction = buildPartitionFunction(sourceConfig);
    EventFactory factory = createEventFactory(eventViewSchema, eventView, sourceConfig, pConfig, schema, partitionFunction);
    EventSourceStatistics statisticsBean = new EventSourceStatistics(sourceConfig.getName());
    OracleTriggerMonitoredSourceInfo sourceInfo = new OracleTriggerMonitoredSourceInfo(sourceConfig.getId(), sourceConfig.getName(), eventViewSchema, eventView, factory, statisticsBean, sourceConfig.getRegularQueryHints(), sourceConfig.getChunkedTxnQueryHints(), sourceConfig.getChunkedScnQueryHints(), sourceConfig.isSkipInfinityScn());
    return sourceInfo;
}
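The schema/view derivation above is self-contained string handling, so it can be lifted into a small runnable sketch. The class name, method name, and sample URIs below are illustrative only; the parsing rules (split on '.', strip a leading sy$ synonym prefix) come straight from buildOracleMonitoredSourceInfo.

import java.util.Arrays;

public class SourceUriParseSketch {
    // Returns { eventViewSchema, eventView }, mirroring the factory's parsing.
    static String[] parseSourceUri(String sourceUri) {
        String eventViewSchema;
        String eventView;
        if (sourceUri.indexOf('.') != -1) {
            String[] parts = sourceUri.split("\\.");
            eventViewSchema = parts[0];
            eventView = parts[1];
        } else {
            eventViewSchema = null;
            eventView = sourceUri;
        }
        // The sy$ synonym prefix is stripped from the view name, as in the factory.
        if (eventView.toLowerCase().startsWith("sy$")) {
            eventView = eventView.substring(3);
        }
        return new String[] { eventViewSchema, eventView };
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(parseSourceUri("MEMBER2.sy$profile"))); // [MEMBER2, profile]
        System.out.println(Arrays.toString(parseSourceUri("sy$anet")));            // [null, anet]
    }
}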
Use of com.linkedin.databus2.core.DatabusException in project databus by linkedin.
The class DatabusRelayTestUtil, method createDatabusRelay.
public static DatabusRelayMain createDatabusRelay(PhysicalSourceConfig[] sourceConfigs, HttpRelay.Config httpRelayConfig) throws IOException, DatabusException {
    PhysicalSourceStaticConfig[] pStaticConfigs = new PhysicalSourceStaticConfig[sourceConfigs.length];
    int i = 0;
    for (PhysicalSourceConfig pConf : sourceConfigs) {
        // Register each logical source with the relay config, keyed by its id
        for (LogicalSourceConfig lsc : pConf.getSources()) {
            httpRelayConfig.setSourceName("" + lsc.getId(), lsc.getName());
        }
        pStaticConfigs[i++] = pConf.build();
    }
    DatabusRelayMain relayMain = new DatabusRelayMain(httpRelayConfig.build(), pStaticConfigs);
    return relayMain;
}
Use of com.linkedin.databus2.core.DatabusException in project databus by linkedin.
The class DatabusRelayMain, method addOneProducer.
/** overrides HTTP relay method */
@Override
public void addOneProducer(PhysicalSourceStaticConfig pConfig) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, InvalidConfigException {
    // Register a command to allow start/stop/status of the relay
    List<EventProducer> plist = new ArrayList<EventProducer>();
    PhysicalPartition pPartition = pConfig.getPhysicalPartition();
    MaxSCNReaderWriter maxScnReaderWriters = _maxScnReaderWriters.getOrCreateHandler(pPartition);
    LOG.info("Starting server container with maxScnReaderWriter:" + maxScnReaderWriters);
    // Get the event buffer
    DbusEventBufferAppendable dbusEventBuffer = getEventBuffer().getDbusEventBufferAppendable(pPartition);
    // Get the schema registry service
    SchemaRegistryService schemaRegistryService = getSchemaRegistryService();
    // Get a stats collector per physical source
    addPhysicalPartitionCollectors(pPartition);
    String statsCollectorName = pPartition.toSimpleString();
    /*
     * _inBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".inbound", true, false, getMbeanServer()));
     *
     * _outBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".outbound", true, false, getMbeanServer()));
     */
    // Create the event producer
    String uri = pConfig.getUri();
    if (uri == null)
        throw new DatabusException("Uri is required to start the relay");
    uri = uri.trim();
    EventProducer producer = null;
    if (uri.startsWith("jdbc:")) {
        SourceType sourceType = pConfig.getReplBitSetter().getSourceType();
        if (SourceType.TOKEN.equals(sourceType))
            throw new DatabusException("Token Source-type for Replication bit setter config cannot be set for trigger-based Databus relay !!");
        producer = new OracleEventProducerFactory().buildEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, getMbeanServer(), _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mock")) {
        // Get all relevant pConfig attributes
        //TODO add real instantiation
        EventProducerServiceProvider mockProvider = _producersRegistry.getEventProducerServiceProvider("mock");
        if (null == mockProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + "mock");
        }
        producer = mockProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("gg:")) {
        producer = new GoldenGateEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mysql:")) {
        LOG.info("Adding OpenReplicatorEventProducer for uri :" + uri);
        final String serviceName = "or";
        EventProducerServiceProvider orProvider = _producersRegistry.getEventProducerServiceProvider(serviceName);
        if (null == orProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + serviceName);
        }
        producer = orProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else {
        // Get all relevant pConfig attributes and initialize the nettyThreadPool objects
        RelayEventProducer.DatabusClientNettyThreadPools nettyThreadPools = new RelayEventProducer.DatabusClientNettyThreadPools(0, getNetworkTimeoutTimer(), getBossExecutorService(), getIoExecutorService(), getHttpChannelGroup());
        producer = new RelayEventProducer(pConfig, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters, nettyThreadPools);
    }
    // if a buffer for this partition exists - we are overwriting it.
    _producers.put(pPartition, producer);
    plist.add(producer);
    // append 'monitoring event producer'
    if (producer instanceof OracleEventProducer) {
        MonitoringEventProducer monitoringProducer = new MonitoringEventProducer("dbMonitor." + pPartition.toSimpleString(), pConfig.getName(), pConfig.getUri(), ((OracleEventProducer) producer).getMonitoredSourceInfos(), getMbeanServer());
        _monitoringProducers.put(pPartition, monitoringProducer);
        plist.add(monitoringProducer);
    }
    if (_csEventRequestProcessor == null)
        _csEventRequestProcessor = new ControlSourceEventsRequestProcessor(null, this, plist);
    else
        _csEventRequestProcessor.addEventProducers(plist);
    RequestProcessorRegistry processorRegistry = getProcessorRegistry();
    processorRegistry.reregister(ControlSourceEventsRequestProcessor.COMMAND_NAME, _csEventRequestProcessor);
}
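The producer selection above is driven entirely by the URI scheme: jdbc: URIs get the trigger-based Oracle producer, mock and mysql: URIs are resolved through the event producer registry, gg: URIs get the GoldenGate producer, and anything else falls back to a chained RelayEventProducer. A reduced sketch of that dispatch order follows; the returned labels and the sample URIs are illustrative stand-ins, not databus types.

public class ProducerDispatchSketch {
    // Mirrors the if/else-if chain in addOneProducer, in the same order.
    static String producerFor(String uri) {
        uri = uri.trim();
        if (uri.startsWith("jdbc:")) return "OracleEventProducer (trigger-based)";
        if (uri.startsWith("mock")) return "mock producer from the registry";
        if (uri.startsWith("gg:")) return "GoldenGateEventProducer";
        if (uri.startsWith("mysql:")) return "OpenReplicator producer (\"or\" provider)";
        return "RelayEventProducer (chained relay)";
    }

    public static void main(String[] args) {
        System.out.println(producerFor("jdbc:oracle:thin:@db:1521:XE"));
        System.out.println(producerFor("gg:///mnt/gg/trail"));
        System.out.println(producerFor("http://upstream-relay:11115/"));
    }
}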
Use of com.linkedin.databus2.core.DatabusException in project databus by linkedin.
The class GoldenGateEventProducer, method locateScnInTrailFile.
/**
 * Given an xml directory and prefix, identifies the trail file that contains the scn (_scn from the event producer class)
 * and returns an input stream positioned at that scn. If the scn is not found:
 * 1. If the scn is less than the minimum scn present in the trail file directory, a fatal exception is thrown.
 * 2. If the exact scn is not found but is greater than the minimum scn in the trail file directory, the closest scn greater than _scn (from the event producer class) is used.
 * This method reads and modifies _scn on the event producer.
 * @param xmlDir The directory where the trail files are located
 * @param xmlPrefix The prefix of the xml trail files, e.g. x4
 * @return a composite input stream positioned at the located transaction
 * @throws IOException
 * @throws DatabusException
 */
private ConcurrentAppendableCompositeFileInputStream locateScnInTrailFile(String xmlDir, String xmlPrefix) throws Exception {
    ConcurrentAppendableCompositeFileInputStream compositeInputStream = null;
    TrailFilePositionSetter.FilePositionResult filePositionResult = null;
    TrailFilePositionSetter trailFilePositionSetter = null;
    while (compositeInputStream == null) {
        _log.info("Requesting trail file position setter for scn: " + _scn.get());
        trailFilePositionSetter = new TrailFilePositionSetter(xmlDir, xmlPrefix, getName());
        filePositionResult = trailFilePositionSetter.locateFilePosition(_scn.get(), new GGXMLTrailTransactionFinder());
        _log.info("File position at : " + filePositionResult);
        switch (filePositionResult.getStatus()) {
            case ERROR:
                _log.fatal("Unable to locate the scn in the trail file.");
                throw new DatabusException("Unable to find the given scn " + _scn.get() + " in the trail files");
            case NO_TXNS_FOUND:
                // If the latest scn is not found in the trail files, then use the earliest scn.
                if (_scn.get() == TrailFilePositionSetter.USE_LATEST_SCN) {
                    _log.info("Switching from USE_LATEST_SCN to USE_EARLIEST_SCN because no transactions were found in the trail files");
                    _scn.set(TrailFilePositionSetter.USE_EARLIEST_SCN);
                }
                // TODO: get the sleep time from configuration
                long noTxnsFoundSleepTime = 500;
                _log.info("NO_TXNS_FOUND, sleeping for " + noTxnsFoundSleepTime + " ms before retrying");
                Thread.sleep(noTxnsFoundSleepTime);
                break;
            case EXACT_SCN_NOT_FOUND:
            {
                _log.info("Exact SCN was not found, the closest scn found was: " + filePositionResult.getTxnPos().getMinScn());
                compositeInputStream = new ConcurrentAppendableCompositeFileInputStream(xmlDir, filePositionResult.getTxnPos().getFile(), filePositionResult.getTxnPos().getFileOffset(), new TrailFilePositionSetter.FileFilter(new File(xmlDir), xmlPrefix), false);
                long foundScn = filePositionResult.getTxnPos().getMaxScn();
                /**
                 * If the exact scn is not found, the trail file position setter returns the next available scn, i.e., the contract guarantees
                 * an scn strictly greater than the given scn (foundScn > _scn). We use _scn (the requested scn) as the prevScn to start the event buffer,
                 * and the found scn as the current scn (first event in the relay).
                 */
                if (foundScn <= _scn.get())
                    throw new DatabusException("EXACT_SCN_NOT_FOUND, but foundScn is <= _scn ");
                _startPrevScn.set(_scn.get());
                _log.info("Changing current scn from " + _scn.get() + " to " + foundScn);
                _log.info("Planning to use prevScn " + _startPrevScn);
                _scn.set(foundScn);
                break;
            }
            case FOUND:
            {
                _log.info("Exact SCN was found: " + filePositionResult.getTxnPos().getMaxScn());
                compositeInputStream = new ConcurrentAppendableCompositeFileInputStream(xmlDir, filePositionResult.getTxnPos().getFile(), filePositionResult.getTxnPos().getFileOffset(), new TrailFilePositionSetter.FileFilter(new File(xmlDir), xmlPrefix), false);
                /**
                 * The trail file position setter returns FOUND in two cases:
                 * 1. MaxScn was given as input.
                 * 2. Earliest or latest scn was given as input.
                 * In both cases, we set the prevScn to foundScn - 1 and foundScn as the current scn.
                 */
                long foundScn = filePositionResult.getTxnPos().getMaxScn();
                // Assert that if a max scn was requested, the trail file position setter has returned exactly that scn (it has returned FOUND).
                if (_scn.get() >= 0 && _scn.get() != foundScn) {
                    throw new DatabusException("The exact scn was not found, but the trail file position setter has returned FOUND!");
                }
                _startPrevScn.set(foundScn - 1);
                _scn.set(foundScn);
                break;
            }
            default:
                throw new DatabusException("Unhandled file position result in switch case, terminating producer.");
        }
    }
    if (filePositionResult == null) {
        _log.info(trailFilePositionSetter);
        throw new DatabusException("file position result returned by TrailFilePositionSetter is null!");
    }
    if (_scn.get() <= 0) {
        _log.info("The scn is <= 0, using scn from file position setter: " + filePositionResult);
        _scn.set(filePositionResult.getTxnPos().getMaxScn());
    }
    return compositeInputStream;
}
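The Javadoc and the two success branches above boil down to a small piece of SCN bookkeeping: when the exact SCN is missing, the requested SCN becomes prevScn and the strictly larger found SCN becomes the current SCN; when the position setter reports FOUND, prevScn is simply foundScn - 1. A minimal sketch of that rule follows; the enum and method here are simplified stand-ins for FilePositionResult, not databus API.

import java.util.Arrays;

public class ScnBookkeepingSketch {
    enum Status { FOUND, EXACT_SCN_NOT_FOUND }

    // Returns { prevScn, currentScn } the way locateScnInTrailFile seeds them.
    static long[] seedScns(Status status, long requestedScn, long foundScn) {
        if (status == Status.EXACT_SCN_NOT_FOUND) {
            if (foundScn <= requestedScn) {
                throw new IllegalStateException("EXACT_SCN_NOT_FOUND, but foundScn <= requested scn");
            }
            return new long[] { requestedScn, foundScn };
        }
        // FOUND: exact match, or earliest/latest was requested; prevScn is foundScn - 1.
        return new long[] { foundScn - 1, foundScn };
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(seedScns(Status.EXACT_SCN_NOT_FOUND, 100, 105))); // [100, 105]
        System.out.println(Arrays.toString(seedScns(Status.FOUND, 100, 100)));               // [99, 100]
    }
}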