Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
The class ORListener, method orToAvroType.
/**
 * Given an OR (Open Replicator) column, returns a corresponding Java object that can be
 * inserted into an Avro record.
 * @param s the Open Replicator column to convert
 * @param avroField the target Avro field (consulted for metadata such as dbFieldType)
 * @return the converted value, or null for NULL columns
 * @throws DatabusException if the conversion fails
 */
private Object orToAvroType(Column s, Field avroField) throws DatabusException {
  if (s instanceof BitColumn) {
    // This is in byte order
    BitColumn bc = (BitColumn) s;
    byte[] ba = bc.getValue();
    ByteBuffer b = ByteBuffer.wrap(ba);
    return b;
  } else if (s instanceof StringColumn) {
    StringColumn sc = (StringColumn) s;
    String str = new String(sc.getValue(), StringUtils.DEFAULT_CHARSET);
    return str;
  } else if (s instanceof BlobColumn) {
    BlobColumn bc = (BlobColumn) s;
    byte[] ba = bc.getValue();
    // Distinguish between BLOBs and CLOBs: try to decode the bytes as a string,
    // and fall back to the raw bytes if decoding fails.
    try {
      return new String(ba, StringUtils.DEFAULT_CHARSET);
    } catch (Exception e) {
      return ByteBuffer.wrap(ba);
    }
  } else if (s instanceof DateColumn) {
    DateColumn dc = (DateColumn) s;
    Date d = dc.getValue();
    Long l = d.getTime();
    return l;
  } else if (s instanceof DatetimeColumn) {
    DatetimeColumn dc = (DatetimeColumn) s;
    Date d = dc.getValue();
    // Bug in OR for the DATETIME and TIME data types: these columns have no millisecond
    // component, yet OR wrongly fills it in from currentMillis(). Truncate to seconds.
    Long t1 = (d.getTime() / 1000) * 1000;
    return t1;
  } else if (s instanceof DecimalColumn) {
    DecimalColumn dc = (DecimalColumn) s;
    // Note: converting DECIMAL to double loses precision beyond ~15 significant digits.
    Object val = Double.valueOf(dc.getValue().doubleValue());
    return val;
  } else if (s instanceof DoubleColumn) {
    DoubleColumn dc = (DoubleColumn) s;
    Double d = dc.getValue();
    return d;
  } else if (s instanceof EnumColumn) {
    EnumColumn ec = (EnumColumn) s;
    Integer i = ec.getValue();
    return i;
  } else if (s instanceof FloatColumn) {
    FloatColumn fc = (FloatColumn) s;
    Float f = fc.getValue();
    return f;
  } else if (s instanceof Int24Column) {
    Int24Column ic = (Int24Column) s;
    Integer i = ic.getValue();
    // UNSIGNED values above the signed maximum arrive as negative signed values;
    // shift them back into the unsigned range.
    if (i < 0 && SchemaHelper.getMetaField(avroField, "dbFieldType").contains("UNSIGNED")) {
      i += ORListener.MEDIUMINT_MAX_VALUE;
    }
    return i;
  } else if (s instanceof LongColumn) {
    LongColumn lc = (LongColumn) s;
    Long l = lc.getValue().longValue();
    if (l < 0 && SchemaHelper.getMetaField(avroField, "dbFieldType").contains("UNSIGNED")) {
      l += ORListener.INTEGER_MAX_VALUE;
    }
    return l;
  } else if (s instanceof LongLongColumn) {
    LongLongColumn llc = (LongLongColumn) s;
    BigInteger b = BigInteger.valueOf(llc.getValue());
    if (b.compareTo(BigInteger.ZERO) < 0 && SchemaHelper.getMetaField(avroField, "dbFieldType").contains("UNSIGNED")) {
      b = b.add(ORListener.BIGINT_MAX_VALUE);
    }
    return b;
  } else if (s instanceof NullColumn) {
    return null;
  } else if (s instanceof SetColumn) {
    SetColumn sc = (SetColumn) s;
    Long l = sc.getValue();
    return l;
  } else if (s instanceof ShortColumn) {
    ShortColumn sc = (ShortColumn) s;
    Integer i = sc.getValue();
    if (i < 0 && SchemaHelper.getMetaField(avroField, "dbFieldType").contains("UNSIGNED")) {
      i = i + ORListener.SMALLINT_MAX_VALUE;
    }
    return i;
  } else if (s instanceof TimeColumn) {
    TimeColumn tc = (TimeColumn) s;
    Time t = tc.getValue();
    /**
     * There is a bug in OR where, instead of using 1970 as the default year, it uses 0070.
     * As a temporary workaround at this layer, the base value "0070-01-01 00:00:00"
     * (built via the Calendar below) is subtracted from the value obtained from OR.
     */
    Calendar c = Calendar.getInstance();
    c.set(70, 0, 1, 0, 0, 0);
    // Round off the milliseconds: the TimeColumn type has only seconds granularity, but the
    // Calendar instance includes milliseconds (System.currentTimeMillis() at instantiation).
    long rawVal = (c.getTimeInMillis() / 1000) * 1000;
    long val2 = (t.getTime() / 1000) * 1000;
    long offset = val2 - rawVal;
    return offset;
  } else if (s instanceof TimestampColumn) {
    TimestampColumn tsc = (TimestampColumn) s;
    Timestamp ts = tsc.getValue();
    Long t = ts.getTime();
    return t;
  } else if (s instanceof DatetimeColumn) {
    // Note: this branch is unreachable; DatetimeColumn is already handled above.
    DatetimeColumn tsc = (DatetimeColumn) s;
    Long t = tsc.getValue().getTime();
    return t;
  } else if (s instanceof Datetime2Column) {
    Datetime2Column tsc = (Datetime2Column) s;
    Long t = tsc.getValue().getTime();
    return t;
  } else if (s instanceof TinyColumn) {
    TinyColumn tc = (TinyColumn) s;
    Integer i = tc.getValue();
    if (i < 0 && SchemaHelper.getMetaField(avroField, "dbFieldType").contains("UNSIGNED")) {
      i = i + ORListener.TINYINT_MAX_VALUE;
    }
    return i;
  } else if (s instanceof YearColumn) {
    YearColumn yc = (YearColumn) s;
    Integer i = yc.getValue();
    return i;
  } else {
    throw new DatabusRuntimeException("Unknown MySQL type in the event: " + s.getClass() + "; object = " + s);
  }
}
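All of the UNSIGNED branches above apply the same two's-complement correction: a value that overflows the signed range arrives negative, and adding back 2^N (for an N-bit column) recovers the unsigned value. A minimal, self-contained sketch of that rule; the constant name mirrors ORListener's MEDIUMINT_MAX_VALUE, but its value here (2^24) is an assumption for illustration:

public class UnsignedCorrectionSketch {
  private static final int MEDIUMINT_MAX_VALUE = 1 << 24; // assumed: 2^24 for a 24-bit column

  static int toUnsignedMediumInt(int signed) {
    // UNSIGNED MEDIUMINT values above 2^23 - 1 arrive as negative signed ints;
    // adding 2^24 shifts them back into the unsigned range.
    return signed < 0 ? signed + MEDIUMINT_MAX_VALUE : signed;
  }

  public static void main(String[] args) {
    System.out.println(toUnsignedMediumInt(-1)); // prints 16777215, the max unsigned MEDIUMINT
  }
}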
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
The class ORListener, method startXtion.
private void startXtion(QueryEvent e) {
  _currTxnStartReadTimestamp = System.nanoTime();
  _log.info("startXtion: " + e);
  if (_transaction == null) {
    _transaction = new Transaction();
  } else {
    throw new DatabusRuntimeException("Got startXtion without an endXtion for the previous transaction");
  }
}
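The method enforces a strict pairing invariant: a second startXtion before the matching endXtion is a fatal error. A stripped-down sketch of that invariant; TxnGuard and its fields are stand-ins, not databus types:

class TxnGuard {
  private Object txn; // stand-in for ORListener's Transaction state

  void start() {
    // Mirrors startXtion: starting while a transaction is open is an error.
    if (txn != null) {
      throw new IllegalStateException("start without an end for the previous transaction");
    }
    txn = new Object();
  }

  void end() {
    // The symmetric check on the end side.
    if (txn == null) {
      throw new IllegalStateException("end without a matching start");
    }
    txn = null;
  }
}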
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
The class DatabusRelayMain, method addOneProducer.
/** Overrides the HTTP relay method. */
@Override
public void addOneProducer(PhysicalSourceStaticConfig pConfig) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, InvalidConfigException {
  // Register a command to allow start/stop/status of the relay
  List<EventProducer> plist = new ArrayList<EventProducer>();
  PhysicalPartition pPartition = pConfig.getPhysicalPartition();
  MaxSCNReaderWriter maxScnReaderWriters = _maxScnReaderWriters.getOrCreateHandler(pPartition);
  LOG.info("Starting server container with maxScnReaderWriter: " + maxScnReaderWriters);
  // Get the event buffer
  DbusEventBufferAppendable dbusEventBuffer = getEventBuffer().getDbusEventBufferAppendable(pPartition);
  // Get the schema registry service
  SchemaRegistryService schemaRegistryService = getSchemaRegistryService();
  // Get a stats collector per physical source
  addPhysicalPartitionCollectors(pPartition);
  String statsCollectorName = pPartition.toSimpleString();
  /*
   * _inBoundStatsCollectors.addStatsCollector(statsCollectorName, new
   * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
   * statsCollectorName + ".inbound", true, false, getMbeanServer()));
   *
   * _outBoundStatsCollectors.addStatsCollector(statsCollectorName, new
   * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
   * statsCollectorName + ".outbound", true, false, getMbeanServer()));
   */
  // Create the event producer
  String uri = pConfig.getUri();
  if (uri == null)
    throw new DatabusException("Uri is required to start the relay");
  uri = uri.trim();
  EventProducer producer = null;
  if (uri.startsWith("jdbc:")) {
    SourceType sourceType = pConfig.getReplBitSetter().getSourceType();
    if (SourceType.TOKEN.equals(sourceType))
      throw new DatabusException("Token source type for the replication-bit-setter config cannot be used with a trigger-based Databus relay!");
    producer = new OracleEventProducerFactory().buildEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, getMbeanServer(), _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
  } else if (uri.startsWith("mock")) {
    // Get all relevant pConfig attributes
    // TODO add real instantiation
    EventProducerServiceProvider mockProvider = _producersRegistry.getEventProducerServiceProvider("mock");
    if (null == mockProvider) {
      throw new DatabusRuntimeException("relay event producer not available: " + "mock");
    }
    producer = mockProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
  } else if (uri.startsWith("gg:")) {
    producer = new GoldenGateEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
  } else if (uri.startsWith("mysql:")) {
    LOG.info("Adding OpenReplicatorEventProducer for uri: " + uri);
    final String serviceName = "or";
    EventProducerServiceProvider orProvider = _producersRegistry.getEventProducerServiceProvider(serviceName);
    if (null == orProvider) {
      throw new DatabusRuntimeException("relay event producer not available: " + serviceName);
    }
    producer = orProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
  } else {
    // Get all relevant pConfig attributes and initialize the nettyThreadPool objects
    RelayEventProducer.DatabusClientNettyThreadPools nettyThreadPools = new RelayEventProducer.DatabusClientNettyThreadPools(0, getNetworkTimeoutTimer(), getBossExecutorService(), getIoExecutorService(), getHttpChannelGroup());
    producer = new RelayEventProducer(pConfig, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters, nettyThreadPools);
  }
  // If a buffer for this partition exists, we are overwriting it.
  _producers.put(pPartition, producer);
  plist.add(producer);
  // Append a 'monitoring event producer'
  if (producer instanceof OracleEventProducer) {
    MonitoringEventProducer monitoringProducer = new MonitoringEventProducer("dbMonitor." + pPartition.toSimpleString(), pConfig.getName(), pConfig.getUri(), ((OracleEventProducer) producer).getMonitoredSourceInfos(), getMbeanServer());
    _monitoringProducers.put(pPartition, monitoringProducer);
    plist.add(monitoringProducer);
  }
  if (_csEventRequestProcessor == null)
    _csEventRequestProcessor = new ControlSourceEventsRequestProcessor(null, this, plist);
  else
    _csEventRequestProcessor.addEventProducers(plist);
  RequestProcessorRegistry processorRegistry = getProcessorRegistry();
  processorRegistry.reregister(ControlSourceEventsRequestProcessor.COMMAND_NAME, _csEventRequestProcessor);
}
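The producer type is selected purely by URI prefix, with relay chaining as the fallback. A condensed sketch of that dispatch; the method name and return labels are illustrative, not databus identifiers:

public class ProducerDispatchSketch {
  // Condensed, illustrative view of addOneProducer's URI-prefix dispatch.
  static String producerKindFor(String uri) {
    if (uri.startsWith("jdbc:"))  return "oracle-trigger";   // OracleEventProducerFactory
    if (uri.startsWith("mock"))   return "mock";             // registered "mock" provider
    if (uri.startsWith("gg:"))    return "golden-gate";      // GoldenGateEventProducer
    if (uri.startsWith("mysql:")) return "open-replicator";  // registered "or" provider
    return "relay-chaining";                                 // RelayEventProducer (default)
  }

  public static void main(String[] args) {
    System.out.println(producerKindFor("mysql://host:3306/db")); // prints: open-replicator
  }
}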
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
The class DatabusSubscription, method createPrettyNameFromSubscription.
/**
 * Given a subscription, constructs a pretty name for it.
 *
 * The expected inputs/outputs have the following formats:
 * 1. subs = ["com.linkedin.events.db.dbPrefix.tableName"]
 *    prettyName = "dbPrefix_tableName"
 *
 * 2. subs = ["com.linkedin.events.db.dbPrefix1.tableName1", "com.linkedin.events.db.dbPrefix2.tableName2"]
 *    prettyName = "dbPrefix1_tableName1_dbPrefix2_tableName2"
 *
 * 3. subs = ["espresso:/db/1/tableName1"]
 *    prettyName = "db_tableName1_1"
 *
 * 4. subs = ["espresso:/db/<wildcard>/tableName1"], where <wildcard> = *
 *    prettyName = "db_tableName1"
 *
 * 5. subs = ["espresso:/db/1/<wildcard>"], where <wildcard> = *
 *    prettyName = "db_1"
 */
public String createPrettyNameFromSubscription() {
  String s = generateSubscriptionString();
  URI u = null;
  try {
    u = new URI(s);
  } catch (URISyntaxException e) {
    throw new DatabusRuntimeException("Unable to decode a URI from the string s = " + s + " subscription = " + toString());
  }
  if (null == u.getScheme()) {
    // TODO: Have V2-style subscriptions carry an explicit codec type and return a "legacy"
    // codec here. Given a subscription string, we should be able to convert it to a
    // DatabusSubscription idempotently; converting back and forth should give the same value.
    // Subscription of type com.linkedin.databus.events.dbName.tableName
    String[] parts = s.split("\\.");
    int len = parts.length;
    if (len == 0) {
      // Error case
      String errMsg = "Unexpected format for subscription. sub = " + toString() + " string = " + s;
      throw new DatabusRuntimeException(errMsg);
    } else if (len == 1) {
      // Unit-test case: logicalSource is specified as "source1"
      return parts[0];
    } else {
      // Expected case: com.linkedin.databus.events.dbName.tableName
      String pn = parts[len - 2] + "_" + parts[len - 1];
      return pn;
    }
  } else if (u.getScheme().equals("espresso")) {
    // Given that this subscription conforms to EspressoSubscriptionUriCodec,
    // logicalSourceName (DBName.TableName) and partitionNumber (1) are guaranteed to be non-null.
    String dbName = getPhysicalPartition().getName();
    boolean isWildCardOnTables = getLogicalSource().isAllSourcesWildcard();
    String name = getLogicalPartition().getSource().getName();
    boolean isWildCardOnPartitions = getPhysicalPartition().isAnyPartitionWildcard();
    String pId = getPhysicalPartition().getId().toString();
    StringBuilder sb = new StringBuilder();
    sb.append(dbName);
    if (!isWildCardOnTables) {
      sb.append("_");
      String[] parts = name.split("\\.");
      assert (parts.length == 2);
      sb.append(parts[1]);
    }
    if (!isWildCardOnPartitions) {
      sb.append("_");
      sb.append(pId);
    }
    s = sb.toString();
  } else {
    String errMsg = "The subscription described as " + toString() + " uses neither the null (legacy) codec nor the espresso codec";
    throw new DatabusRuntimeException(errMsg);
  }
  return s;
}
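For the legacy (no-scheme) path, the rule is simply to keep the last two dot-separated components of the subscription string, joined by an underscore. A self-contained illustration of that case, matching example 1 in the javadoc:

public class PrettyNameSketch {
  public static void main(String[] args) {
    String s = "com.linkedin.events.db.dbPrefix.tableName";
    String[] parts = s.split("\\.");
    // Keep the last two components, joined with '_'.
    System.out.println(parts[parts.length - 2] + "_" + parts[parts.length - 1]);
    // prints: dbPrefix_tableName
  }
}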
Use of com.linkedin.databus.core.DatabusRuntimeException in project databus by LinkedIn.
The class DatabusBootstrapClient, method getNextBatch.
public Checkpoint getNextBatch(int batchSize, BootstrapEventCallback callBack) throws SQLException, BootstrapProcessingException, BootstrapDatabaseTooOldException, BootstrapDBException {
  boolean phaseCompleted = false;
  if (_currState.getConsumptionMode() == DbusClientMode.ONLINE_CONSUMPTION) {
    return _currState;
  }
  if (_currState.getConsumptionMode() == DbusClientMode.BOOTSTRAP_SNAPSHOT) {
    phaseCompleted = getProcessor().streamSnapShotRows(_currState, callBack);
    _currState.bootstrapCheckPoint();
    // The snapshot phase completed: finalize it and advance to the catch-up phase.
    if (phaseCompleted) {
      _bstCheckpointHandler.finalizeSnapshotPhase(_currState);
      _bstCheckpointHandler.advanceAfterSnapshotPhase(_currState);
      _currState.setBootstrapTargetScn(_targetScn);
      _bstCheckpointHandler.advanceAfterTargetScn(_currState);
    }
  } else if (_currState.getConsumptionMode() == DbusClientMode.BOOTSTRAP_CATCHUP) {
    phaseCompleted = getProcessor().streamCatchupRows(_currState, callBack);
    _currState.bootstrapCheckPoint();
    // The catch-up phase completed: finalize it and, if no more snapshots are needed,
    // switch to online consumption.
    if (phaseCompleted) {
      _bstCheckpointHandler.finalizeCatchupPhase(_currState);
      _bstCheckpointHandler.advanceAfterCatchupPhase(_currState);
      if (!_bstCheckpointHandler.needsMoreSnapshot(_currState)) {
        _currState.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
      }
    }
  } else {
    throw new DatabusRuntimeException("Unknown checkpoint type: " + _currState);
  }
  return _currState;
}
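A caller would typically invoke getNextBatch repeatedly until the returned checkpoint reports online consumption, at which point bootstrap is done. A hypothetical driver fragment; client, batchSize, and callback are stand-ins, not databus identifiers:

// Hypothetical usage: drain bootstrap batches until the client transitions online.
Checkpoint cp;
do {
  cp = client.getNextBatch(batchSize, callback);
} while (cp.getConsumptionMode() != DbusClientMode.ONLINE_CONSUMPTION);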