Usage example of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap (by caskdata):
the create method of class InMemoryStreamConsumerStateStoreFactory.
/**
 * Creates a {@link StreamConsumerStateStore} backed by an in-memory table, creating the
 * underlying table on first use.
 *
 * @param streamConfig configuration of the stream whose consumer state is stored
 * @return a state store over a non-transactional in-memory table
 * @throws IOException if table creation fails
 */
@Override
public synchronized StreamConsumerStateStore create(StreamConfig streamConfig) throws IOException {
  // The state store table is scoped to the stream's namespace.
  TableId stateTableId = StreamUtils.getStateStoreTableId(streamConfig.getStreamId().getParent());
  DatasetContext datasetContext = DatasetContext.from(stateTableId.getNamespace());
  String tableName = stateTableId.getTableName();

  // Lazily create the backing table the first time this namespace is seen.
  InMemoryTableAdmin tableAdmin = new InMemoryTableAdmin(datasetContext, tableName, cConf);
  if (!tableAdmin.exists()) {
    tableAdmin.create();
  }

  // Hand the state store a non-transactional view of the same table.
  InMemoryTable stateTable = new NoTxInMemoryTable(datasetContext, tableName, cConf);
  return new InMemoryStreamConsumerStateStore(streamConfig, stateTable);
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap (by caskdata):
the create method of class LevelDBStreamConsumerStateStoreFactory.
/**
 * Creates a {@link StreamConsumerStateStore} backed by a LevelDB table.
 *
 * @param streamConfig configuration of the stream whose consumer state is stored
 * @return a state store over the namespace-prefixed LevelDB table
 * @throws IOException if table creation fails
 */
@Override
public synchronized StreamConsumerStateStore create(StreamConfig streamConfig) throws IOException {
  // Locate the per-namespace table that holds consumer state for this stream.
  NamespaceId streamNamespace = streamConfig.getStreamId().getParent();
  TableId stateTableId = StreamUtils.getStateStoreTableId(streamNamespace);

  // Make sure the underlying LevelDB table exists before handing out the store.
  getLevelDBTableAdmin(stateTableId).create();

  // LevelDB table names carry a namespace prefix derived from the configuration.
  String physicalTableName =
    PrefixedNamespaces.namespace(cConf, stateTableId.getNamespace(), stateTableId.getTableName());
  LevelDBTableCore stateTableCore = new LevelDBTableCore(physicalTableName, tableService);
  return new LevelDBStreamConsumerStateStore(streamConfig, stateTableCore);
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap (by caskdata):
the create method of class LevelDBStreamFileConsumerFactory.
/**
 * Creates a {@link StreamConsumer} that reads stream files and records its progress in a
 * LevelDB table, ensuring the table exists first.
 *
 * @param tableId logical id of the consumer's backing table
 * @param streamConfig configuration of the stream being consumed
 * @param consumerConfig configuration of this consumer instance
 * @param stateStore store for persisting consumer state
 * @param beginConsumerState state to resume consumption from
 * @param reader reader over the stream's event files
 * @param extraFilter optional additional read filter, may be {@code null}
 * @return a LevelDB-backed stream file consumer
 * @throws IOException if the backing table cannot be ensured
 */
@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig,
                                ConsumerConfig consumerConfig, StreamConsumerStateStore stateStore,
                                StreamConsumerState beginConsumerState,
                                FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader,
                                @Nullable ReadFilter extraFilter) throws IOException {
  // Map the logical table id to its LevelDB name and make sure the table is present.
  String levelDBTableName = fromTableId(tableId);
  tableService.ensureTableExists(levelDBTableName);

  LevelDBTableCore consumerTableCore = new LevelDBTableCore(levelDBTableName, tableService);
  // Lock object keyed by table name, shared by consumers of the same table.
  Object tableLock = getDBLock(levelDBTableName);
  return new LevelDBStreamFileConsumer(cConf, streamConfig, consumerConfig, reader, stateStore,
                                       beginConsumerState, extraFilter, consumerTableCore,
                                       tableLock);
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap (by caskdata):
the createStream method of class AbstractStreamCoordinatorClient.
/**
 * Runs the given creation action under the per-stream lock and, if it yields a
 * configuration, notifies that the stream was created.
 *
 * @param streamId id of the stream being created
 * @param action creation action; a {@code null} result skips the created notification
 * @return the configuration produced by the action, possibly {@code null}
 * @throws Exception whatever the action throws
 */
@Override
public StreamConfig createStream(StreamId streamId, Callable<StreamConfig> action) throws Exception {
  // Serialize creation attempts for the same stream.
  Lock streamLock = getLock(streamId);
  streamLock.lock();
  try {
    StreamConfig createdConfig = action.call();
    // Fire the created hook only when the action actually returned a config.
    if (createdConfig != null) {
      streamCreated(streamId);
    }
    return createdConfig;
  } finally {
    streamLock.unlock();
  }
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap (by caskdata):
the runOneIteration method of class LocalStreamService.
/**
 * One scheduled pass over all known streams: lazily builds a size aggregator per stream
 * and asks each aggregator to check its accumulated size. Per-stream failures are logged
 * and do not stop the iteration or the service.
 *
 * @throws Exception if listing the streams fails
 */
@Override
protected void runOneIteration() throws Exception {
  // Get stream size - which will be the entire size - and send a notification if the size is big enough
  for (Map.Entry<NamespaceId, StreamSpecification> specEntry : streamMetaStore.listStreams().entries()) {
    StreamId streamId = specEntry.getKey().stream(specEntry.getValue().getName());
    StreamSizeAggregator aggregator = aggregators.get(streamId);
    try {
      if (aggregator == null) {
        // First time that we see this Stream here
        StreamConfig streamConfig;
        try {
          streamConfig = streamAdmin.getConfig(streamId);
        } catch (FileNotFoundException e) {
          // this is a stream that has no configuration: ignore it to avoid flooding the logs with exceptions
          continue;
        }
        aggregator = createSizeAggregator(streamId, 0, streamConfig.getNotificationThresholdMB());
      }
      aggregator.checkAggregatedSize();
    } catch (Exception e) {
      // Need to catch and not to propagate the exception, otherwise this scheduled service will be terminated
      // Just log the exception here as the next run iteration should have the problem fixed
      LOG.warn("Exception in aggregating stream size for {}", streamId, e);
    }
  }
}
Aggregations