Use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
The class FileStreamAdmin, method updateProperties.
private StreamProperties updateProperties(StreamId streamId, StreamProperties properties) throws Exception {
  StreamConfig config = getConfig(streamId);

  // Copy the existing config and override only the fields that were actually supplied.
  StreamConfig.Builder builder = StreamConfig.builder(config);
  if (properties.getTTL() != null) {
    builder.setTTL(properties.getTTL());
  }
  if (properties.getFormat() != null) {
    builder.setFormatSpec(properties.getFormat());
  }
  if (properties.getNotificationThresholdMB() != null) {
    builder.setNotificationThreshold(properties.getNotificationThresholdMB());
  }

  // Update the stream description.
  String description = properties.getDescription();
  if (description != null) {
    streamMetaStore.addStream(streamId, description);
  }

  // Persist the new config, impersonating the stream owner.
  final StreamConfig newConfig = builder.build();
  impersonator.doAs(streamId, new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      writeConfig(newConfig);
      return null;
    }
  });

  // Update system metadata for the stream.
  SystemMetadataWriter systemMetadataWriter =
    new StreamSystemMetadataWriter(metadataStore, streamId, newConfig, description);
  systemMetadataWriter.write();
  // Return the merged values so callers observe the updated configuration rather than the stale one.
  return new StreamProperties(newConfig.getTTL(), newConfig.getFormat(), newConfig.getNotificationThresholdMB());
}
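For context, the partial-update behavior can be exercised as sketched below. This is a hypothetical illustration rather than CDAP code: the stream name and TTL value are made up, and the private method is treated as callable for the sake of the example. It relies only on the three-argument StreamProperties constructor already used in the return statement above.

  // Hypothetical: update only the TTL; format and notification threshold stay
  // null and are skipped by the null checks in updateProperties above.
  StreamId streamId = NamespaceId.DEFAULT.stream("purchases");  // made-up stream name
  StreamProperties update = new StreamProperties(86400L, null, null);  // made-up TTL value
  StreamProperties merged = updateProperties(streamId, update);
  // merged now carries the new TTL together with the unchanged format and threshold.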
Use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
The class HBaseStreamFileConsumerFactory, method create.
@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig, ConsumerConfig consumerConfig,
                                StreamConsumerStateStore stateStore, StreamConsumerState beginConsumerState,
                                FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader,
                                @Nullable ReadFilter extraFilter) throws IOException {
  // Pre-split the consumer table and spread row keys across the splits with a one-byte hash prefix.
  int splits = cConf.getInt(Constants.Stream.CONSUMER_TABLE_PRESPLITS);
  AbstractRowKeyDistributor distributor =
    new RowKeyDistributorByHashPrefix(new RowKeyDistributorByHashPrefix.OneByteSimpleHash(splits));
  byte[][] splitKeys = HBaseTableUtil.getSplitKeys(splits, splits, distributor);

  // Build the table descriptor and create the HBase table if it does not exist yet.
  TableId hBaseTableId = tableUtil.createHTableId(new NamespaceId(tableId.getNamespace()), tableId.getTableName());
  TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hBaseTableId, cConf);
  ColumnFamilyDescriptorBuilder cfdBuilder =
    HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
  tdBuilder.addColumnFamily(cfdBuilder.build());
  tdBuilder.addProperty(QueueConstants.DISTRIBUTOR_BUCKETS, Integer.toString(splits));
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);
  }

  // Buffer writes client-side; the consumer flushes explicitly.
  HTable hTable = tableUtil.createHTable(hConf, hBaseTableId);
  hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
  hTable.setAutoFlushTo(false);
  return new HBaseStreamFileConsumer(cConf, streamConfig, consumerConfig, tableUtil, hTable, reader, stateStore,
                                     beginConsumerState, extraFilter,
                                     createKeyDistributor(hTable.getTableDescriptor()));
}
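The distributor is what makes the pre-splitting effective: it prepends a one-byte hash to each row key so writes spread evenly across the pre-split regions. A minimal sketch of that round trip, assuming the HBaseWD-style AbstractRowKeyDistributor API the snippet already uses; the bucket count and key are made up:

  // Hypothetical sketch: distribute a row key over 8 buckets and recover the original.
  AbstractRowKeyDistributor distributor =
    new RowKeyDistributorByHashPrefix(new RowKeyDistributorByHashPrefix.OneByteSimpleHash(8));
  byte[] original = Bytes.toBytes("some-row-key");               // made-up key
  byte[] distributed = distributor.getDistributedKey(original);  // one hash byte prepended
  byte[] recovered = distributor.getOriginalKey(distributed);    // strips the prefix again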
Use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
The class AbstractStreamFileConsumerFactory, method create.
@Override
public final StreamConsumer create(StreamId streamId, String namespace, ConsumerConfig consumerConfig) throws IOException {
  // Make sure the stream exists before wiring up a consumer for it.
  StreamConfig streamConfig = StreamUtils.ensureExists(streamAdmin, streamId);
  TableId tableId = getTableId(streamId, namespace);

  // Restore this consumer's offset state for its (groupId, instanceId) pair.
  StreamConsumerStateStore stateStore = stateStoreFactory.create(streamConfig);
  StreamConsumerState consumerState = stateStore.get(consumerConfig.getGroupId(), consumerConfig.getInstanceId());

  // Delegate storage-specific construction to the subclass; expired events are skipped via a TTL filter.
  return create(tableId, streamConfig, consumerConfig, stateStore, consumerState,
                createReader(streamConfig, consumerState), new TTLReadFilter(streamConfig.getTTL()));
}
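This is a textbook template method: the final public create does the shared work (existence check, table id resolution, state restoration), then defers to the protected abstract create that each storage backend overrides; the HBaseStreamFileConsumerFactory above is one such override. A minimal sketch of the shape, using simplified placeholder names and types rather than CDAP's:

  // Hypothetical sketch of the template-method shape; all names here are placeholders.
  abstract class ConsumerFactoryShape {
    // Shared bookkeeping lives in the final method...
    public final String create(String streamId) {
      String state = restoreState(streamId);
      return doCreate(streamId, state);
    }
    protected String restoreState(String streamId) {
      return "state-of-" + streamId;  // stands in for the state-store lookup
    }
    // ...while each backend supplies only the storage-specific step.
    protected abstract String doCreate(String streamId, String state);
  }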
Use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
The class HBaseStreamConsumerStateStoreFactory, method create.
@Override
public synchronized StreamConsumerStateStore create(StreamConfig streamConfig) throws IOException {
  NamespaceId namespace = streamConfig.getStreamId().getParent();
  TableId streamStateStoreTableId = StreamUtils.getStateStoreTableId(namespace);
  TableId hbaseTableId = tableUtil.createHTableId(new NamespaceId(streamStateStoreTableId.getNamespace()),
                                                  streamStateStoreTableId.getTableName());

  // Check for the state-store table first; creating it is the rare path.
  boolean tableExists;
  try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
    tableExists = tableUtil.tableExists(admin, hbaseTableId);
  }
  if (!tableExists) {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
      TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hbaseTableId, cConf);
      ColumnFamilyDescriptorBuilder cfdBuilder =
        HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
      tdBuilder.addColumnFamily(cfdBuilder.build());
      // No split keys: the state-store table starts as a single region.
      ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
    }
  }

  HTable hTable = tableUtil.createHTable(hConf, hbaseTableId);
  hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
  hTable.setAutoFlushTo(false);
  return new HBaseStreamConsumerStateStore(streamConfig, hTable);
}
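The exists-then-create sequence above is the usual idempotent pattern for HBase DDL. Stripped of CDAP's wrappers, it looks roughly like the following sketch, which assumes the plain HBase 1.x client API (org.apache.hadoop.hbase and org.apache.hadoop.hbase.client); the table and column family names are made up:

  // Hypothetical plain-HBase equivalent of the idempotent table creation above.
  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    TableName name = TableName.valueOf("stream.state.store");  // made-up table name
    if (!admin.tableExists(name)) {
      HTableDescriptor descriptor = new HTableDescriptor(name);
      descriptor.addFamily(new HColumnDescriptor("cf"));       // made-up column family
      admin.createTable(descriptor);                           // single region, no split keys
    }
  }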
Use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
The class MultiLiveStreamFileReaderTestBase, method testMultiFileReader.
@Test
public void testMultiFileReader() throws Exception {
  String streamName = "multiReader";
  StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
  Location location = getLocationFactory().create(streamName);
  location.mkdirs();

  // Create a stream with one partition.
  StreamConfig config = new StreamConfig(streamId, Long.MAX_VALUE, 10000, Long.MAX_VALUE, location, null, 1000);

  // Write out 1000 events spread across 5 files (200 per file) with interleaved timestamps:
  // writer i produces timestamps i, 5 + i, 10 + i, ..., so together the files cover 0..999 exactly once.
  List<FileWriter<StreamEvent>> writers = Lists.newArrayList();
  for (int i = 0; i < 5; i++) {
    FileWriter<StreamEvent> writer = createWriter(config, "bucket" + i);
    writers.add(writer);
    for (int j = 0; j < 200; j++) {
      long timestamp = j * 5 + i;
      writer.append(StreamFileTestUtils.createEvent(timestamp, "Testing " + timestamp));
    }
  }

  // Flush all writers.
  for (FileWriter<StreamEvent> writer : writers) {
    writer.flush();
  }

  // Create a multi stream file reader over all five event files.
  List<StreamFileOffset> sources = Lists.newArrayList();
  Location partitionLocation = StreamUtils.createPartitionLocation(config.getLocation(), 0, Long.MAX_VALUE);
  for (int i = 0; i < 5; i++) {
    Location eventFile = StreamUtils.createStreamLocation(partitionLocation, "bucket" + i, 0, StreamFileType.EVENT);
    sources.add(new StreamFileOffset(eventFile, 0L, 0));
  }

  // Read back all events written so far; they must come out in timestamp order.
  MultiLiveStreamFileReader reader = new MultiLiveStreamFileReader(config, sources);
  List<StreamEvent> events = Lists.newArrayList();
  long expectedTimestamp = 0L;
  for (int i = 0; i < 10; i++) {
    Assert.assertEquals(100, reader.read(events, 100, 0, TimeUnit.SECONDS));
    Assert.assertEquals(100, events.size());
    for (StreamEvent event : events) {
      Assert.assertEquals(expectedTimestamp, event.getTimestamp());
      Assert.assertEquals("Testing " + expectedTimestamp, Charsets.UTF_8.decode(event.getBody()).toString());
      expectedTimestamp++;
    }
    events.clear();
  }
  Assert.assertEquals(0, reader.read(events, 1, 1, TimeUnit.SECONDS));

  // Write some more events to the first three writers.
  for (int i = 0; i < 3; i++) {
    FileWriter<StreamEvent> writer = writers.get(i);
    for (int j = 0; j < 10; j++) {
      long timestamp = 1000 + j * 3 + i;
      writer.append(StreamFileTestUtils.createEvent(timestamp, "Testing " + timestamp));
    }
  }

  // Close all writers.
  for (FileWriter<StreamEvent> writer : writers) {
    writer.close();
  }

  // Continue reading; the reader picks up the 30 newly written events.
  Assert.assertEquals(30, reader.read(events, 30, 2, TimeUnit.SECONDS));
  Assert.assertEquals(30, events.size());
  for (StreamEvent event : events) {
    Assert.assertEquals(expectedTimestamp, event.getTimestamp());
    Assert.assertEquals("Testing " + expectedTimestamp, Charsets.UTF_8.decode(event.getBody()).toString());
    expectedTimestamp++;
  }

  // Should get no more events.
  Assert.assertEquals(0, reader.read(events, 1, 1, TimeUnit.SECONDS));
  reader.close();
}
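The read contract the test exercises (read(events, maxEvents, timeout, unit) appends up to maxEvents events to the collection, waits at most the given timeout, and returns how many were read) lends itself to a simple polling loop. A hypothetical sketch; the stop flag and the process handler are made up:

  // Hypothetical consumer loop over a MultiLiveStreamFileReader; 'stopped' and 'process' are placeholders.
  List<StreamEvent> batch = Lists.newArrayList();
  while (!stopped) {
    int count = reader.read(batch, 100, 1, TimeUnit.SECONDS);  // up to 100 events, wait at most 1s
    for (StreamEvent event : batch) {
      process(event);
    }
    batch.clear();
  }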