use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
the class InMemoryStreamFileWriterFactory method create.
@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
  final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
  final List<TransactionAware> txAwares = Lists.newArrayList();
  if (producer instanceof TransactionAware) {
    txAwares.add((TransactionAware) producer);
  }
  final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);
  // Adapt the QueueProducer to the FileWriter interface: events are buffered
  // in memory and enqueued transactionally on flush().
  return new FileWriter<StreamEvent>() {
    private final List<StreamEvent> events = Lists.newArrayList();

    @Override
    public void append(StreamEvent event) throws IOException {
      events.add(event);
    }

    @Override
    public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
      Iterators.addAll(this.events, events);
    }

    @Override
    public void close() throws IOException {
      producer.close();
    }

    @Override
    public void flush() throws IOException {
      try {
        txExecutor.execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (StreamEvent event : events) {
              producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
            }
            events.clear();
          }
        });
      } catch (TransactionFailureException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
  };
}
use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
the class StreamConsumerTestBase method testTTLStartingFile.
@Category(SlowTests.class)
@Test
public void testTTLStartingFile() throws Exception {
  String stream = "testTTLStartingFile";
  StreamId streamId = TEST_NAMESPACE.stream(stream);
  StreamAdmin streamAdmin = getStreamAdmin();
  // Create the stream with a TTL of 3 seconds and a partition duration of 3 seconds
  final long ttl = TimeUnit.SECONDS.toMillis(3);
  Properties streamProperties = new Properties();
  streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
  streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
  streamAdmin.create(streamId, streamProperties);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1, 1L, 1));
  StreamConsumerFactory consumerFactory = getConsumerFactory();
  StreamConsumer consumer = consumerFactory.create(streamId, stream, new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
  StreamConsumer newConsumer;
  Set<StreamEvent> expectedEvents = Sets.newTreeSet(STREAM_EVENT_COMPARATOR);
  try {
    // Create a new consumer for second-consumer verification.
    // The consumer must be created before writing events because in HBase, consumer creation can take a couple of seconds.
    newConsumer = consumerFactory.create(streamId, stream, new ConsumerConfig(1L, 0, 1, DequeueStrategy.FIFO, null));
    // Write 20 events into a partition that will expire once we sleep for the TTL
    writeEvents(streamConfig, "Phase 0 expired event ", 20);
    Thread.sleep(ttl);
    verifyEvents(consumer, expectedEvents);
    // Also verify with the new consumer
    try {
      verifyEvents(newConsumer, expectedEvents);
    } finally {
      newConsumer.close();
    }
    // Create a new consumer for second-consumer verification (with clean state).
    // The consumer must be created before writing events because in HBase, consumer creation can take a couple of seconds.
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1));
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1, 1L, 1));
    newConsumer = consumerFactory.create(streamId, stream, new ConsumerConfig(1L, 0, 1, DequeueStrategy.FIFO, null));
    // Write 20 events into a partition and read them back immediately; they should not have expired.
    expectedEvents.addAll(writeEvents(streamConfig, "Phase 1 non-expired event ", 20));
    verifyEvents(consumer, expectedEvents);
    // Also verify with the new consumer
    try {
      verifyEvents(newConsumer, expectedEvents);
    } finally {
      newConsumer.close();
    }
    // Create a new consumer for second-consumer verification (with clean state).
    // The consumer must be created before writing events because in HBase, consumer creation can take a couple of seconds.
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1));
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1, 1L, 1));
    newConsumer = consumerFactory.create(streamId, stream, new ConsumerConfig(1L, 0, 1, DequeueStrategy.FIFO, null));
    // Write 20 events into a partition that will expire once we sleep for the TTL.
    // This writes to a new partition, different from the one used by the first batch.
    // Also, because we sleep for the TTL, the previous batch expires as well.
    expectedEvents.clear();
    writeEvents(streamConfig, "Phase 2 expired event ", 20);
    Thread.sleep(ttl);
    verifyEvents(consumer, expectedEvents);
    // Also verify with the new consumer
    try {
      verifyEvents(newConsumer, expectedEvents);
    } finally {
      newConsumer.close();
    }
    // Create a new consumer for second-consumer verification (with clean state).
    // The consumer must be created before writing events because in HBase, consumer creation can take a couple of seconds.
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1));
    streamAdmin.configureGroups(streamId, ImmutableMap.of(0L, 1, 1L, 1));
    newConsumer = consumerFactory.create(streamId, stream, new ConsumerConfig(1L, 0, 1, DequeueStrategy.FIFO, null));
    // Write 20 events into a partition and read them back immediately; they should not expire.
    expectedEvents.addAll(writeEvents(streamConfig, "Phase 3 non-expired event ", 20));
    verifyEvents(consumer, expectedEvents);
    // Also verify with the new consumer
    try {
      verifyEvents(newConsumer, expectedEvents);
    } finally {
      newConsumer.close();
    }
    // There should be no more pending events
    expectedEvents.clear();
    verifyEvents(consumer, expectedEvents);
  } finally {
    consumer.close();
  }
}
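The test depends on a writeEvents helper from the test base. A simplified sketch of what such a helper could look like, assuming events are written through a FileWriter<StreamEvent> obtained for the stream (the writer parameter, empty header map, and timestamping are assumptions, not the actual test-base implementation):

private List<StreamEvent> writeEvents(FileWriter<StreamEvent> writer, String prefix, int count) throws IOException {
  List<StreamEvent> events = Lists.newArrayList();
  long timestamp = System.currentTimeMillis();
  for (int i = 0; i < count; i++) {
    // Body is "<prefix><i>", e.g. "Phase 0 expired event 0"
    StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), Charsets.UTF_8.encode(prefix + i), timestamp);
    writer.append(event);
    events.add(event);
  }
  writer.flush();
  return events;
}

Because the stream's partition duration equals its TTL, sleeping for the TTL guarantees the partition holding the previous batch has fully expired before the next verification.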
use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
the class StreamConsumerTestBase method testFIFORollback.
@Test
public void testFIFORollback() throws Exception {
  String stream = "testFIFORollback";
  StreamId streamId = TEST_NAMESPACE.stream(stream);
  StreamAdmin streamAdmin = getStreamAdmin();
  streamAdmin.create(streamId);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  // Write 5 events
  writeEvents(streamConfig, "Testing ", 5);
  streamAdmin.configureInstances(streamId, 0L, 2);
  StreamConsumerFactory consumerFactory = getConsumerFactory();
  StreamConsumer consumer0 = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 0, 2, DequeueStrategy.FIFO, null));
  StreamConsumer consumer1 = consumerFactory.create(streamId, "fifo.rollback", new ConsumerConfig(0L, 1, 2, DequeueStrategy.FIFO, null));
  // Try to dequeue using both consumers
  TransactionContext context0 = createTxContext(consumer0);
  TransactionContext context1 = createTxContext(consumer1);
  context0.start();
  context1.start();
  DequeueResult<StreamEvent> result0 = consumer0.poll(1, 1, TimeUnit.SECONDS);
  DequeueResult<StreamEvent> result1 = consumer1.poll(1, 1, TimeUnit.SECONDS);
  Assert.assertEquals("Testing 0", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
  Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
  // Commit the first transaction, roll back the second one
  context0.finish();
  context1.abort();
  // Dequeue again with both consumers
  context0.start();
  context1.start();
  result0 = consumer0.poll(1, 1, TimeUnit.SECONDS);
  result1 = consumer1.poll(1, 1, TimeUnit.SECONDS);
  // Expect consumer 0 to keep proceeding, while consumer 1 retries the entry it claimed in the previous transaction.
  // This is an optimization in FIFO mode that avoids going back and rescanning.
  Assert.assertEquals("Testing 2", Charsets.UTF_8.decode(result0.iterator().next().getBody()).toString());
  Assert.assertEquals("Testing 1", Charsets.UTF_8.decode(result1.iterator().next().getBody()).toString());
  // Commit both
  context0.finish();
  context1.finish();
  consumer0.close();
  consumer1.close();
}
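createTxContext is another helper from the test base. Since StreamConsumer implements TransactionAware, a plausible sketch is to wrap each consumer in a Tephra TransactionContext, assuming the test base holds a TransactionSystemClient (the txClient field name is an assumption):

private TransactionContext createTxContext(TransactionAware... txAwares) {
  // Each consumer gets its own context, so committing one transaction while
  // aborting the other exercises the FIFO rollback path tested above.
  return new TransactionContext(txClient, txAwares);
}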
use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
the class StreamTailer method main.
public static void main(String[] args) throws Exception {
  if (args.length < 1) {
    System.out.println(String.format("Usage: java %s <streamName>", StreamTailer.class.getName()));
    return;
  }
  String streamName = args[0];
  CConfiguration cConf = CConfiguration.create();
  Configuration hConf = new Configuration();
  String txClientId = StreamTailer.class.getName();
  Injector injector = Guice.createInjector(new ConfigModule(cConf, hConf),
                                           new DataFabricModules(txClientId).getDistributedModules(),
                                           new DataSetsModules().getDistributedModules(),
                                           new LocationRuntimeModule().getDistributedModules(),
                                           new ExploreClientModule(),
                                           new ViewAdminModules().getDistributedModules(),
                                           new StreamAdminModules().getDistributedModules(),
                                           new AuthorizationEnforcementModule().getDistributedModules(),
                                           new AuthenticationContextModules().getMasterModule(),
                                           new NotificationFeedClientModule());
  StreamAdmin streamAdmin = injector.getInstance(StreamAdmin.class);
  // TODO: get the namespace from command-line arguments
  StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  Location streamLocation = streamConfig.getLocation();
  List<Location> eventFiles = Lists.newArrayList();
  for (Location partition : streamLocation.list()) {
    if (!partition.isDirectory()) {
      continue;
    }
    for (Location file : partition.list()) {
      if (StreamFileType.EVENT.isMatched(file.getName())) {
        eventFiles.add(file);
      }
    }
  }
  int generation = StreamUtils.getGeneration(streamConfig);
  MultiLiveStreamFileReader reader = new MultiLiveStreamFileReader(streamConfig,
      ImmutableList.copyOf(Iterables.transform(eventFiles, createOffsetConverter(generation))));
  List<StreamEvent> events = Lists.newArrayList();
  while (reader.read(events, 10, 100, TimeUnit.MILLISECONDS) >= 0) {
    for (StreamEvent event : events) {
      System.out.println(event.getTimestamp() + " " + Charsets.UTF_8.decode(event.getBody()));
    }
    events.clear();
  }
  reader.close();
}
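createOffsetConverter turns each event-file Location into a starting offset for the multi-file reader. A sketch of what it plausibly does, reading every file from the beginning (the StreamFileOffset constructor arguments are an assumption):

private static Function<Location, StreamFileOffset> createOffsetConverter(final int generation) {
  return new Function<Location, StreamFileOffset>() {
    @Override
    public StreamFileOffset apply(Location eventLocation) {
      // Start at offset 0 of each event file within the given generation,
      // so the tailer replays the stream from the earliest available event.
      return new StreamFileOffset(eventLocation, 0L, generation);
    }
  };
}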
use of co.cask.cdap.data2.transaction.stream.StreamConfig in project cdap by caskdata.
the class LocalStreamService method initialize.
@Override
protected void initialize() throws Exception {
  for (Map.Entry<NamespaceId, StreamSpecification> streamSpecEntry : streamMetaStore.listStreams().entries()) {
    StreamId streamId = streamSpecEntry.getKey().stream(streamSpecEntry.getValue().getName());
    StreamConfig config;
    try {
      config = streamAdmin.getConfig(streamId);
    } catch (FileNotFoundException e) {
      // TODO: this kind of inconsistency should not happen. [CDAP-5722]
      LOG.warn("Inconsistent stream state: Stream '{}' exists in the meta store but its configuration file does not exist", streamId);
      continue;
    } catch (Exception e) {
      LOG.warn("Inconsistent stream state: Stream '{}' exists in the meta store but its configuration cannot be read:", streamId, e);
      continue;
    }
    long eventsSize = getStreamEventsSize(streamId);
    createSizeAggregator(streamId, eventsSize, config.getNotificationThresholdMB());
  }
}
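createSizeAggregator seeds each stream's size aggregator with the size observed at startup and the stream's notification threshold (in MB). An illustrative sketch of the threshold check such an aggregator might perform; all names below are assumptions, not the actual implementation:

// Illustrative only: decide whether the stream has ingested enough new data
// since the last notification to warrant publishing another one.
long baseSize = eventsSize;  // size recorded at initialization
long thresholdBytes = config.getNotificationThresholdMB() * 1024L * 1024L;
long currentSize = getStreamEventsSize(streamId);  // re-polled periodically
if (currentSize - baseSize >= thresholdBytes) {
  baseSize = currentSize;  // reset the baseline after publishing a notification
}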