Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class DequeueFilter, method filterRowCells.
@Override
public void filterRowCells(List<Cell> cells) {
  byte[] dataBytes = null;
  byte[] metaBytes = null;
  byte[] stateBytes = null;
  // The list is very short, so it is ok to loop through it to find the columns.
  for (Cell cell : cells) {
    if (CellUtil.matchingQualifier(cell, QueueEntryRow.DATA_COLUMN)) {
      dataBytes = CellUtil.cloneValue(cell);
    } else if (CellUtil.matchingQualifier(cell, QueueEntryRow.META_COLUMN)) {
      metaBytes = CellUtil.cloneValue(cell);
    } else if (CellUtil.matchingQualifier(cell, stateColumnName)) {
      stateBytes = CellUtil.cloneValue(cell);
    }
  }
  if (dataBytes == null || metaBytes == null) {
    skipRow = true;
    return;
  }
  QueueEntryRow.CanConsume canConsume =
      QueueEntryRow.canConsume(consumerConfig, transaction, writePointer, counter, metaBytes, stateBytes);
  // Only skip the row when canConsume == NO, so that in case of NO_INCLUDING_ALL_OLDER, the client
  // can still see the row and move the scan start row.
  skipRow = canConsume == QueueEntryRow.CanConsume.NO;
}
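For orientation, here is a minimal sketch of building the ConsumerConfig that a filter like this evaluates against. The constructor arguments mirror the calls in the tests below; the parameter meanings (group id, instance id, group size, dequeue strategy, hash key) are our reading of those calls, not documented API.

import co.cask.cdap.data2.queue.ConsumerConfig;
import co.cask.cdap.data2.queue.DequeueStrategy;

// Sketch: one consumer group (id 0) with three FIFO instances. Each instance
// gets its own ConsumerConfig; only the instance id differs between them.
// The trailing null is the hash key, which FIFO dequeueing does not need
// (an assumption based on the test code on this page).
for (int instanceId = 0; instanceId < 3; instanceId++) {
  ConsumerConfig config = new ConsumerConfig(0L, instanceId, 3, DequeueStrategy.FIFO, null);
  // ... pass config to a consumer factory or, as above, to a DequeueFilter.
}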
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class StreamConsumerTestBase, method testFIFOReconfigure.
@Test
public void testFIFOReconfigure() throws Exception {
  String stream = "testReconfigure";
  StreamId streamId = TEST_NAMESPACE.stream(stream);
  StreamAdmin streamAdmin = getStreamAdmin();
  streamAdmin.create(streamId);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  // Write 5 events.
  writeEvents(streamConfig, "Testing ", 5);
  // Configure 3 consumer instances.
  streamAdmin.configureInstances(streamId, 0L, 3);
  StreamConsumerFactory consumerFactory = getConsumerFactory();
  // Start three consumers.
  List<StreamConsumer> consumers = Lists.newArrayList();
  for (int i = 0; i < 3; i++) {
    consumers.add(consumerFactory.create(streamId, "fifo.reconfigure",
                                         new ConsumerConfig(0L, i, 3, DequeueStrategy.FIFO, null)));
  }
  List<TransactionContext> txContexts = Lists.newArrayList();
  for (StreamConsumer consumer : consumers) {
    txContexts.add(createTxContext(consumer));
  }
  for (TransactionContext txContext : txContexts) {
    txContext.start();
  }
  // Consume an item from each consumer, but only have the first one commit.
  for (int i = 0; i < consumers.size(); i++) {
    DequeueResult<StreamEvent> result = consumers.get(i).poll(1, 1, TimeUnit.SECONDS);
    Assert.assertEquals("Testing " + i, Charsets.UTF_8.decode(result.iterator().next().getBody()).toString());
    if (i == 0) {
      txContexts.get(i).finish();
    } else {
      txContexts.get(i).abort();
    }
  }
  for (StreamConsumer consumer : consumers) {
    consumer.close();
  }
  // Reconfigure to have two consumers.
  streamAdmin.configureInstances(streamId, 0L, 2);
  consumers.clear();
  for (int i = 0; i < 2; i++) {
    consumers.add(consumerFactory.create(streamId, "fifo.reconfigure",
                                         new ConsumerConfig(0L, i, 2, DequeueStrategy.FIFO, null)));
  }
  txContexts.clear();
  for (StreamConsumer consumer : consumers) {
    txContexts.add(createTxContext(consumer));
  }
  // Consume items from the two consumers; between them they should see all four uncommitted events.
  Set<String> messages = Sets.newTreeSet();
  boolean done;
  do {
    for (TransactionContext txContext : txContexts) {
      txContext.start();
    }
    done = true;
    for (int i = 0; i < consumers.size(); i++) {
      DequeueResult<StreamEvent> result = consumers.get(i).poll(1, 1, TimeUnit.SECONDS);
      if (result.isEmpty()) {
        continue;
      }
      done = false;
      messages.add(Charsets.UTF_8.decode(result.iterator().next().getBody()).toString());
      txContexts.get(i).finish();
    }
  } while (!done);
  Assert.assertEquals(4, messages.size());
  int count = 1;
  for (String msg : messages) {
    Assert.assertEquals("Testing " + count, msg);
    count++;
  }
  for (StreamConsumer consumer : consumers) {
    consumer.close();
  }
}
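The reconfiguration pattern above generalizes: close every consumer, update the group's instance count through StreamAdmin.configureInstances, then recreate each consumer with the new group size. A hypothetical helper capturing that sequence (the name resize and its shape are ours, not CDAP API; it uses only calls that appear in the test above):

// Hypothetical helper, not part of CDAP: resize a FIFO consumer group.
// Callers must close the previous consumers before invoking it, as the
// test above does.
private List<StreamConsumer> resize(StreamAdmin streamAdmin, StreamConsumerFactory factory,
    StreamId streamId, String consumerNamespace, long groupId, int newSize) throws Exception {
  streamAdmin.configureInstances(streamId, groupId, newSize);
  List<StreamConsumer> consumers = Lists.newArrayList();
  for (int i = 0; i < newSize; i++) {
    consumers.add(factory.create(streamId, consumerNamespace,
        new ConsumerConfig(groupId, i, newSize, DequeueStrategy.FIFO, null)));
  }
  return consumers;
}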
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class StreamConsumerTestBase, method testTTLMultipleEventsWithSameTimestamp.
@Test
public void testTTLMultipleEventsWithSameTimestamp() throws Exception {
  String stream = "testTTLMultipleEventsWithSameTimestamp";
  StreamId streamId = TEST_NAMESPACE.stream(stream);
  StreamAdmin streamAdmin = getStreamAdmin();
  // Create a stream with a TTL of 1 day.
  final long ttl = TimeUnit.DAYS.toMillis(1);
  final long currentTime = System.currentTimeMillis();
  final long increment = TimeUnit.SECONDS.toMillis(1);
  final long approxEarliestNonExpiredTime = currentTime - TimeUnit.HOURS.toMillis(1);
  Properties streamProperties = new Properties();
  streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
  streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
  streamAdmin.create(streamId, streamProperties);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  streamAdmin.configureInstances(streamId, 0L, 1);
  StreamConsumerFactory consumerFactory = getConsumerFactory();
  Assert.assertEquals(ttl, streamConfig.getTTL());
  Assert.assertEquals(ttl, streamConfig.getPartitionDuration());
  // Write 10 expired events with timestamp 0.
  writeEvents(streamConfig, "Old event ", 10, new ConstantClock(0));
  // Write 40 non-expired events with timestamps in approxEarliestNonExpiredTime..currentTime.
  Set<StreamEvent> expectedEvents = Sets.newTreeSet(STREAM_EVENT_COMPARATOR);
  FileWriter<StreamEvent> writer = getFileWriterFactory().create(streamConfig, 0);
  try {
    expectedEvents.addAll(writeEvents(writer, "New event pre-flush ", 20,
                                      new IncrementingClock(approxEarliestNonExpiredTime, increment, 5)));
    writer.flush();
    expectedEvents.addAll(writeEvents(writer, "New event post-flush ", 20,
                                      new IncrementingClock(approxEarliestNonExpiredTime + 1, increment, 5)));
  } finally {
    writer.close();
  }
  StreamConsumer consumer = consumerFactory.create(streamId, stream,
                                                   new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
  verifyEvents(consumer, expectedEvents);
  TransactionContext txContext = createTxContext(consumer);
  txContext.start();
  try {
    // There should be no more pending events.
    DequeueResult<StreamEvent> result = consumer.poll(1, 1, TimeUnit.SECONDS);
    Assert.assertTrue(result.isEmpty());
  } finally {
    txContext.finish();
  }
  consumer.close();
}
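The expiry arithmetic both TTL tests rely on is simple: an event stamped ts is expired once now - ts exceeds the stream's TTL. A standalone sketch of that rule (the method name isExpired is ours, not CDAP API):

// Sketch of the TTL rule: events written at epoch 0 are long expired against
// a one-day TTL, while events stamped one hour ago are still readable.
static boolean isExpired(long eventTs, long now, long ttlMillis) {
  return now - eventTs > ttlMillis;
}
// isExpired(0, System.currentTimeMillis(), TimeUnit.DAYS.toMillis(1))  -> true
// isExpired(System.currentTimeMillis() - TimeUnit.HOURS.toMillis(1),
//           System.currentTimeMillis(), TimeUnit.DAYS.toMillis(1))     -> false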
Use of co.cask.cdap.data2.queue.ConsumerConfig in project cdap by caskdata.
The class StreamConsumerTestBase, method testTTL.
@Test
public void testTTL() throws Exception {
  String stream = "testTTL";
  StreamId streamId = TEST_NAMESPACE.stream(stream);
  StreamAdmin streamAdmin = getStreamAdmin();
  // Create a stream with a TTL of 1 day.
  final long ttl = TimeUnit.DAYS.toMillis(1);
  final long currentTime = System.currentTimeMillis();
  final long increment = TimeUnit.SECONDS.toMillis(1);
  final long approxEarliestNonExpiredTime = currentTime - TimeUnit.HOURS.toMillis(1);
  Properties streamProperties = new Properties();
  streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
  streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
  streamAdmin.create(streamId, streamProperties);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  streamAdmin.configureInstances(streamId, 0L, 1);
  StreamConsumerFactory consumerFactory = getConsumerFactory();
  Assert.assertEquals(ttl, streamConfig.getTTL());
  Assert.assertEquals(ttl, streamConfig.getPartitionDuration());
  Set<StreamEvent> expectedEvents = Sets.newTreeSet(STREAM_EVENT_COMPARATOR);
  FileWriter<StreamEvent> writer = getFileWriterFactory().create(streamConfig, 0);
  try {
    // Write 20 expired events.
    writeEvents(streamConfig, "Old event ", 20, new IncrementingClock(0, 1));
    // Write 12 non-expired events.
    expectedEvents.addAll(writeEvents(streamConfig, "New event ", 12,
                                      new IncrementingClock(approxEarliestNonExpiredTime, increment)));
  } finally {
    writer.close();
  }
  // Dequeue from the stream. Should only get the 12 non-expired events.
  StreamConsumer consumer = consumerFactory.create(streamId, stream,
                                                   new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null));
  try {
    verifyEvents(consumer, expectedEvents);
    TransactionContext txContext = createTxContext(consumer);
    txContext.start();
    try {
      // There should be no more pending events.
      DequeueResult<StreamEvent> result = consumer.poll(1, 2, TimeUnit.SECONDS);
      Assert.assertTrue(result.isEmpty());
    } finally {
      txContext.finish();
    }
  } finally {
    consumer.close();
  }
}
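Both TTL tests configure their stream identically; the property setup isolates to the short sketch below, using only calls that appear above. Setting PARTITION_DURATION equal to the TTL presumably keeps all live events within a single time partition, though that rationale is our inference, not something the tests state.

// Create a stream whose events expire after one day.
Properties streamProperties = new Properties();
long ttl = TimeUnit.DAYS.toMillis(1);
streamProperties.setProperty(Constants.Stream.TTL, Long.toString(ttl));
streamProperties.setProperty(Constants.Stream.PARTITION_DURATION, Long.toString(ttl));
streamAdmin.create(streamId, streamProperties);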