Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata:
class ConcurrentMessageWriterTest, method testMaxSequence.
@Test
public void testMaxSequence() throws IOException {
  // Tests the case when a single StoreRequest carries more than SEQUENCE_ID_LIMIT (65536) payloads.
  // Entries beyond the max seqId are expected to roll over to the next timestamp,
  // with the seqId reset to start from 0 again.

  // Generate SEQUENCE_ID_LIMIT + 1 payloads so exactly one message spills past the limit.
  int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT + 1;
  List<String> payloads = new ArrayList<>(msgCount);
  for (int i = 0; i < msgCount; i++) {
    payloads.add(Integer.toString(i));
  }

  TopicId topicId = new NamespaceId("ns1").topic("t1");
  TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);

  // Write all payloads in a single store request.
  TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
  ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter);
  writer.persist(new TestStoreRequest(topicId, payloads), metadata);

  List<RawMessage> messages = testWriter.getMessages().get(topicId);
  Assert.assertEquals(msgCount, messages.size());

  // The first SEQUENCE_ID_LIMIT messages should share the same timestamp,
  // with seqId running from 0 to SEQUENCE_ID_LIMIT - 1.
  for (int i = 0; i < StoreRequestWriter.SEQUENCE_ID_LIMIT; i++) {
    MessageId id = new MessageId(messages.get(i).getId());
    Assert.assertEquals(0L, id.getPublishTimestamp());
    Assert.assertEquals((short) i, id.getSequenceId());
  }

  // The (SEQUENCE_ID_LIMIT + 1)th message should roll over to the next timestamp with seqId reset to 0.
  // Fix: assert on getSequenceId() — consistent with the loop above and with the comment's intent.
  // The previous getPayloadSequenceId() check was vacuous, since the payload seqId of a
  // non-payload message is always 0 regardless of rollover behavior.
  MessageId id = new MessageId(messages.get(msgCount - 1).getId());
  Assert.assertEquals(1L, id.getPublishTimestamp());
  Assert.assertEquals(0, id.getSequenceId());
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata:
class ConcurrentMessageWriterTest, method testBasic.
@Test
public void testBasic() throws IOException {
  // Two topics in different namespaces; writes to each should be stamped independently
  // by the incremental time provider (first persist call -> timestamp 0, second -> timestamp 1).
  TopicId firstTopic = new NamespaceId("ns1").topic("t1");
  TopicId secondTopic = new NamespaceId("ns2").topic("t2");
  TopicMetadata firstMetadata = new TopicMetadata(firstTopic, new HashMap<String, String>(), 1);
  TopicMetadata secondMetadata = new TopicMetadata(secondTopic, new HashMap<String, String>(), 1);

  TestStoreRequestWriter underlyingWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
  ConcurrentMessageWriter messageWriter = new ConcurrentMessageWriter(underlyingWriter);

  // Persist three payloads to the first topic and verify they all arrive.
  messageWriter.persist(new TestStoreRequest(firstTopic, Arrays.asList("1", "2", "3")), firstMetadata);
  List<RawMessage> stored = underlyingWriter.getMessages().get(firstTopic);
  Assert.assertEquals(3, stored.size());

  // Every message from the first persist call must carry publish timestamp 0.
  List<String> seenPayloads = new ArrayList<>();
  for (RawMessage stamped : stored) {
    Assert.assertEquals(0L, new MessageId(stamped.getId()).getPublishTimestamp());
    seenPayloads.add(Bytes.toString(stamped.getPayload()));
  }
  Assert.assertEquals(Arrays.asList("1", "2", "3"), seenPayloads);

  // Now persist to the second topic; it gets the next timestamp from the provider.
  messageWriter.persist(new TestStoreRequest(secondTopic, Arrays.asList("a", "b", "c")), secondMetadata);
  stored = underlyingWriter.getMessages().get(secondTopic);
  Assert.assertEquals(3, stored.size());

  // Every message from the second persist call must carry publish timestamp 1.
  seenPayloads.clear();
  for (RawMessage stamped : stored) {
    Assert.assertEquals(1L, new MessageId(stamped.getId()).getPublishTimestamp());
    seenPayloads.add(Bytes.toString(stamped.getPayload()));
  }
  Assert.assertEquals(Arrays.asList("a", "b", "c"), seenPayloads);
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata:
class ConcurrentMessageWriterTest, method testConcurrentWrites.
@Test
public void testConcurrentWrites() throws InterruptedException, BrokenBarrierException {
// Stress test: many threads persisting to the same topic concurrently.
// Verifies both the total message count and that message ids come back strictly sorted.
int payloadsPerRequest = 200;
int threadCount = 20;
final int requestPerThread = 20;
// Artificial per-write latency in the test writer, so concurrent persist() calls actually overlap.
long writeLatencyMillis = 50L;
final TopicId topicId = NamespaceId.DEFAULT.topic("t");
final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider(), writeLatencyMillis);
final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter);
// All requests share one payload list; only the writer-assigned ids differ between messages.
final List<String> payload = new ArrayList<>(payloadsPerRequest);
for (int i = 0; i < payloadsPerRequest; i++) {
payload.add(Integer.toString(i));
}
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
// threadCount + 1 parties: the worker threads plus the main thread, so all writers start together.
final CyclicBarrier barrier = new CyclicBarrier(threadCount + 1);
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
executor.submit(new Runnable() {
@Override
public void run() {
Stopwatch stopwatch = new Stopwatch();
try {
// Wait for every thread (and main) to be ready before the timed write burst.
barrier.await();
stopwatch.start();
for (int i = 0; i < requestPerThread; i++) {
writer.persist(new TestStoreRequest(topicId, payload), metadata);
}
LOG.info("Complete time for thread {} is {} ms", threadId, stopwatch.elapsedMillis());
} catch (Exception e) {
// Failures are only logged here; the message-count assertion below would then fail,
// since fewer messages than expected would have been persisted.
LOG.error("Exception raised when persisting.", e);
}
}
});
}
Stopwatch stopwatch = new Stopwatch();
// Release all worker threads at once, then wait for them to drain.
barrier.await();
stopwatch.start();
executor.shutdown();
Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
LOG.info("Total time passed: {} ms", stopwatch.elapsedMillis());
// Validate that the total number of messages written is correct
List<RawMessage> messages = testWriter.getMessages().get(topicId);
Assert.assertEquals(payloadsPerRequest * threadCount * requestPerThread, messages.size());
// The message id must be sorted: each id strictly greater than its predecessor,
// which also implies no two concurrent writes produced duplicate ids.
RawMessage lastMessage = null;
for (RawMessage message : messages) {
if (lastMessage != null) {
Assert.assertTrue(Bytes.compareTo(lastMessage.getId(), message.getId()) < 0);
}
lastMessage = message;
}
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata:
class LevelDBMetadataTable, method scanTopics.
/**
 * Scans the metadata table for topics within the given key range and lazily
 * decodes each row into a {@link TopicMetadata}.
 *
 * @param startKey inclusive start of the scan, or {@code null} for open-ended
 * @param stopKey exclusive end of the scan, or {@code null} for open-ended
 * @return a closeable iterator over the decoded topic metadata; callers must close it
 *         to release the underlying LevelDB scan
 */
private CloseableIterator<TopicMetadata> scanTopics(@Nullable byte[] startKey,
                                                    @Nullable byte[] stopKey) throws IOException {
  final CloseableIterator<Map.Entry<byte[], byte[]>> rows = new DBScanIterator(levelDB, startKey, stopKey);
  return new AbstractCloseableIterator<TopicMetadata>() {
    // Set once close() is called so further computeNext() calls terminate immediately.
    private boolean finished = false;

    @Override
    protected TopicMetadata computeNext() {
      if (finished || !rows.hasNext()) {
        return endOfData();
      }
      Map.Entry<byte[], byte[]> row = rows.next();
      // Row key encodes the topic id; row value is a JSON map of topic properties.
      TopicId decodedTopic = MessagingUtils.toTopicId(row.getKey());
      Map<String, String> decodedProperties = GSON.fromJson(Bytes.toString(row.getValue()), MAP_TYPE);
      return new TopicMetadata(decodedTopic, decodedProperties);
    }

    @Override
    public void close() {
      try {
        rows.close();
      } finally {
        // Mark the iterator exhausted even if closing the scan threw.
        endOfData();
        finished = true;
      }
    }
  };
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata:
class LevelDBMetadataTable, method deleteTopic.
/**
 * Marks the given topic as deleted by negating its generation in the stored properties.
 * The row itself is kept; a non-positive generation denotes a deleted topic.
 *
 * @throws TopicNotFoundException if the topic row is absent or already marked deleted
 * @throws IOException if the underlying LevelDB operation fails
 */
@Override
public void deleteTopic(TopicId topicId) throws TopicNotFoundException, IOException {
  byte[] metadataKey = MessagingUtils.toMetadataRowKey(topicId);
  try {
    // Read-modify-write on the metadata row must be atomic with respect to other
    // mutations on this table, hence the synchronized block.
    synchronized (this) {
      byte[] stored = levelDB.get(metadataKey);
      if (stored == null) {
        throw new TopicNotFoundException(topicId.getNamespace(), topicId.getTopic());
      }
      Map<String, String> storedProperties = GSON.fromJson(Bytes.toString(stored), MAP_TYPE);
      TopicMetadata current = new TopicMetadata(topicId, storedProperties);
      // A topic whose generation is already negated does not "exist"; treat as not found.
      if (!current.exists()) {
        throw new TopicNotFoundException(topicId.getNamespace(), topicId.getTopic());
      }
      // Mark the topic as deleted by flipping the sign of its generation.
      TreeMap<String, String> updatedProperties = new TreeMap<>(current.getProperties());
      updatedProperties.put(TopicMetadata.GENERATION_KEY, Integer.toString(-1 * current.getGeneration()));
      levelDB.put(metadataKey, Bytes.toBytes(GSON.toJson(updatedProperties, MAP_TYPE)), WRITE_OPTIONS);
    }
  } catch (DBException e) {
    // Surface storage-layer failures to callers as IOException, preserving the cause.
    throw new IOException(e);
  }
}
Aggregations.