Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata: class MessagingHttpServiceTest, method testGeMetadata.
/**
 * Verifies that re-creating a deleted topic bumps its generation: over five
 * create/get/delete cycles the configured TTL stays at 100 while the
 * generation returned by the service increases by one each round.
 */
@Test
public void testGeMetadata() throws Exception {
  TopicId topic = new NamespaceId("ns2").topic("d");
  TopicMetadata createRequest = new TopicMetadata(topic, "ttl", "100");

  int expectedGeneration = 1;
  while (expectedGeneration <= 5) {
    client.createTopic(createRequest);
    TopicMetadata fetched = client.getTopic(topic);
    Assert.assertEquals(100, fetched.getTTL());
    Assert.assertEquals(expectedGeneration, fetched.getGeneration());
    client.deleteTopic(topic);
    expectedGeneration++;
  }
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata: class MessagingHttpServiceTest, method testDeletes.
/**
 * Repeatedly creates a topic, publishes a pair of messages, verifies that both
 * can be fetched non-transactionally, and then deletes the topic so the next
 * round starts from a fresh topic.
 */
@Test
public void testDeletes() throws Exception {
  TopicId topic = new NamespaceId("ns1").topic("del");
  TopicMetadata createRequest = new TopicMetadata(topic, "ttl", "100");

  for (int round = 0; round < 10; round++) {
    client.createTopic(createRequest);
    String first = String.format("m%d", round);
    String second = String.format("m%d", round + 1);
    Assert.assertNull(client.publish(StoreRequestBuilder.of(topic).addPayloads(first, second).build()));

    // Fetch messages non-transactionally
    List<RawMessage> fetched = new ArrayList<>();
    try (CloseableIterator<RawMessage> messageIterator = client.prepareFetch(topic).fetch()) {
      while (messageIterator.hasNext()) {
        fetched.add(messageIterator.next());
      }
    }
    Assert.assertEquals(2, fetched.size());

    // Collect payloads into a set; fetch order is not asserted, only membership.
    Set<String> payloads = new HashSet<>();
    for (RawMessage message : fetched) {
      payloads.add(Bytes.toString(message.getPayload()));
    }
    Assert.assertTrue(payloads.contains(first));
    Assert.assertTrue(payloads.contains(second));

    client.deleteTopic(topic);
  }
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata: class MessagingHttpServiceTest, method testMetadataEndpoints.
/**
 * Exercises the topic metadata endpoints — get, create, update, delete and
 * list — including the failure cases for missing, duplicate and invalid topics.
 */
@Test
public void testMetadataEndpoints() throws Exception {
  NamespaceId nsId = new NamespaceId("metadata");
  TopicId topic1 = nsId.topic("t1");
  TopicId topic2 = nsId.topic("t2");
  // Getting a non-existent topic should fail
  try {
    client.getTopic(topic1);
    Assert.fail("Expected TopicNotFoundException");
  } catch (TopicNotFoundException e) {
    // Expected
  }
  // Create the topic t1
  client.createTopic(new TopicMetadata(topic1));
  // Creating an already existing topic should fail
  try {
    client.createTopic(new TopicMetadata(topic1));
    Assert.fail("Expect TopicAlreadyExistsException");
  } catch (TopicAlreadyExistsException e) {
    // Expected
  }
  // Get the topic properties. Verify TTL is the same as the default one
  Assert.assertEquals(cConf.getInt(Constants.MessagingSystem.TOPIC_DEFAULT_TTL_SECONDS),
                      client.getTopic(topic1).getTTL());
  // Update the topic t1 with a new TTL
  client.updateTopic(new TopicMetadata(topic1, "ttl", "5"));
  // Get the topic t1 properties. Verify TTL is updated
  Assert.assertEquals(5, client.getTopic(topic1).getTTL());
  // Trying to add another topic t2 with an invalid ttl should fail; the client
  // surfaces the server-side bad request as IllegalArgumentException.
  // NOTE: the failure message previously said "BadRequestException", which did
  // not match the exception type actually caught below.
  try {
    client.createTopic(new TopicMetadata(topic2, "ttl", "xyz"));
    Assert.fail("Expect IllegalArgumentException");
  } catch (IllegalArgumentException e) {
    // Expected
  }
  // Add topic t2 with a valid ttl
  client.createTopic(new TopicMetadata(topic2, "ttl", "5"));
  // Get the topic t2 properties. It should have TTL set based on what was provided
  Assert.assertEquals(5, client.getTopic(topic2).getTTL());
  // Listing topics under the namespace should return both topics
  List<TopicId> topics = client.listTopics(nsId);
  Assert.assertEquals(Arrays.asList(topic1, topic2), topics);
  // Delete both topics
  client.deleteTopic(topic1);
  client.deleteTopic(topic2);
  // Deleting a non-existent topic should fail
  try {
    client.deleteTopic(topic1);
    Assert.fail("Expect TopicNotFoundException");
  } catch (TopicNotFoundException e) {
    // Expected
  }
  // Updating a non-existent topic should fail
  try {
    client.updateTopic(new TopicMetadata(topic1));
    Assert.fail("Expect TopicNotFoundException");
  } catch (TopicNotFoundException e) {
    // Expected
  }
  // Listing topics under the namespace again should now be empty
  Assert.assertTrue(client.listTopics(nsId).isEmpty());
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata: class ConcurrentMessageWriterTest, method testMultiMaxSequence.
// Verifies that concurrent persist() calls whose combined message count exceeds
// the per-timestamp sequence-id limit are still written correctly: the sequence
// id wraps around SEQUENCE_ID_LIMIT while the publish timestamp advances.
@Test
public void testMultiMaxSequence() throws IOException, InterruptedException {
TopicId topicId = new NamespaceId("ns1").topic("t1");
final TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
// This tests the case when multiple StoreRequests combined exceed the 65536 payload limit.
// See testMaxSequence() for more details on when the sequence-id limit matters.
// Generate 3 StoreRequests, each with SEQUENCE_ID_LIMIT / 3 * 2 messages, so the
// three together hold twice the limit.
int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT / 3 * 2;
int requestCount = 3;
List<StoreRequest> requests = new ArrayList<>();
for (int i = 0; i < requestCount; i++) {
List<String> payloads = new ArrayList<>(msgCount);
for (int j = 0; j < msgCount; j++) {
payloads.add(Integer.toString(j));
}
requests.add(new TestStoreRequest(topicId, payloads));
}
TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
// We use a custom metrics collector here to make all the persist calls reach the same latch,
// since we know that the ConcurrentMessageWriter will emit a metric "persist.requested" after enqueuing but
// before flushing.
// This will make all requests batched together.
final CountDownLatch latch = new CountDownLatch(requestCount);
final ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter, new MetricsCollector() {
@Override
public void increment(String metricName, long value) {
// Block each publisher thread until all of them have enqueued, forcing the
// three requests to be flushed as a single batch.
if ("persist.requested".equals(metricName)) {
latch.countDown();
Uninterruptibles.awaitUninterruptibly(latch);
}
}
@Override
public void gauge(String metricName, long value) {
LOG.info("MetricsContext.gauge: {} = {}", metricName, value);
}
});
ExecutorService executor = Executors.newFixedThreadPool(3);
for (final StoreRequest request : requests) {
executor.submit(new Runnable() {
@Override
public void run() {
try {
writer.persist(request, metadata);
} catch (IOException e) {
LOG.error("Failed to persist", e);
}
}
});
}
executor.shutdown();
Assert.assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
// Validates all messages are being written
List<RawMessage> messages = testWriter.getMessages().get(topicId);
Assert.assertEquals(requestCount * msgCount, messages.size());
// We expect the payloads to be a repeating sequence of [0..msgCount-1]
int expectedPayload = 0;
// The sequenceId should be (i % SEQUENCE_ID_LIMIT); the publish timestamp grows by one
// every SEQUENCE_ID_LIMIT messages (IncrementalTimeProvider — see the asserts below).
for (int i = 0; i < messages.size(); i++) {
RawMessage message = messages.get(i);
MessageId messageId = new MessageId(message.getId());
Assert.assertEquals(i / StoreRequestWriter.SEQUENCE_ID_LIMIT, messageId.getPublishTimestamp());
Assert.assertEquals((short) (i % StoreRequestWriter.SEQUENCE_ID_LIMIT), messageId.getSequenceId());
Assert.assertEquals(expectedPayload, Integer.parseInt(Bytes.toString(message.getPayload())));
expectedPayload = (expectedPayload + 1) % msgCount;
}
}
Use of co.cask.cdap.messaging.TopicMetadata in project cdap by caskdata: class LevelDBMetadataTable, method createTopic.
/**
 * Creates a topic by writing its metadata row to LevelDB.
 *
 * <p>The row value is a JSON map of the topic properties plus a generation
 * entry. If a row already exists for the topic id and the stored metadata
 * still {@code exists()}, the create fails; otherwise the row marks a
 * previously deleted topic and the new generation is derived from the old one
 * ({@code 1 - old}, i.e. the old value negated plus one — presumably deleted
 * topics store a negated generation, so the counter keeps increasing).
 *
 * <p>The check-then-put runs under {@code synchronized (this)} so concurrent
 * creates of the same topic cannot both succeed.
 *
 * @throws TopicAlreadyExistsException if a live topic with the same id exists
 * @throws IOException if the underlying LevelDB operation fails
 */
@Override
public void createTopic(TopicMetadata topicMetadata) throws TopicAlreadyExistsException, IOException {
  try {
    TopicId topicId = topicMetadata.getTopicId();
    byte[] rowKey = MessagingUtils.toMetadataRowKey(topicId);

    // Start from the requested properties with the default generation.
    TreeMap<String, String> newProperties = new TreeMap<>(topicMetadata.getProperties());
    newProperties.put(TopicMetadata.GENERATION_KEY, MessagingUtils.Constants.DEFAULT_GENERATION);

    synchronized (this) {
      byte[] existingRow = levelDB.get(rowKey);
      if (existingRow != null) {
        Map<String, String> oldProperties = GSON.fromJson(Bytes.toString(existingRow), MAP_TYPE);
        TopicMetadata oldMetadata = new TopicMetadata(topicId, oldProperties);
        if (oldMetadata.exists()) {
          throw new TopicAlreadyExistsException(topicId.getNamespace(), topicId.getTopic());
        }
        // Topic was deleted before: advance the generation past the old one.
        int nextGeneration = 1 - oldMetadata.getGeneration();
        newProperties.put(TopicMetadata.GENERATION_KEY, Integer.toString(nextGeneration));
      }
      levelDB.put(rowKey, Bytes.toBytes(GSON.toJson(newProperties, MAP_TYPE)), WRITE_OPTIONS);
    }
  } catch (DBException e) {
    // Wrap the LevelDB-specific exception in the declared IOException.
    throw new IOException(e);
  }
}
Aggregations