Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.
Example: the testTxMaxLifeTime method of class MessagingHttpServiceTest.
@Test
public void testTxMaxLifeTime() throws Exception {
  NamespaceId nsId = new NamespaceId("txCheck");
  TopicId topic1 = nsId.topic("t1");
  // Create a topic
  client.createTopic(new TopicMetadata(topic1));
  final RollbackDetail rollbackDetail = client.publish(StoreRequestBuilder.of(topic1)
                                                         .setTransaction(1L)
                                                         .addPayloads("a", "b")
                                                         .build());
  try {
    // A write pointer this far in the past exceeds the transaction max lifetime, so the publish must fail
    client.publish(StoreRequestBuilder.of(topic1)
                     .setTransaction(-Long.MAX_VALUE)
                     .addPayloads("c", "d")
                     .build());
    Assert.fail("Expected IOException");
  } catch (IOException ex) {
    // expected
  }
  Set<String> msgs = new HashSet<>();
  CloseableIterator<RawMessage> messages = client.prepareFetch(topic1).fetch();
  while (messages.hasNext()) {
    RawMessage message = messages.next();
    msgs.add(Bytes.toString(message.getPayload()));
  }
  Assert.assertEquals(2, msgs.size());
  Assert.assertTrue(msgs.contains("a"));
  Assert.assertTrue(msgs.contains("b"));
  messages.close();
  client.rollback(topic1, rollbackDetail);
  client.deleteTopic(topic1);
}
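A side note on resource handling: the test above closes the iterator only after the assertions run, so a failed assertion would leak it. A minimal sketch of a hypothetical helper (not part of CDAP) that drains any closeable iterator with try-with-resources; it works for CDAP's CloseableIterator, which, as the try-with-resources in the next example shows, is both an Iterator and AutoCloseable:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.function.Function;

public final class CloseableIterators {
  // Drains a closeable iterator into a Set, mapping each element, and closes
  // the iterator even if iteration or mapping throws.
  static <T extends Iterator<E> & AutoCloseable, E, R> Set<R> drainToSet(
      T iterator, Function<? super E, ? extends R> mapper) throws Exception {
    try (T it = iterator) {
      Set<R> result = new HashSet<>();
      while (it.hasNext()) {
        result.add(mapper.apply(it.next()));
      }
      return result;
    }
  }
}

With such a helper, the fetch-and-collect above shrinks to a single call and the iterator is closed even when an assertion fails.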
Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.
Example: the assertMetricsFromMessaging method of class MessagingMetricsCollectionServiceTest.
private void assertMetricsFromMessaging(final Schema schema, ReflectionDatumReader recordReader,
                                        Table<String, String, Long> expected)
  throws InterruptedException, TopicNotFoundException, IOException {
  // Consume from the messaging service
  final Map<String, MetricValues> metrics = Maps.newHashMap();
  ByteBufferInputStream is = new ByteBufferInputStream(null);
  for (int i = 0; i < PARTITION_SIZE; i++) {
    TopicId topicId = NamespaceId.SYSTEM.topic(TOPIC_PREFIX + i);
    try (CloseableIterator<RawMessage> iterator = messagingService.prepareFetch(topicId).fetch()) {
      while (iterator.hasNext()) {
        RawMessage message = iterator.next();
        MetricValues metricsRecord =
          (MetricValues) recordReader.read(new BinaryDecoder(is.reset(ByteBuffer.wrap(message.getPayload()))), schema);
        StringBuilder flattenContext = new StringBuilder();
        // Sort the tags so that the flattened context is deterministic for verification
        Map<String, String> tags = Maps.newTreeMap();
        tags.putAll(metricsRecord.getTags());
        for (Map.Entry<String, String> tag : tags.entrySet()) {
          flattenContext.append(tag.getKey()).append(".").append(tag.getValue()).append(".");
        }
        // Remove the trailing "."
        if (flattenContext.length() > 0) {
          flattenContext.deleteCharAt(flattenContext.length() - 1);
        }
        metrics.put(flattenContext.toString(), metricsRecord);
      }
    } catch (IOException e) {
      LOG.info("Failed to decode message to MetricValues. Skipped. {}", e.getMessage());
    }
  }
  Assert.assertEquals(expected.rowKeySet().size(), metrics.size());
  checkReceivedMetrics(expected, metrics);
}
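The tag-flattening loop above can be expressed more compactly. A hypothetical equivalent (not in the CDAP codebase) that sorts the tags and joins them with dots, avoiding the manual trailing-dot trim:

import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public final class TagFlattener {
  // Builds "k1.v1.k2.v2" from the tag map with keys in sorted order, exactly
  // like the StringBuilder loop above; an empty map yields an empty string.
  static String flatten(Map<String, String> tags) {
    return new TreeMap<>(tags).entrySet().stream()
      .map(e -> e.getKey() + "." + e.getValue())
      .collect(Collectors.joining("."));
  }
}

Collectors.joining inserts the separator only between elements, which is what the manual deleteCharAt was compensating for.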
Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.
Example: the poll method of class FetchHandler.
@POST
@Path("poll")
public void poll(HttpRequest request, HttpResponder responder,
                 @PathParam("namespace") String namespace,
                 @PathParam("topic") String topic) throws Exception {
  TopicId topicId = new NamespaceId(namespace).topic(topic);
  // Currently only Avro is supported
  if (!"avro/binary".equals(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))) {
    throw new BadRequestException("Only avro/binary content type is supported.");
  }
  // Decode the poll request
  Decoder decoder = DecoderFactory.get().directBinaryDecoder(new ChannelBufferInputStream(request.getContent()), null);
  DatumReader<GenericRecord> datumReader = new GenericDatumReader<>(Schemas.V1.ConsumeRequest.SCHEMA);
  // Fetch the messages; on success the body producer takes ownership of the iterator
  CloseableIterator<RawMessage> iterator = fetchMessages(datumReader.read(null, decoder), topicId);
  try {
    responder.sendContent(HttpResponseStatus.OK,
                          new MessagesBodyProducer(iterator, messageChunkSize),
                          ImmutableMultimap.of(HttpHeaders.Names.CONTENT_TYPE, "avro/binary"));
  } catch (Throwable t) {
    iterator.close();
    throw t;
  }
}
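The decode half of the handler is plain Avro. As a standalone reference, here is a sketch that decodes one avro/binary record from a byte array using the same Avro calls; the schema parameter stands in for Schemas.V1.ConsumeRequest.SCHEMA:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;

public final class AvroBinary {
  // Decodes a single record written with the given schema from raw bytes,
  // mirroring the directBinaryDecoder + GenericDatumReader pair in poll().
  static GenericRecord decode(byte[] body, Schema schema) throws IOException {
    BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(new ByteArrayInputStream(body), null);
    DatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
    return reader.read(null, decoder);
  }
}

The null arguments simply ask Avro to allocate a fresh decoder and a fresh record rather than reuse existing ones.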
Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.
Example: the testMaxSequence method of class ConcurrentMessageWriterTest.
@Test
public void testMaxSequence() throws IOException {
  // This tests the case when a single StoreRequest has more than SEQUENCE_ID_LIMIT (65536) payloads.
  // Entries beyond the max seqId are expected to roll over to the next timestamp, with the seqId reset to 0.
  // Generate SEQUENCE_ID_LIMIT + 1 payloads
  int msgCount = StoreRequestWriter.SEQUENCE_ID_LIMIT + 1;
  List<String> payloads = new ArrayList<>(msgCount);
  for (int i = 0; i < msgCount; i++) {
    payloads.add(Integer.toString(i));
  }
  TopicId topicId = new NamespaceId("ns1").topic("t1");
  TopicMetadata metadata = new TopicMetadata(topicId, new HashMap<String, String>(), 1);
  // Write the payloads
  TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
  ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter);
  writer.persist(new TestStoreRequest(topicId, payloads), metadata);
  List<RawMessage> messages = testWriter.getMessages().get(topicId);
  Assert.assertEquals(msgCount, messages.size());
  // The first SEQUENCE_ID_LIMIT messages should share the same timestamp, with seqId from 0 to SEQUENCE_ID_LIMIT - 1
  for (int i = 0; i < StoreRequestWriter.SEQUENCE_ID_LIMIT; i++) {
    MessageId id = new MessageId(messages.get(i).getId());
    Assert.assertEquals(0L, id.getPublishTimestamp());
    Assert.assertEquals((short) i, id.getSequenceId());
  }
  // The (SEQUENCE_ID_LIMIT + 1)th message should have the next timestamp and seqId 0
  MessageId id = new MessageId(messages.get(msgCount - 1).getId());
  Assert.assertEquals(1L, id.getPublishTimestamp());
  Assert.assertEquals(0, id.getSequenceId());
}
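The rollover the test verifies is simple modular arithmetic: with 65536 sequence IDs available per timestamp, payload N (0-based) of a single request lands N / 65536 timestamps after the first one, with seqId N % 65536. A small sketch of that invariant; the constant mirrors StoreRequestWriter.SEQUENCE_ID_LIMIT but this class is illustrative, not CDAP API:

public final class SeqIdRollover {
  static final int SEQUENCE_ID_LIMIT = 65536; // mirrors StoreRequestWriter.SEQUENCE_ID_LIMIT

  public static void main(String[] args) {
    long baseTimestamp = 0L;    // first timestamp used for the request, as in the test
    int n = SEQUENCE_ID_LIMIT;  // 0-based index of the (SEQUENCE_ID_LIMIT + 1)th payload
    long timestamp = baseTimestamp + n / SEQUENCE_ID_LIMIT; // 1: rolled to the next timestamp
    int seqId = n % SEQUENCE_ID_LIMIT;                      // 0: sequence restarts
    System.out.println("payload " + n + " -> timestamp=" + timestamp + ", seqId=" + seqId);
  }
}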
Use of co.cask.cdap.messaging.data.RawMessage in project cdap by caskdata.
Example: the testBasic method of class ConcurrentMessageWriterTest.
@Test
public void testBasic() throws IOException {
  TopicId topicId1 = new NamespaceId("ns1").topic("t1");
  TopicId topicId2 = new NamespaceId("ns2").topic("t2");
  TopicMetadata metadata1 = new TopicMetadata(topicId1, new HashMap<String, String>(), 1);
  TopicMetadata metadata2 = new TopicMetadata(topicId2, new HashMap<String, String>(), 1);
  TestStoreRequestWriter testWriter = new TestStoreRequestWriter(new TimeProvider.IncrementalTimeProvider());
  ConcurrentMessageWriter writer = new ConcurrentMessageWriter(testWriter);
  writer.persist(new TestStoreRequest(topicId1, Arrays.asList("1", "2", "3")), metadata1);
  // There should be 3 messages being written
  List<RawMessage> messages = testWriter.getMessages().get(topicId1);
  Assert.assertEquals(3, messages.size());
  // All messages should be written with timestamp 0
  List<String> payloads = new ArrayList<>();
  for (RawMessage message : messages) {
    Assert.assertEquals(0L, new MessageId(message.getId()).getPublishTimestamp());
    payloads.add(Bytes.toString(message.getPayload()));
  }
  Assert.assertEquals(Arrays.asList("1", "2", "3"), payloads);
  // Write to another topic
  writer.persist(new TestStoreRequest(topicId2, Arrays.asList("a", "b", "c")), metadata2);
  // There should be 3 messages being written to topic2
  messages = testWriter.getMessages().get(topicId2);
  Assert.assertEquals(3, messages.size());
  // All messages should be written with timestamp 1
  payloads.clear();
  for (RawMessage message : messages) {
    Assert.assertEquals(1L, new MessageId(message.getId()).getPublishTimestamp());
    payloads.add(Bytes.toString(message.getPayload()));
  }
  Assert.assertEquals(Arrays.asList("a", "b", "c"), payloads);
}
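The timestamps 0 and 1 asserted above come from TimeProvider.IncrementalTimeProvider, which appears to hand out successive long values so that each persist() batch is stamped with the next time. A hypothetical stand-in for that behavior, not CDAP's actual implementation:

import java.util.concurrent.atomic.AtomicLong;

// Returns 0, 1, 2, ... on successive calls, so the first persist() stamps its
// messages with time 0 and the second with time 1, matching the assertions above.
public final class IncrementingTimeProvider {
  private final AtomicLong next = new AtomicLong();

  public long currentTimeMillis() {
    return next.getAndIncrement();
  }
}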