Use of org.apache.samza.system.eventhub.admin.PassThroughInterceptor in project samza by apache.
From class TestEventHubSystemConsumer, method testNonTransientErrorRetry:
@Test
public void testNonTransientErrorRetry() throws Exception {
  String systemName = "eventhubs";
  String streamName = "testNonTransientErrorRetry";
  // needs to be less than BLOCKING_QUEUE_SIZE
  int numEvents = 10;
  int partitionId = 0;
  TestClock testClock = new TestClock();
  TestMetricsRegistry testMetrics = new TestMetricsRegistry();
  Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
  SystemStreamPartition ssp = new SystemStreamPartition(systemName, streamName, new Partition(partitionId));
  Map<String, Interceptor> interceptors = new HashMap<>();
  interceptors.put(streamName, new PassThroughInterceptor());

  // create EventData
  List<EventData> singlePartitionEventData = MockEventData.generateEventData(numEvents);
  eventData.put(ssp, singlePartitionEventData);

  // Set configs
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
  configMap.put(String.format(EventHubConfig.CONFIG_MAX_RETRY_COUNT, systemName), "1");
  MapConfig config = new MapConfig(configMap);

  MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);
  EventHubSystemConsumer consumer =
      new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
          interceptors, testMetrics, testClock);
  consumer.register(ssp, EventHubSystemConsumer.END_OF_STREAM);
  consumer.start();

  // 1st error should retry instead of throw
  testClock.advanceTime(System.currentTimeMillis());
  eventHubClientWrapperFactory.triggerError(consumer.streamPartitionHandlers,
      new EventHubException(false /* is transient */, "test"));
  consumer.poll(Collections.singleton(ssp), 0).get(ssp);
  // assert that the reconnect task was submitted and completed eventually
  Assert.assertNotNull("reconnect task should have been submitted", consumer.reconnectTaskStatus);
  Future lastReconnectTask = consumer.reconnectTaskStatus;
  // should return instantaneously
  lastReconnectTask.get(10000, TimeUnit.MILLISECONDS);
  Assert.assertEquals(consumer.recentRetryAttempts.size(), 1);

  // after retry should receive events normally
  testClock.advanceTime(1);
  eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);
  List<IncomingMessageEnvelope> result = consumer.poll(Collections.singleton(ssp), 0).get(ssp);
  verifyEvents(result, singlePartitionEventData);
  Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
  Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);
  Map<String, Counter> counters = testMetrics.getCounters(streamName)
      .stream()
      .collect(Collectors.toMap(Counter::getName, Function.identity()));
  Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);

  // 2nd error: advance into next window, the older retry should have been evicted so this error should cause retry
  testClock.advanceTime(EventHubConfig.DEFAULT_CONFIG_RETRY_WINDOW_MS + 1);
  Assert.assertEquals(consumer.recentRetryAttempts.size(), 0);
  eventHubClientWrapperFactory.triggerError(consumer.streamPartitionHandlers,
      new EventHubException(false /* is transient */, "test"));
  consumer.poll(Collections.singleton(ssp), 0).get(ssp);
  Assert.assertNotNull("reconnect task should have been submitted", consumer.reconnectTaskStatus);
  lastReconnectTask = consumer.reconnectTaskStatus;
  // should return instantaneously
  lastReconnectTask.get(10000, TimeUnit.MILLISECONDS);
  Assert.assertEquals(consumer.recentRetryAttempts.size(), 1);

  // 3rd error: 1 ms is within the min retry interval, so poll should do nothing
  testClock.advanceTime(1);
  eventHubClientWrapperFactory.triggerError(consumer.streamPartitionHandlers,
      new EventHubException(false /* is transient */, "test"));
  consumer.poll(Collections.singleton(ssp), 0).get(ssp);
  Assert.assertEquals("there shouldn't be another retry task within min retry interval",
      consumer.reconnectTaskStatus, lastReconnectTask);

  // 4th error: now the poll should throw
  testClock.advanceTime(EventHubConfig.DEFAULT_CONFIG_RETRY_INTERVAL_MS + 1);
  eventHubClientWrapperFactory.triggerError(consumer.streamPartitionHandlers,
      new EventHubException(false /* is transient */, "test"));
  try {
    consumer.poll(Collections.singleton(ssp), 0).get(ssp);
    Assert.fail("poll should have thrown");
  } catch (Exception e) {
    Assert.assertEquals(e.getCause().getMessage(), "test");
  }
  Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 4);
}
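The per-stream configuration block above reappears in each of these tests. As a reading aid, a minimal helper that assembles the same map is sketched below; the method name buildStreamConfig and its parameter list are illustrative and not part of the Samza test class, while the EventHubConfig keys are exactly the ones used above.

// Hypothetical helper (not in the Samza source): builds the per-stream config map
// used by these tests, using only the EventHubConfig keys shown above.
private static Map<String, String> buildStreamConfig(String systemName, String streamName,
    String namespace, String sasKeyName, String sasToken, String entityPath) {
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), namespace);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), sasKeyName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), sasToken);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), entityPath);
  return configMap;
}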
Use of org.apache.samza.system.eventhub.admin.PassThroughInterceptor in project samza by apache.
From class TestEventHubSystemConsumer, method testSinglePartitionConsumptionHappyPath:
@Test
public void testSinglePartitionConsumptionHappyPath() throws Exception {
  String systemName = "eventhubs";
  String streamName = "testStream";
  // needs to be less than BLOCKING_QUEUE_SIZE
  int numEvents = 10;
  int partitionId = 0;
  TestMetricsRegistry testMetrics = new TestMetricsRegistry();
  Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
  SystemStreamPartition ssp = new SystemStreamPartition(systemName, streamName, new Partition(partitionId));
  Map<String, Interceptor> interceptors = new HashMap<>();
  interceptors.put(streamName, new PassThroughInterceptor());

  // create EventData
  List<EventData> singlePartitionEventData = MockEventData.generateEventData(numEvents);
  eventData.put(ssp, singlePartitionEventData);

  // Set configs
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
  MapConfig config = new MapConfig(configMap);

  MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);
  EventHubSystemConsumer consumer =
      new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
          interceptors, testMetrics);
  consumer.register(ssp, EventHubSystemConsumer.END_OF_STREAM);
  consumer.start();

  // Mock received data from EventHub
  eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);
  List<IncomingMessageEnvelope> result = consumer.poll(Collections.singleton(ssp), 1000).get(ssp);
  verifyEvents(result, singlePartitionEventData);
  Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
  Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);
  Map<String, Counter> counters = testMetrics.getCounters(streamName)
      .stream()
      .collect(Collectors.toMap(Counter::getName, Function.identity()));
  Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);
  Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
}
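The happy-path test polls once with a 1000 ms timeout after the mock pushes its events, which is enough here because delivery is synchronous. In a setting where delivery timing is less deterministic, a small polling loop built only on the poll(Set, timeout) signature used above could be employed; the helper below is a sketch under that assumption (it also assumes java.util.ArrayList is imported) and is not part of the test.

// Hypothetical polling loop (not in the Samza source): drains one SSP until the
// expected number of envelopes has arrived or the deadline passes.
private static List<IncomingMessageEnvelope> pollUntil(EventHubSystemConsumer consumer,
    SystemStreamPartition ssp, int expected, long deadlineMs) throws InterruptedException {
  List<IncomingMessageEnvelope> collected = new ArrayList<>();
  long end = System.currentTimeMillis() + deadlineMs;
  while (collected.size() < expected && System.currentTimeMillis() < end) {
    List<IncomingMessageEnvelope> batch = consumer.poll(Collections.singleton(ssp), 100).get(ssp);
    if (batch != null) {
      collected.addAll(batch);
    }
  }
  return collected;
}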
Use of org.apache.samza.system.eventhub.admin.PassThroughInterceptor in project samza by apache.
From class TestEventHubSystemConsumer, method testMultiPartitionConsumptionHappyPath:
private void testMultiPartitionConsumptionHappyPath(boolean perPartitionConnection) throws Exception {
  String systemName = "eventhubs";
  String streamName = "testStream";
  // needs to be less than BLOCKING_QUEUE_SIZE
  int numEvents = 10;
  int partitionId1 = 0;
  int partitionId2 = 1;
  TestMetricsRegistry testMetrics = new TestMetricsRegistry();
  Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
  SystemStreamPartition ssp1 = new SystemStreamPartition(systemName, streamName, new Partition(partitionId1));
  SystemStreamPartition ssp2 = new SystemStreamPartition(systemName, streamName, new Partition(partitionId2));
  Map<String, Interceptor> interceptor = new HashMap<>();
  interceptor.put(streamName, new PassThroughInterceptor());

  // create EventData
  List<EventData> singlePartitionEventData1 = MockEventData.generateEventData(numEvents);
  List<EventData> singlePartitionEventData2 = MockEventData.generateEventData(numEvents);
  eventData.put(ssp1, singlePartitionEventData1);
  eventData.put(ssp2, singlePartitionEventData2);

  // Set configs
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
  configMap.put(String.format(EventHubConfig.CONFIG_PER_PARTITION_CONNECTION, systemName),
      String.valueOf(perPartitionConnection));
  MapConfig config = new MapConfig(configMap);

  MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);
  EventHubSystemConsumer consumer =
      new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
          interceptor, testMetrics);
  consumer.register(ssp1, EventHubSystemConsumer.START_OF_STREAM);
  consumer.register(ssp2, EventHubSystemConsumer.START_OF_STREAM);
  consumer.start();

  // Mock received data from EventHub
  eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);
  Set<SystemStreamPartition> ssps = new HashSet<>();
  ssps.add(ssp1);
  ssps.add(ssp2);
  Map<SystemStreamPartition, List<IncomingMessageEnvelope>> results = consumer.poll(ssps, 1000);
  verifyEvents(results.get(ssp1), singlePartitionEventData1);
  verifyEvents(results.get(ssp2), singlePartitionEventData2);
  Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
  Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);
  Map<String, Counter> counters = testMetrics.getCounters(streamName)
      .stream()
      .collect(Collectors.toMap(Counter::getName, Function.identity()));
  Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents * 2);
  Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
  if (perPartitionConnection) {
    Assert.assertNotEquals("perPartitionConnection=true; SSPs should not share the same client",
        consumer.perPartitionEventHubManagers.get(ssp1), consumer.perPartitionEventHubManagers.get(ssp2));
  } else {
    Assert.assertEquals("perPartitionConnection=false; SSPs should share the same client",
        consumer.perPartitionEventHubManagers.get(ssp1), consumer.perPartitionEventHubManagers.get(ssp2));
  }
}
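Because this method is private and parameterized on perPartitionConnection, it is presumably driven by two @Test wrappers, one per connection-sharing mode. A minimal sketch of such wrappers follows; the wrapper method names are illustrative and not taken from the Samza source.

// Hypothetical @Test wrappers (names are illustrative) exercising both modes.
@Test
public void testMultiPartitionConsumptionWithSharedConnection() throws Exception {
  testMultiPartitionConsumptionHappyPath(false);
}

@Test
public void testMultiPartitionConsumptionWithPerPartitionConnection() throws Exception {
  testMultiPartitionConsumptionHappyPath(true);
}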
Use of org.apache.samza.system.eventhub.admin.PassThroughInterceptor in project samza by apache.
From class TestEventHubSystemProducer, method testSendingToEventHubHashing:
@Test
public void testSendingToEventHubHashing() throws Exception {
  String systemName = "eventhubs";
  String streamName = "testStream";
  int numEvents = 10;
  String partitionId0 = "124";
  String partitionId1 = "235";
  TestMetricsRegistry testMetrics = new TestMetricsRegistry();
  Map<String, Interceptor> interceptor = new HashMap<>();
  interceptor.put(streamName, new PassThroughInterceptor());
  List<String> outgoingMessagesP0 = generateMessages(numEvents);
  List<String> outgoingMessagesP1 = generateMessages(numEvents);

  // Set configs
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
  configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), EVENTHUB_ENTITY1);
  // mod 2 on the partitionid to simulate consistent hashing
  configMap.put(String.format(EventHubConfig.CONFIG_PRODUCER_PARTITION_METHOD, systemName),
      PartitioningMethod.EVENT_HUB_HASHING.toString());
  MapConfig config = new MapConfig(configMap);

  MockEventHubClientManagerFactory factory = new MockEventHubClientManagerFactory();
  EventHubSystemProducer producer =
      new EventHubSystemProducer(new EventHubConfig(config), systemName, factory, interceptor, testMetrics);
  SystemStream systemStream = new SystemStream(systemName, streamName);
  producer.register(SOURCE);
  producer.start();
  outgoingMessagesP0.forEach(message ->
      producer.send(SOURCE, new OutgoingMessageEnvelope(systemStream, partitionId0, null, message.getBytes())));
  outgoingMessagesP1.forEach(message ->
      producer.send(SOURCE, new OutgoingMessageEnvelope(systemStream, partitionId1, null, message.getBytes())));

  // Retrieve sent data
  List<String> receivedData0 = factory.getSentData(systemName, streamName, 0)
      .stream()
      .map(eventData -> new String(eventData.getBytes()))
      .collect(Collectors.toList());
  List<String> receivedData1 = factory.getSentData(systemName, streamName, 1)
      .stream()
      .map(eventData -> new String(eventData.getBytes()))
      .collect(Collectors.toList());
  Assert.assertTrue(outgoingMessagesP0.equals(receivedData0));
  Assert.assertTrue(outgoingMessagesP1.equals(receivedData1));
}
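The final assertions depend on the mock's partitioning described by the "mod 2" comment: the string partition key is reduced to a partition index modulo the partition count, so key "124" lands in partition 0 and "235" in partition 1. The one-liner below merely spells out that assumption about MockEventHubClientManagerFactory; it is not the mock's actual code.

// Assumed key-to-partition mapping used by the mock, per the "mod 2" comment; illustrative only.
static int assumedMockPartition(String partitionKey, int numPartitions) {
  return Integer.parseInt(partitionKey) % numPartitions; // "124" % 2 -> 0, "235" % 2 -> 1
}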