Use of org.apache.samza.util.NoOpMetricsRegistry in project samza by apache.
Class TestHdfsSystemConsumer, method testHdfsSystemConsumerE2E.
/*
 * A simple end-to-end test that covers the workflow from system admin to
 * partitioner to system consumer, making sure the basic functionality
 * works as expected.
 */
@Test
public void testHdfsSystemConsumerE2E() throws Exception {
  Config config = generateDefaultConfig();
  HdfsSystemFactory systemFactory = new HdfsSystemFactory();
  // create admin and do partitioning
  HdfsSystemAdmin systemAdmin = systemFactory.getAdmin(SYSTEM_NAME, config);
  String streamName = WORKING_DIRECTORY;
  Set<String> streamNames = new HashSet<>();
  streamNames.add(streamName);
  generateAvroDataFiles();
  Map<String, SystemStreamMetadata> streamMetadataMap = systemAdmin.getSystemStreamMetadata(streamNames);
  SystemStreamMetadata systemStreamMetadata = streamMetadataMap.get(streamName);
  Assert.assertEquals(NUM_FILES, systemStreamMetadata.getSystemStreamPartitionMetadata().size());
  // create consumer and read from files
  HdfsSystemConsumer systemConsumer = systemFactory.getConsumer(SYSTEM_NAME, config, new NoOpMetricsRegistry());
  Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> metadataMap = systemStreamMetadata.getSystemStreamPartitionMetadata();
  Set<SystemStreamPartition> systemStreamPartitionSet = new HashSet<>();
  metadataMap.forEach((partition, metadata) -> {
    SystemStreamPartition ssp = new SystemStreamPartition(SYSTEM_NAME, streamName, partition);
    systemStreamPartitionSet.add(ssp);
    String offset = metadata.getOldestOffset();
    systemConsumer.register(ssp, offset);
  });
  systemConsumer.start();
  // verify events read from consumer
  int eventsReceived = 0;
  // each partition delivers one extra "End of Stream" envelope at the end
  int totalEvents = (NUM_EVENTS + 1) * NUM_FILES;
  int remainingRetries = 100;
  Map<SystemStreamPartition, List<IncomingMessageEnvelope>> overallResults = new HashMap<>();
  while (eventsReceived < totalEvents && remainingRetries > 0) {
    remainingRetries--;
    Map<SystemStreamPartition, List<IncomingMessageEnvelope>> result = systemConsumer.poll(systemStreamPartitionSet, 200);
    for (SystemStreamPartition ssp : result.keySet()) {
      List<IncomingMessageEnvelope> messageEnvelopeList = result.get(ssp);
      overallResults.putIfAbsent(ssp, new ArrayList<>());
      overallResults.get(ssp).addAll(messageEnvelopeList);
      if (overallResults.get(ssp).size() >= NUM_EVENTS + 1) {
        // this partition has delivered all events plus the end-of-stream marker; stop polling it
        systemStreamPartitionSet.remove(ssp);
      }
      eventsReceived += messageEnvelopeList.size();
    }
  }
  Assert.assertEquals(totalEvents, eventsReceived);
  Assert.assertEquals(NUM_FILES, overallResults.size());
  overallResults.values().forEach(messages -> {
    Assert.assertEquals(NUM_EVENTS + 1, messages.size());
    for (int index = 0; index < NUM_EVENTS; index++) {
      GenericRecord record = (GenericRecord) messages.get(index).getMessage();
      Assert.assertEquals(index % NUM_EVENTS, record.get(FIELD_1));
      Assert.assertEquals("string_" + (index % NUM_EVENTS), record.get(FIELD_2).toString());
    }
    Assert.assertEquals(IncomingMessageEnvelope.END_OF_STREAM_OFFSET, messages.get(NUM_EVENTS).getOffset());
  });
}
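The generateDefaultConfig() helper is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it only needs the same partitioner whitelist entry that testEmptyStagingDirectory below builds by hand (any further entries a real setup might require, such as a staging directory, are omitted here):

// Hypothetical sketch of the generateDefaultConfig() helper referenced above.
// Assumption: only the partitioner whitelist entry used elsewhere in this class is needed.
private Config generateDefaultConfig() {
  Map<String, String> properties = new HashMap<>();
  // Limit the directory partitioner to the generated *.avro test files.
  properties.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_WHITELIST(), SYSTEM_NAME), ".*avro");
  return new MapConfig(properties);
}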
Use of org.apache.samza.util.NoOpMetricsRegistry in project samza by apache.
Class TestHdfsSystemConsumer, method testEmptyStagingDirectory.
/*
 * Ensure that an empty staging directory does not break the system admin,
 * but does cause the system consumer to fail.
 */
@Test
public void testEmptyStagingDirectory() throws Exception {
  Map<String, String> configMap = new HashMap<>();
  configMap.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_WHITELIST(), SYSTEM_NAME), ".*avro");
  Config config = new MapConfig(configMap);
  HdfsSystemFactory systemFactory = new HdfsSystemFactory();
  // create admin and do partitioning
  HdfsSystemAdmin systemAdmin = systemFactory.getAdmin(SYSTEM_NAME, config);
  String stream = WORKING_DIRECTORY;
  Set<String> streamNames = new HashSet<>();
  streamNames.add(stream);
  generateAvroDataFiles();
  Map<String, SystemStreamMetadata> streamMetadataMap = systemAdmin.getSystemStreamMetadata(streamNames);
  SystemStreamMetadata systemStreamMetadata = streamMetadataMap.get(stream);
  Assert.assertEquals(NUM_FILES, systemStreamMetadata.getSystemStreamPartitionMetadata().size());
  // create consumer and read from files
  HdfsSystemConsumer systemConsumer = systemFactory.getConsumer(SYSTEM_NAME, config, new NoOpMetricsRegistry());
  Partition partition = new Partition(0);
  SystemStreamPartition ssp = new SystemStreamPartition(SYSTEM_NAME, stream, partition);
  try {
    systemConsumer.register(ssp, "0");
    Assert.fail("Empty staging directory should fail system consumer");
  } catch (UncheckedExecutionException e) {
    Assert.assertTrue(e.getCause() instanceof SamzaException);
  }
}
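The SamzaException surfaces wrapped in Guava's UncheckedExecutionException, which suggests the consumer looks up its per-partition reader through a Guava cache and the loader failure is rethrown unchecked. On JUnit 4.13 or newer, the same check can be written more compactly with Assert.assertThrows; a sketch, assuming that JUnit version is on the classpath:

// Equivalent assertion on JUnit 4.13+ (assumes Assert.assertThrows is available).
UncheckedExecutionException e = Assert.assertThrows(UncheckedExecutionException.class,
    () -> systemConsumer.register(ssp, "0"));
Assert.assertTrue(e.getCause() instanceof SamzaException);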
Use of org.apache.samza.util.NoOpMetricsRegistry in project samza by apache.
Class TestLocalStoreMonitor, method setUp.
@Before
public void setUp() throws Exception {
  // Make scaffold directories for testing.
  FileUtils.forceMkdir(taskStoreDir);
  taskStoreSize = taskStoreDir.getTotalSpace();
  // Set default return values for the mocked methods.
  Mockito.when(jobsClientMock.getJobStatus(Mockito.any())).thenReturn(JobStatus.STOPPED);
  Task task = new Task("localHost", "test-task", "0", new ArrayList<>(), ImmutableList.of("test-store"));
  Mockito.when(jobsClientMock.getTasks(Mockito.any())).thenReturn(ImmutableList.of(task));
  localStoreMonitorMetrics = new LocalStoreMonitorMetrics("TestMonitorName", new NoOpMetricsRegistry());
  // Initialize the local store monitor with the mocked jobs client and config.
  localStoreMonitor = new LocalStoreMonitor(new LocalStoreMonitorConfig(new MapConfig(config)), localStoreMonitorMetrics, jobsClientMock);
}
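With this setUp() in place, a test would typically trigger one monitoring pass and then assert on the scaffolded store directory. A minimal sketch, assuming LocalStoreMonitor exposes a monitor() method and that the mocked STOPPED job status makes the task store eligible for cleanup:

// Hypothetical test built on the setUp() above; assumes LocalStoreMonitor#monitor()
// removes local stores of jobs reported as STOPPED by the mocked jobs client.
@Test
public void shouldCleanUpLocalStoreOfStoppedJob() throws Exception {
  localStoreMonitor.monitor();
  Assert.assertFalse("Task store of a stopped job should have been deleted.", taskStoreDir.exists());
}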