Use of org.apache.druid.segment.incremental.RowIngestionMetersTotals in project druid by druid-io.
From the class SinglePhaseParallelIndexingTest, method getExpectedTaskReportSequential, which builds the task report map a sequential single-phase task is expected to emit:
private Map<String, Object> getExpectedTaskReportSequential(
    String taskId,
    List<ParseExceptionReport> expectedUnparseableEvents,
    RowIngestionMetersTotals expectedTotals
)
{
  Map<String, Object> returnMap = new HashMap<>();
  Map<String, Object> ingestionStatsAndErrors = new HashMap<>();
  Map<String, Object> payload = new HashMap<>();
  payload.put("ingestionState", IngestionState.COMPLETED);
  payload.put("unparseableEvents", ImmutableMap.of("determinePartitions", ImmutableList.of(), "buildSegments", expectedUnparseableEvents));
  // The expected report carries all-zero moving averages for each window.
  Map<String, Object> emptyAverageMinuteMap =
      ImmutableMap.of("processed", 0.0, "unparseable", 0.0, "thrownAway", 0.0, "processedWithError", 0.0);
  Map<String, Object> emptyAverages =
      ImmutableMap.of("1m", emptyAverageMinuteMap, "5m", emptyAverageMinuteMap, "15m", emptyAverageMinuteMap);
  payload.put(
      "rowStats",
      ImmutableMap.of(
          "movingAverages", ImmutableMap.of("determinePartitions", emptyAverages, "buildSegments", emptyAverages),
          "totals", ImmutableMap.of("determinePartitions", new RowIngestionMetersTotals(0, 0, 0, 0), "buildSegments", expectedTotals)
      )
  );
  ingestionStatsAndErrors.put("taskId", taskId);
  ingestionStatsAndErrors.put("payload", payload);
  ingestionStatsAndErrors.put("type", "ingestionStatsAndErrors");
  returnMap.put("ingestionStatsAndErrors", ingestionStatsAndErrors);
  return returnMap;
}
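A minimal sketch of how a helper like this might be asserted against. The actualReports map, the task variable, and the fixture values are assumptions for illustration, not taken verbatim from this test; the RowIngestionMetersTotals constructor argument order is assumed to be (processed, processedWithError, thrownAway, unparseable), matching the zero-filled instance above.

// Hypothetical usage sketch; "actualReports", "task", and the fixture values
// are assumptions, not taken from SinglePhaseParallelIndexingTest.
// Constructor order assumed: processed, processedWithError, thrownAway, unparseable.
RowIngestionMetersTotals expectedTotals = new RowIngestionMetersTotals(10, 0, 1, 1);
Map<String, Object> expectedReports = getExpectedTaskReportSequential(
    task.getId(),
    ImmutableList.of(),
    expectedTotals
);
Assert.assertEquals(expectedReports, actualReports);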
Use of org.apache.druid.segment.incremental.RowIngestionMetersTotals in project druid by druid-io.
From the class KafkaIndexTaskTest, method testRunAfterDataInsertedLiveReport, which verifies the live row-stats report of a running Kafka indexing task:
@Test(timeout = 60_000L)
public void testRunAfterDataInsertedLiveReport() throws Exception
{
  // Insert data
  insertData();
  final KafkaIndexTask task = createTask(
      null,
      new KafkaIndexTaskIOConfig(
          0,
          "sequence0",
          new SeekableStreamStartSequenceNumbers<>(topic, ImmutableMap.of(0, 2L), ImmutableSet.of()),
          new SeekableStreamEndSequenceNumbers<>(topic, ImmutableMap.of(0, 12L)),
          kafkaServer.consumerProperties(),
          KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS,
          true,
          null,
          null,
          INPUT_FORMAT
      )
  );
  final ListenableFuture<TaskStatus> future = runTask(task);
  final SeekableStreamIndexTaskRunner runner = task.getRunner();
  // Poll until the task has read all input and paused before publishing,
  // so the live report below covers every ingested row.
  while (true) {
    Thread.sleep(1000);
    if (runner.getStatus() == Status.PUBLISHING) {
      break;
    }
  }
  Map rowStats = runner.doGetRowStats();
  Map totals = (Map) rowStats.get("totals");
  RowIngestionMetersTotals buildSegments = (RowIngestionMetersTotals) totals.get("buildSegments");
  Map movingAverages = (Map) rowStats.get("movingAverages");
  Map buildSegmentsMovingAverages = (Map) movingAverages.get("buildSegments");
  HashMap avg1Min = (HashMap) buildSegmentsMovingAverages.get("1m");
  HashMap avg5Min = (HashMap) buildSegmentsMovingAverages.get("5m");
  HashMap avg15Min = (HashMap) buildSegmentsMovingAverages.get("15m");
  runner.resume();
  // Check that the live-report totals match the runner's ingestion meters
  Assert.assertEquals(buildSegments.getProcessed(), task.getRunner().getRowIngestionMeters().getProcessed());
  Assert.assertEquals(buildSegments.getUnparseable(), task.getRunner().getRowIngestionMeters().getUnparseable());
  Assert.assertEquals(buildSegments.getThrownAway(), task.getRunner().getRowIngestionMeters().getThrownAway());
  Assert.assertEquals(0.0, avg1Min.get("processed"));
  Assert.assertEquals(0.0, avg5Min.get("processed"));
  Assert.assertEquals(0.0, avg15Min.get("processed"));
  // Wait for task to exit
  Assert.assertEquals(TaskState.SUCCESS, future.get().getStatusCode());
}
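For reference, the test above compares the live-report snapshot getter by getter; a totals object can also be built and compared wholesale. A minimal sketch, assuming the constructor argument order (processed, processedWithError, thrownAway, unparseable) implied by the zero-filled instance in the first snippet, value-based equals on RowIngestionMetersTotals, and a getTotals() accessor on RowIngestionMeters:

// Minimal sketch; constructor argument order, value-based equals, and the
// getTotals() accessor are assumptions, not confirmed by the snippets above.
RowIngestionMetersTotals expected = new RowIngestionMetersTotals(10, 0, 0, 0);
RowIngestionMetersTotals actual = task.getRunner().getRowIngestionMeters().getTotals();
Assert.assertEquals(expected, actual);
Assert.assertEquals(10, actual.getProcessed());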