use of org.apache.druid.testing.clients.TaskResponseObject in project druid by druid-io.
the class AbstractStreamIndexingTest method testIndexWithStreamReshardHelper.
private void testIndexWithStreamReshardHelper(@Nullable Boolean transactionEnabled, int newShardCount) throws Exception {
final GeneratedTestConfig generatedTestConfig = new GeneratedTestConfig(INPUT_FORMAT, getResourceAsString(JSON_INPUT_FORMAT_PATH));
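// The closer from createResourceCloser cleans up the supervisor, datasource, and stream on exit (see doMethodTeardown below)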
try (final Closeable closer = createResourceCloser(generatedTestConfig);
final StreamEventWriter streamEventWriter = createStreamEventWriter(config, transactionEnabled)) {
final String taskSpec = generatedTestConfig.getStreamIngestionPropsTransform().apply(getResourceAsString(SUPERVISOR_SPEC_TEMPLATE_PATH));
LOG.info("supervisorSpec: [%s]\n", taskSpec);
// Start supervisor
generatedTestConfig.setSupervisorId(indexer.submitSupervisor(taskSpec));
LOG.info("Submitted supervisor");
// Start generating one third of the data (before resharding)
int secondsToGenerateRemaining = TOTAL_NUMBER_OF_SECOND;
int secondsToGenerateFirstRound = TOTAL_NUMBER_OF_SECOND / 3;
secondsToGenerateRemaining = secondsToGenerateRemaining - secondsToGenerateFirstRound;
final StreamGenerator streamGenerator = new WikipediaStreamEventStreamGenerator(new JsonEventSerializer(jsonMapper), EVENTS_PER_SECOND, CYCLE_PADDING_MS);
long numWritten = streamGenerator.run(generatedTestConfig.getStreamName(), streamEventWriter, secondsToGenerateFirstRound, FIRST_EVENT_TIME);
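// Track the exact number of events written; ingestion is verified against this running total below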
// Verify supervisor is healthy before resharding
ITRetryUtil.retryUntil(() -> SupervisorStateManager.BasicState.RUNNING.equals(indexer.getSupervisorStatus(generatedTestConfig.getSupervisorId())), true, 10000, 30, "Waiting for supervisor to be healthy");
// Reshard the stream by splitting from STREAM_SHARD_COUNT to newShardCount and wait until the resharding starts
streamAdminClient.updatePartitionCount(generatedTestConfig.getStreamName(), newShardCount, true);
// Start generating one third of the data (while resharding)
int secondsToGenerateSecondRound = TOTAL_NUMBER_OF_SECOND / 3;
secondsToGenerateRemaining = secondsToGenerateRemaining - secondsToGenerateSecondRound;
numWritten += streamGenerator.run(generatedTestConfig.getStreamName(), streamEventWriter, secondsToGenerateSecondRound, FIRST_EVENT_TIME.plusSeconds(secondsToGenerateFirstRound));
// Wait for stream to finish resharding
ITRetryUtil.retryUntil(() -> streamAdminClient.isStreamActive(generatedTestConfig.getStreamName()), true, 10000, 30, "Waiting for stream to finish resharding");
ITRetryUtil.retryUntil(() -> streamAdminClient.verfiyPartitionCountUpdated(generatedTestConfig.getStreamName(), STREAM_SHARD_COUNT, newShardCount), true, 10000, 30, "Waiting for stream partition count to update");
// Start generating remaining data (after resharding)
numWritten += streamGenerator.run(generatedTestConfig.getStreamName(), streamEventWriter, secondsToGenerateRemaining, FIRST_EVENT_TIME.plusSeconds(secondsToGenerateFirstRound + secondsToGenerateSecondRound));
// Verify supervisor is healthy after resharding
ITRetryUtil.retryUntil(() -> SupervisorStateManager.BasicState.RUNNING.equals(indexer.getSupervisorStatus(generatedTestConfig.getSupervisorId())), true, 10000, 30, "Waiting for supervisor to be healthy");
// Verify that supervisor can catch up with the stream
verifyIngestedData(generatedTestConfig, numWritten);
}
// Verify that the thrown-away event count was not incremented by the reshard
List<TaskResponseObject> completedTasks = indexer.getCompleteTasksForDataSource(generatedTestConfig.getFullDatasourceName());
for (TaskResponseObject task : completedTasks) {
try {
RowIngestionMetersTotals stats = indexer.getTaskStats(task.getId());
Assert.assertEquals(0L, stats.getThrownAway());
} catch (Exception e) {
// A failed task may not have a task stats report. We can ignore it, as the task did not consume any data
if (!task.getStatus().isFailure()) {
throw e;
}
}
}
}
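The polling helper ITRetryUtil.retryUntil appears throughout these tests with the argument order (condition, expected value, delay in millis, retry count, label). A minimal sketch of an equivalent loop is below; it is a stand-in under those assumptions, not Druid's actual implementation:
import java.util.concurrent.Callable;
static void retryUntil(Callable<Boolean> condition, boolean expected, long delayMillis, int retryCount, String label) throws Exception {
for (int i = 0; i < retryCount; i++) {
// Poll the condition and stop as soon as it matches the expected value
if (condition.call() == expected) {
return;
}
Thread.sleep(delayMillis);
}
throw new IllegalStateException("Condition not met within " + retryCount + " attempts: " + label);
}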
use of org.apache.druid.testing.clients.TaskResponseObject in project druid by druid-io.
the class AbstractStreamIndexingTest method doMethodTeardown.
private void doMethodTeardown(GeneratedTestConfig generatedTestConfig) {
if (generatedTestConfig.getSupervisorId() != null) {
try {
LOG.info("Terminating supervisor");
indexer.terminateSupervisor(generatedTestConfig.getSupervisorId());
// Shutdown all tasks of supervisor
List<TaskResponseObject> runningTasks = indexer.getUncompletedTasksForDataSource(generatedTestConfig.getFullDatasourceName());
for (TaskResponseObject task : runningTasks) {
indexer.shutdownTask(task.getId());
}
} catch (Exception e) {
// Best-effort cleanup, as the supervisor may already have been cleaned up
LOG.warn(e, "Failed to clean up supervisor. This might be expected depending on the test method");
}
}
try {
unloader(generatedTestConfig.getFullDatasourceName());
} catch (Exception e) {
// Best-effort cleanup, as the datasource may already have been cleaned up
LOG.warn(e, "Failed to clean up datasource. This might be expected depending on the test method");
}
try {
streamAdminClient.deleteStream(generatedTestConfig.getStreamName());
} catch (Exception e) {
// Best-effort cleanup, as the stream may already have been cleaned up
LOG.warn(e, "Failed to clean up stream. This might be expected depending on the test method");
}
}
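For context, the resource closer used by testIndexWithStreamReshardHelper above can simply defer to this teardown. A minimal sketch, assuming createResourceCloser does nothing beyond wrapping doMethodTeardown (the real helper may register more resources):
private Closeable createResourceCloser(GeneratedTestConfig generatedTestConfig) {
// Closing the returned Closeable runs the best-effort teardown shown above
return () -> doMethodTeardown(generatedTestConfig);
}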
use of org.apache.druid.testing.clients.TaskResponseObject in project druid by druid-io.
the class ITAutoCompactionTest method testAutoCompactionDutyWithOverlappingInterval.
@Test
public void testAutoCompactionDutyWithOverlappingInterval() throws Exception {
final ISOChronology chrono = ISOChronology.getInstance(DateTimes.inferTzFromString("America/Los_Angeles"));
Map<String, Object> specs = ImmutableMap.of("%%GRANULARITYSPEC%%", new UniformGranularitySpec(Granularities.WEEK, Granularities.NONE, false, ImmutableList.of(new Interval("2013-08-31/2013-09-02", chrono))));
// Create WEEK segment with 2013-08-26 to 2013-09-02
loadData(INDEX_TASK_WITH_GRANULARITY_SPEC, specs);
specs = ImmutableMap.of("%%GRANULARITYSPEC%%", new UniformGranularitySpec(Granularities.MONTH, Granularities.NONE, false, ImmutableList.of(new Interval("2013-09-01/2013-09-02", chrono))));
// Create MONTH segment with 2013-09-01 to 2013-10-01
loadData(INDEX_TASK_WITH_GRANULARITY_SPEC, specs);
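// The MONTH segment (2013-09-01/2013-10-01) now overlaps the tail of the WEEK segment (2013-08-26/2013-09-02)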
try (final Closeable ignored = unloader(fullDatasourceName)) {
verifySegmentsCount(2);
// Result is not rolled up
// For the dim "page", the result has values "Gypsy Danger" and "Striker Eureka"
Map<String, Object> expectedResult = ImmutableMap.of("%%FIELD_TO_QUERY%%", "added", "%%EXPECTED_COUNT_RESULT%%", 2, "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(57.0), ImmutableList.of(459.0)))));
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, null, null, null, null, false);
// Compact the MONTH segment
forceTriggerAutoCompaction(2);
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
// Compact the WEEK segment
forceTriggerAutoCompaction(2);
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
// Verify all tasks succeeded
List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
for (TaskResponseObject taskResponseObject : compactTasksBefore) {
Assert.assertEquals(TaskState.SUCCESS, taskResponseObject.getStatus());
}
// Verify compacted segments do not get compacted again
forceTriggerAutoCompaction(2);
List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
Assert.assertEquals(compactTasksAfter.size(), compactTasksBefore.size());
}
}
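Each compaction test drives the duty through forceTriggerAutoCompaction(expectedSegmentCount). A plausible shape for that helper is sketched below; compactionResource.forceTriggerAutoCompaction() and waitForAllTasksToCompleteForDataSource(...) are assumed names for illustration, not confirmed Druid APIs:
private void forceTriggerAutoCompaction(int numExpectedSegmentsAfterCompaction) throws Exception {
// Hypothetical call asking the coordinator to run its compaction duty immediately
compactionResource.forceTriggerAutoCompaction();
// Hypothetical wait for the spawned compact tasks to finish before checking segment counts
waitForAllTasksToCompleteForDataSource(fullDatasourceName);
verifySegmentsCount(numExpectedSegmentsAfterCompaction);
}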
use of org.apache.druid.testing.clients.TaskResponseObject in project druid by druid-io.
the class ITAutoCompactionTest method testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity.
@Test
public void testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity() throws Exception {
loadData(INDEX_TASK);
try (final Closeable ignored = unloader(fullDatasourceName)) {
final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
intervalsBeforeCompaction.sort(null);
// 4 segments across 2 days
verifySegmentsCount(4);
verifyQuery(INDEX_QUERIES_RESOURCE);
// Compact without segmentGranularity in the auto compaction config
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET);
forceTriggerAutoCompaction(2);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
// Segments were compacted and already have DAY granularity, since the data was initially ingested with DAY granularity.
// Now set auto compaction with YEAR granularity in the granularitySpec
Granularity newGranularity = Granularities.YEAR;
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null));
forceTriggerAutoCompaction(1);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(1, MAX_ROWS_PER_SEGMENT_COMPACTED);
// There should be new compaction tasks since SegmentGranularity changed from DAY to YEAR
List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
Assert.assertTrue(compactTasksAfter.size() > compactTasksBefore.size());
}
}
use of org.apache.druid.testing.clients.TaskResponseObject in project druid by druid-io.
the class ITAutoCompactionTest method testAutoCompactionDutyWithQueryGranularity.
@Test
public void testAutoCompactionDutyWithQueryGranularity() throws Exception {
final ISOChronology chrono = ISOChronology.getInstance(DateTimes.inferTzFromString("America/Los_Angeles"));
Map<String, Object> specs = ImmutableMap.of("%%GRANULARITYSPEC%%", new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, true, ImmutableList.of(new Interval("2013-08-31/2013-09-02", chrono))));
loadData(INDEX_TASK_WITH_GRANULARITY_SPEC, specs);
try (final Closeable ignored = unloader(fullDatasourceName)) {
Map<String, Object> expectedResult = ImmutableMap.of("%%FIELD_TO_QUERY%%", "added", "%%EXPECTED_COUNT_RESULT%%", 2, "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(57.0), ImmutableList.of(459.0)))));
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(null, Granularities.DAY, null), false);
forceTriggerAutoCompaction(2);
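// With DAY queryGranularity the two rows roll up into one row: 57.0 + 459.0 = 516.0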
expectedResult = ImmutableMap.of("%%FIELD_TO_QUERY%%", "added", "%%EXPECTED_COUNT_RESULT%%", 1, "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(516.0)))));
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
// Verify rolled-up segments do not get compacted again
forceTriggerAutoCompaction(2);
List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
Assert.assertEquals(compactTasksAfter.size(), compactTasksBefore.size());
}
}