Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io.
From the class AbstractLocalInputSourceParallelIndexTest, method doIndexTest:
public void doIndexTest(InputFormatDetails inputFormatDetails, @Nonnull Map<String, Object> extraInputFormatMap, Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair) throws Exception {
  final String indexDatasource = "wikipedia_index_test_" + UUID.randomUUID();
  Map inputFormatMap = new ImmutableMap.Builder<String, Object>().putAll(extraInputFormatMap).put("type", inputFormatDetails.getInputFormatType()).build();
  try (final Closeable ignored1 = unloader(indexDatasource + config.getExtraDatasourceNameSuffix())) {
    final Function<String, String> sqlInputSourcePropsTransform = spec -> {
      try {
        spec = StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(new DynamicPartitionsSpec(null, null)));
        spec = StringUtils.replace(spec, "%%INPUT_SOURCE_FILTER%%", "*" + inputFormatDetails.getFileExtension());
        spec = StringUtils.replace(spec, "%%INPUT_SOURCE_BASE_DIR%%", "/resources/data/batch_index" + inputFormatDetails.getFolderSuffix());
        spec = StringUtils.replace(spec, "%%INPUT_FORMAT%%", jsonMapper.writeValueAsString(inputFormatMap));
        spec = StringUtils.replace(spec, "%%APPEND_TO_EXISTING%%", jsonMapper.writeValueAsString(false));
        spec = StringUtils.replace(spec, "%%DROP_EXISTING%%", jsonMapper.writeValueAsString(false));
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(false));
        return spec;
      }
      catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    doIndexTest(indexDatasource, INDEX_TASK, sqlInputSourcePropsTransform, INDEX_QUERIES_RESOURCE, false, true, true, segmentAvailabilityConfirmationPair);
  }
}
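For reference, a minimal sketch (not part of the test class) of what the %%PARTITIONS_SPEC%% placeholder is replaced with: new DynamicPartitionsSpec(null, null) serialized through Jackson. The test uses Druid's injected jsonMapper; a plain ObjectMapper and the default values shown in the comment are assumptions here.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;

public class DynamicPartitionsSpecJsonSketch
{
  public static void main(String[] args) throws Exception
  {
    // Passing null for both arguments lets Druid fall back to its defaults.
    DynamicPartitionsSpec partitionsSpec = new DynamicPartitionsSpec(null, null);
    // Expected to print JSON along the lines of (exact defaults are an assumption):
    // {"type":"dynamic","maxRowsPerSegment":5000000,"maxTotalRows":null}
    System.out.println(new ObjectMapper().writeValueAsString(partitionsSpec));
  }
}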
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io.
From the class AbstractHdfsInputSourceParallelIndexTest, method doTest:
void doTest(Pair<String, List> hdfsInputSource, InputFormatDetails inputFormatDetails, Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair) throws Exception {
  final String indexDatasource = "wikipedia_index_test_" + UUID.randomUUID();
  try (final Closeable ignored1 = unloader(indexDatasource + config.getExtraDatasourceNameSuffix())) {
    final Function<String, String> hdfsPropsTransform = spec -> {
      try {
        spec = StringUtils.replace(spec, "%%INPUT_SOURCE_TYPE%%", "hdfs");
        spec = StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(new DynamicPartitionsSpec(null, null)));
        spec = StringUtils.replace(spec, "%%INPUT_SOURCE_PROPERTY_KEY%%", hdfsInputSource.lhs);
        spec = StringUtils.replace(spec, "%%INPUT_FORMAT_TYPE%%", inputFormatDetails.getInputFormatType());
        spec = StringUtils.replace(spec, "%%INPUT_SOURCE_PROPERTY_VALUE%%", jsonMapper.writeValueAsString(hdfsInputSource.rhs));
        spec = StringUtils.replace(spec, "%%FOLDER_SUFFIX%%", inputFormatDetails.getFolderSuffix());
        spec = StringUtils.replace(spec, "%%FILE_EXTENSION%%", inputFormatDetails.getFileExtension());
        return spec;
      }
      catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    doIndexTest(indexDatasource, INDEX_TASK, hdfsPropsTransform, INDEX_QUERIES_RESOURCE, false, true, true, segmentAvailabilityConfirmationPair);
  }
}
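An illustrative call site (a sketch, not taken from the test class) showing how the hdfsInputSource pair might be populated: the left-hand side is the hdfs inputSource property name and the right-hand side its value, which the transform splices into the %%INPUT_SOURCE_PROPERTY_KEY%% and %%INPUT_SOURCE_PROPERTY_VALUE%% placeholders. "paths" is a real property of Druid's hdfs input source, but the concrete URI below is made up.

// Hypothetical invocation from a concrete subclass test method; the HDFS path is invented.
Pair<String, List> hdfsInputSource = new Pair<>(
    "paths",
    ImmutableList.of("hdfs://namenode:9000/batch_index/json/wikipedia_index_data1.json")
);
doTest(hdfsInputSource, InputFormatDetails.JSON, new Pair<>(false, false));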
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io.
From the class ITAutoCompactionLockContentionTest, method verifyCompactedIntervals:
/**
 * Verifies that the given intervals have been compacted.
 */
private void verifyCompactedIntervals(Interval... compactedIntervals) {
  List<DataSegment> segments = coordinator.getFullSegmentsMetadata(fullDatasourceName);
  List<DataSegment> observedCompactedSegments = new ArrayList<>();
  Set<Interval> observedCompactedIntervals = new HashSet<>();
  for (DataSegment segment : segments) {
    if (segment.getLastCompactionState() != null) {
      observedCompactedSegments.add(segment);
      observedCompactedIntervals.add(segment.getInterval());
    }
  }
  Set<Interval> expectedCompactedIntervals = new HashSet<>(Arrays.asList(compactedIntervals));
  Assert.assertEquals(observedCompactedIntervals, expectedCompactedIntervals);

  DynamicPartitionsSpec expectedPartitionSpec = new DynamicPartitionsSpec(Specs.MAX_ROWS_PER_SEGMENT, Long.MAX_VALUE);
  for (DataSegment compactedSegment : observedCompactedSegments) {
    Assert.assertNotNull(compactedSegment.getLastCompactionState());
    Assert.assertEquals(compactedSegment.getLastCompactionState().getPartitionsSpec(), expectedPartitionSpec);
  }
}
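The final assertion works because DynamicPartitionsSpec implements value equality, so a spec rebuilt from the expected limits matches the one recorded in each segment's lastCompactionState. A minimal sketch, using 1000 as a stand-in for Specs.MAX_ROWS_PER_SEGMENT:

// Two independently constructed instances with the same limits compare equal.
DynamicPartitionsSpec expected = new DynamicPartitionsSpec(1000, Long.MAX_VALUE);
DynamicPartitionsSpec observed = new DynamicPartitionsSpec(1000, Long.MAX_VALUE);
Assert.assertEquals(observed, expected);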
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io.
From the class ITAutoCompactionUpgradeTest, method testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist:
@Test
public void testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist() throws Exception {
  // Verify that a compaction config already exists. This config was inserted manually into the
  // database using a SQL script; the auto compaction configuration payload is from Druid 0.21.0.
  CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNotNull(foundDataSourceCompactionConfig);

  // Now submit a new auto compaction configuration
  PartitionsSpec newPartitionsSpec = new DynamicPartitionsSpec(4000, null);
  Period newSkipOffset = Period.seconds(0);
  DataSourceCompactionConfig compactionConfig = new DataSourceCompactionConfig(
      UPGRADE_DATASOURCE_NAME,
      null,
      null,
      null,
      newSkipOffset,
      new UserCompactionTaskQueryTuningConfig(null, null, null, new MaxSizeSplitHintSpec(null, 1), newPartitionsSpec, null, null, null, null, null, 1, null, null, null, null, null, 1),
      new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null),
      null,
      null,
      null,
      new UserCompactionTaskIOConfig(true),
      null
  );
  compactionResource.submitCompactionConfig(compactionConfig);

  // Wait for compaction config to persist
  Thread.sleep(2000);

  // Verify that compaction was successfully updated
  coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNotNull(foundDataSourceCompactionConfig);
  Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
  Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), newPartitionsSpec);
  Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), newSkipOffset);
}
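The lookup loop over getCompactionConfigs() appears twice in this test. A small helper could factor it out; this sketch assumes only the getters already used above and is not part of the original class.

// Optional refactor sketch: find the compaction config for a datasource, or null if absent.
@Nullable
private static DataSourceCompactionConfig findConfigForDatasource(
    CoordinatorCompactionConfig coordinatorCompactionConfig,
    String dataSource
)
{
  for (DataSourceCompactionConfig config : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (config.getDataSource().equals(dataSource)) {
      return config;
    }
  }
  return null;
}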
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io.
From the class ITAppendBatchIndexTest, method submitIngestionTaskAndVerify:
private void submitIngestionTaskAndVerify(String indexDatasource, PartitionsSpec partitionsSpec, boolean appendToExisting, Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair) throws Exception {
  InputFormatDetails inputFormatDetails = InputFormatDetails.JSON;
  Map inputFormatMap = new ImmutableMap.Builder<String, Object>().put("type", inputFormatDetails.getInputFormatType()).build();
  final Function<String, String> sqlInputSourcePropsTransform = spec -> {
    try {
      spec = StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(partitionsSpec));
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_FILTER%%", "*" + inputFormatDetails.getFileExtension());
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_BASE_DIR%%", "/resources/data/batch_index" + inputFormatDetails.getFolderSuffix());
      spec = StringUtils.replace(spec, "%%INPUT_FORMAT%%", jsonMapper.writeValueAsString(inputFormatMap));
      spec = StringUtils.replace(spec, "%%APPEND_TO_EXISTING%%", jsonMapper.writeValueAsString(appendToExisting));
      spec = StringUtils.replace(spec, "%%DROP_EXISTING%%", jsonMapper.writeValueAsString(false));
      // Dynamic partitioning is best-effort rollup; hashed and single-dimension partitioning
      // require perfect (guaranteed) rollup, so the flag is set accordingly.
      if (partitionsSpec instanceof DynamicPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(false));
      } else if (partitionsSpec instanceof HashedPartitionsSpec || partitionsSpec instanceof SingleDimensionPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(true));
      }
      return spec;
    }
    catch (Exception e) {
      throw new RuntimeException(e);
    }
  };
  doIndexTest(indexDatasource, INDEX_TASK, sqlInputSourcePropsTransform, null, false, false, true, segmentAvailabilityConfirmationPair);
}
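An illustrative invocation (a sketch, not taken from the test class): appending a batch with dynamic partitioning, which leaves %%FORCE_GUARANTEED_ROLLUP%% rendered as false; a hashed or single-dimension spec would flip it to true. The datasource name and the availability-confirmation flags are made up for the example.

// Hypothetical call inside ITAppendBatchIndexTest: append using the default dynamic spec.
submitIngestionTaskAndVerify(
    "wikipedia_append_index_test",
    new DynamicPartitionsSpec(null, null),
    true,                       // appendToExisting
    new Pair<>(false, false)    // segment availability confirmation flags
);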