Use of org.apache.druid.indexer.partitions.PartitionsSpec in project druid by druid-io.
From the class PartialRangeSegmentGenerateTaskTest, method requiresMultiDimensionPartitions.
@Test
public void requiresMultiDimensionPartitions() {
  exception.expect(IllegalArgumentException.class);
  exception.expectMessage("range or single_dim partitionsSpec required");
  PartitionsSpec partitionsSpec = new HashedPartitionsSpec(null, 1, null);
  ParallelIndexTuningConfig tuningConfig = new ParallelIndexTestingFactory.TuningConfigBuilder()
      .partitionsSpec(partitionsSpec)
      .build();
  new PartialRangeSegmentGenerateTaskBuilder().tuningConfig(tuningConfig).build();
}
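For contrast, a minimal sketch of a spec the task should accept, assuming SingleDimensionPartitionsSpec has a four-argument constructor (targetRowsPerSegment, maxRowsPerSegment, partitionDimension, assumeGrouped); the dimension name "dim" and the row target are illustrative values, and the builders are the same ones used in the test above.

// A single_dim spec satisfies the "range or single_dim partitionsSpec required" check (illustrative values).
PartitionsSpec singleDimSpec = new SingleDimensionPartitionsSpec(1000, null, "dim", false);
ParallelIndexTuningConfig validTuningConfig = new ParallelIndexTestingFactory.TuningConfigBuilder()
    .partitionsSpec(singleDimSpec)
    .build();
// No IllegalArgumentException is expected here, since the spec is range-compatible.
new PartialRangeSegmentGenerateTaskBuilder().tuningConfig(validTuningConfig).build();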
Use of org.apache.druid.indexer.partitions.PartitionsSpec in project druid by druid-io.
From the class ParallelIndexSupervisorTaskSerdeTest, method forceGuaranteedRollupWithHashPartitionsValid.
@Test
public void forceGuaranteedRollupWithHashPartitionsValid() {
  Integer numShards = 2;
  ParallelIndexSupervisorTask task = new ParallelIndexSupervisorTaskBuilder()
      .ingestionSpec(new ParallelIndexIngestionSpecBuilder()
          .forceGuaranteedRollup(true)
          .partitionsSpec(new HashedPartitionsSpec(null, numShards, null))
          .inputIntervals(INTERVALS)
          .build())
      .build();
  PartitionsSpec partitionsSpec = task.getIngestionSchema().getTuningConfig().getPartitionsSpec();
  Assert.assertThat(partitionsSpec, CoreMatchers.instanceOf(HashedPartitionsSpec.class));
}
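As a small follow-on sketch (these assertions are not part of the serde test itself, but reuse the isForceGuaranteedRollupCompatible() method that the best-effort rollup test further down relies on), the compatibility this test depends on can be checked directly on the specs.

// Hashed partitioning supports guaranteed (perfect) rollup, so forceGuaranteedRollup=true is accepted with it.
HashedPartitionsSpec hashedSpec = new HashedPartitionsSpec(null, 2, null);
Assert.assertTrue(hashedSpec.isForceGuaranteedRollupCompatible());
// Dynamic partitioning is the best-effort counterpart and reports the opposite.
Assert.assertFalse(new DynamicPartitionsSpec(1000, null).isForceGuaranteedRollupCompatible());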
Use of org.apache.druid.indexer.partitions.PartitionsSpec in project druid by druid-io.
From the class ITAutoCompactionUpgradeTest, method testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist.
@Test
public void testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist() throws Exception {
  // Verify that a compaction config already exists. This config was inserted manually into the database
  // using a SQL script; the auto compaction configuration payload is from Druid 0.21.0.
  CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNotNull(foundDataSourceCompactionConfig);

  // Now submit a new auto compaction configuration
  PartitionsSpec newPartitionsSpec = new DynamicPartitionsSpec(4000, null);
  Period newSkipOffset = Period.seconds(0);
  DataSourceCompactionConfig compactionConfig = new DataSourceCompactionConfig(
      UPGRADE_DATASOURCE_NAME,
      null,
      null,
      null,
      newSkipOffset,
      new UserCompactionTaskQueryTuningConfig(
          null, null, null,
          new MaxSizeSplitHintSpec(null, 1),
          newPartitionsSpec,
          null, null, null, null, null,
          1,
          null, null, null, null, null,
          1
      ),
      new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null),
      null,
      null,
      null,
      new UserCompactionTaskIOConfig(true),
      null
  );
  compactionResource.submitCompactionConfig(compactionConfig);

  // Wait for the compaction config to persist
  Thread.sleep(2000);

  // Verify that the compaction config was successfully updated
  coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNotNull(foundDataSourceCompactionConfig);
  Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
  Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), newPartitionsSpec);
  Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), newSkipOffset);
}
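The fixed Thread.sleep(2000) is the simplest way to wait for persistence; as a sketch, under the assumption that ITRetryUtil.retryUntilTrue (used in the parallel-index test further down) accepts a Callable, the wait could instead poll until the new spec becomes visible. The predicate below is illustrative and not part of the original test.

// Illustrative alternative to the fixed sleep: poll until the coordinator returns the new dynamic spec.
ITRetryUtil.retryUntilTrue(
    () -> compactionResource.getCoordinatorCompactionConfigs().getCompactionConfigs().stream().anyMatch(
        c -> c.getDataSource().equals(UPGRADE_DATASOURCE_NAME)
            && c.getTuningConfig() != null
            && newPartitionsSpec.equals(c.getTuningConfig().getPartitionsSpec())),
    "compaction config updated"
);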
Use of org.apache.druid.indexer.partitions.PartitionsSpec in project druid by druid-io.
From the class ITAppendBatchIndexTest, method submitIngestionTaskAndVerify.
private void submitIngestionTaskAndVerify(
    String indexDatasource,
    PartitionsSpec partitionsSpec,
    boolean appendToExisting,
    Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair
) throws Exception {
  InputFormatDetails inputFormatDetails = InputFormatDetails.JSON;
  Map<String, Object> inputFormatMap = new ImmutableMap.Builder<String, Object>()
      .put("type", inputFormatDetails.getInputFormatType())
      .build();
  final Function<String, String> sqlInputSourcePropsTransform = spec -> {
    try {
      spec = StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(partitionsSpec));
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_FILTER%%", "*" + inputFormatDetails.getFileExtension());
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_BASE_DIR%%", "/resources/data/batch_index" + inputFormatDetails.getFolderSuffix());
      spec = StringUtils.replace(spec, "%%INPUT_FORMAT%%", jsonMapper.writeValueAsString(inputFormatMap));
      spec = StringUtils.replace(spec, "%%APPEND_TO_EXISTING%%", jsonMapper.writeValueAsString(appendToExisting));
      spec = StringUtils.replace(spec, "%%DROP_EXISTING%%", jsonMapper.writeValueAsString(false));
      if (partitionsSpec instanceof DynamicPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(false));
      } else if (partitionsSpec instanceof HashedPartitionsSpec || partitionsSpec instanceof SingleDimensionPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(true));
      }
      return spec;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  };
  doIndexTest(indexDatasource, INDEX_TASK, sqlInputSourcePropsTransform, null, false, false, true, segmentAvailabilityConfirmationPair);
}
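The instanceof chain above encodes which spec types require forceGuaranteedRollup; a hypothetical helper (not part of the integration test, and assuming jsonMapper is a Jackson ObjectMapper) could derive the same flag from the spec's own isForceGuaranteedRollupCompatible() method, which the best-effort rollup test below relies on.

// Hypothetical alternative: let the spec report its own rollup compatibility instead of enumerating types.
private static String forceGuaranteedRollupValue(PartitionsSpec partitionsSpec, ObjectMapper jsonMapper)
    throws JsonProcessingException {
  // DynamicPartitionsSpec reports false; hashed and single_dim specs report true.
  return jsonMapper.writeValueAsString(partitionsSpec.isForceGuaranteedRollupCompatible());
}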
Use of org.apache.druid.indexer.partitions.PartitionsSpec in project druid by druid-io.
From the class ITBestEffortRollupParallelIndexTest, method testIndexDataAwaitSegmentAvailabilityFailsButTaskSucceeds.
/**
 * Test a non-zero value for awaitSegmentAvailabilityTimeoutMillis. The config value is set to 1 millisecond
 * and coordination is paused to confirm that the task still succeeds even if the job was not able to confirm
 * that the segments were loaded before the timeout occurred.
 *
 * @param partitionsSpec the partitions spec under test
 * @throws Exception
 */
@Test(dataProvider = "resources")
public void testIndexDataAwaitSegmentAvailabilityFailsButTaskSucceeds(PartitionsSpec partitionsSpec) throws Exception {
  try (final Closeable ignored1 = unloader(INDEX_DATASOURCE + config.getExtraDatasourceNameSuffix())) {
    coordinatorClient.postDynamicConfig(DYNAMIC_CONFIG_PAUSED);
    boolean forceGuaranteedRollup = partitionsSpec.isForceGuaranteedRollupCompatible();
    Assert.assertFalse(forceGuaranteedRollup, "partitionsSpec does not support best-effort rollup");
    final Function<String, String> rollupTransform = spec -> {
      try {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", Boolean.toString(false));
        spec = StringUtils.replace(spec, "%%SEGMENT_AVAIL_TIMEOUT_MILLIS%%", jsonMapper.writeValueAsString("1"));
        return StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(partitionsSpec));
      } catch (JsonProcessingException e) {
        throw new RuntimeException(e);
      }
    };
    doIndexTest(INDEX_DATASOURCE, INDEX_TASK, rollupTransform, INDEX_QUERIES_RESOURCE, false, false, false, new Pair<>(true, false));
    coordinatorClient.postDynamicConfig(DYNAMIC_CONFIG_DEFAULT);
    ITRetryUtil.retryUntilTrue(
        () -> coordinator.areSegmentsLoaded(INDEX_DATASOURCE + config.getExtraDatasourceNameSuffix()),
        "Segment Load"
    );
  }
}
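For context, a hypothetical sketch of the "resources" TestNG data provider backing this test: the provider name matches the annotation above, but the specific specs supplied here are illustrative rather than taken from the actual test class.

// Hypothetical data provider shape: each row supplies one best-effort-compatible partitionsSpec.
@DataProvider(name = "resources")
public static Object[][] resources() {
  return new Object[][]{
      {new DynamicPartitionsSpec(null, null)},
      {new DynamicPartitionsSpec(10000, null)}
  };
}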