Use of io.druid.timeline.partition.ShardSpec in project druid by druid-io.
The class IndexGeneratorJobTest, method loadShardSpecs:
private Map<Long, List<HadoopyShardSpec>> loadShardSpecs(String partitionType, Object[][][] shardInfoForEachShard)
{
  Map<Long, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
  int shardCount = 0;
  int segmentNum = 0;
  for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
    List<ShardSpec> specs = constructShardSpecFromShardInfo(partitionType, shardInfoForEachShard[segmentNum++]);
    List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(specs.size());
    for (int i = 0; i < specs.size(); ++i) {
      actualSpecs.add(new HadoopyShardSpec(specs.get(i), shardCount++));
    }
    shardSpecs.put(segmentGranularity.getStartMillis(), actualSpecs);
  }
  return shardSpecs;
}
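The helper constructShardSpecFromShardInfo is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming the test exercises the "hashed" and "single" partition types, that hashed entries carry (partitionNum, partitions) pairs, and that single-dimension entries carry start/end boundaries for a hypothetical "host" dimension; none of those details are confirmed by the snippet above.

// Hypothetical sketch; the real helper is defined elsewhere in IndexGeneratorJobTest.
private List<ShardSpec> constructShardSpecFromShardInfo(String partitionType, Object[][] shardInfoForEachShard)
{
  List<ShardSpec> specs = Lists.newArrayList();
  if ("hashed".equals(partitionType)) {
    for (Object[] shardInfo : shardInfoForEachShard) {
      // Assumption: shardInfo[0] = partitionNum, shardInfo[1] = total number of partitions
      specs.add(new HashBasedNumberedShardSpec(
          (Integer) shardInfo[0],
          (Integer) shardInfo[1],
          null,
          HadoopDruidIndexerConfig.JSON_MAPPER
      ));
    }
  } else if ("single".equals(partitionType)) {
    int partitionNum = 0;
    for (Object[] shardInfo : shardInfoForEachShard) {
      // Assumption: shardInfo[0] / shardInfo[1] are the start/end boundaries of a "host" dimension
      specs.add(new SingleDimensionShardSpec("host", (String) shardInfo[0], (String) shardInfo[1], partitionNum++));
    }
  } else {
    throw new IllegalArgumentException("Unsupported partitionType: " + partitionType);
  }
  return specs;
}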
Use of io.druid.timeline.partition.ShardSpec in project hive by apache.
The class DruidStorageHandlerUtils, method publishSegmentsAndCommit:
/**
 * First computes the segment timeline to accommodate the new segments (for the insert-into case),
 * then moves the segments to Druid deep storage with updated metadata/version.
 * All of this is done in one transaction.
 *
 * @param connector DBI connector used to commit
 * @param metadataStorageTablesConfig Druid metadata table definitions
 * @param dataSource Druid data source name
 * @param segments list of segments to move and commit to metadata
 * @param overwrite whether this is an insert overwrite
 * @param conf Hadoop configuration
 * @param dataSegmentPusher segment pusher
 *
 * @return list of successfully published Druid segments,
 * with versions and metadata updated after the move and timeline sorting
 *
 * @throws CallbackFailedException if the transaction callback fails
 */
public static List<DataSegment> publishSegmentsAndCommit(
    final SQLMetadataConnector connector,
    final MetadataStorageTablesConfig metadataStorageTablesConfig,
    final String dataSource,
    final List<DataSegment> segments,
    boolean overwrite,
    Configuration conf,
    DataSegmentPusher dataSegmentPusher
) throws CallbackFailedException
{
  return connector.getDBI().inTransaction((handle, transactionStatus) -> {
    // Create the timeline for the existing and new segments
    VersionedIntervalTimeline<String, DataSegment> timeline;
    if (overwrite) {
      // If we are overwriting, disable the existing data source
      disableDataSourceWithHandle(handle, metadataStorageTablesConfig, dataSource);
      // When overwriting, start with an empty timeline,
      // as we are overwriting segments with new versions
      timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    } else {
      // Append mode
      if (segments.isEmpty()) {
        // If there are no new segments, we can just bail out
        return Collections.EMPTY_LIST;
      }
      // Otherwise, build a timeline of existing segments in metadata storage
      Interval indexedInterval = JodaUtils.umbrellaInterval(Iterables.transform(segments, input -> input.getInterval()));
      LOG.info("Building timeline for umbrella Interval [{}]", indexedInterval);
      timeline = getTimelineForIntervalWithHandle(handle, dataSource, indexedInterval, metadataStorageTablesConfig);
    }
    final List<DataSegment> finalSegmentsToPublish = Lists.newArrayList();
    for (DataSegment segment : segments) {
      List<TimelineObjectHolder<String, DataSegment>> existingChunks = timeline.lookup(segment.getInterval());
      if (existingChunks.size() > 1) {
        // Druid shard specs do not support multiple partitions for the same interval with different granularity.
        throw new IllegalStateException(
            String.format(
                "Cannot allocate new segment for dataSource[%s], interval[%s], already have [%,d] chunks. "
                + "Not possible to append new segment.",
                dataSource,
                segment.getInterval(),
                existingChunks.size()
            )
        );
      }
      // Find the segment with the latest version and the maximum partition number
      SegmentIdentifier max = null;
      final ShardSpec newShardSpec;
      final String newVersion;
      if (!existingChunks.isEmpty()) {
        // There is an existing chunk; find the max
        TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
        for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
          if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
            max = SegmentIdentifier.fromDataSegment(existing.getObject());
          }
        }
      }
      if (max == null) {
        // No existing shard present in the database, use the current version.
        newShardSpec = segment.getShardSpec();
        newVersion = segment.getVersion();
      } else {
        // Use the version of the existing max segment to generate the new shard spec
        newShardSpec = getNextPartitionShardSpec(max.getShardSpec());
        newVersion = max.getVersion();
      }
      DataSegment publishedSegment = publishSegmentWithShardSpec(
          segment,
          newShardSpec,
          newVersion,
          getPath(segment).getFileSystem(conf),
          dataSegmentPusher
      );
      finalSegmentsToPublish.add(publishedSegment);
      timeline.add(
          publishedSegment.getInterval(),
          publishedSegment.getVersion(),
          publishedSegment.getShardSpec().createChunk(publishedSegment)
      );
    }
    // Publish new segments to metadata storage
    final PreparedBatch batch = handle.prepareBatch(
        String.format(
            "INSERT INTO %1$s (id, dataSource, created_date, start, \"end\", partitioned, version, used, payload) "
            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
            metadataStorageTablesConfig.getSegmentsTable()
        )
    );
    for (final DataSegment segment : finalSegmentsToPublish) {
      batch.add(
          new ImmutableMap.Builder<String, Object>()
              .put("id", segment.getIdentifier())
              .put("dataSource", segment.getDataSource())
              .put("created_date", new DateTime().toString())
              .put("start", segment.getInterval().getStart().toString())
              .put("end", segment.getInterval().getEnd().toString())
              .put("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true)
              .put("version", segment.getVersion())
              .put("used", true)
              .put("payload", JSON_MAPPER.writeValueAsBytes(segment))
              .build()
      );
      LOG.info("Published {}", segment.getIdentifier());
    }
    batch.execute();
    return finalSegmentsToPublish;
  });
}
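The getNextPartitionShardSpec helper called above is not reproduced on this page. A sketch of the idea, assuming appends are only supported for linear and numbered shard specs and that the new spec simply advances the partition number of the max segment; the actual Hive helper may differ.

// Sketch only: bump the partition number; linear and numbered shard specs assumed supported.
private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec)
{
  if (shardSpec instanceof LinearShardSpec) {
    return new LinearShardSpec(shardSpec.getPartitionNum() + 1);
  } else if (shardSpec instanceof NumberedShardSpec) {
    return new NumberedShardSpec(shardSpec.getPartitionNum() + 1, ((NumberedShardSpec) shardSpec).getPartitions());
  } else {
    throw new IllegalStateException(
        String.format("Cannot append to shard spec of type [%s]", shardSpec.getClass().getName())
    );
  }
}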
Use of io.druid.timeline.partition.ShardSpec in project druid by druid-io.
The class HadoopDruidIndexerConfig, method getBucket:
/********************************************
Granularity/Bucket Helper Methods
********************************************/
/**
* Get the proper bucket for some input row.
*
* @param inputRow an InputRow
*
* @return the Bucket that this row belongs to
*/
public Optional<Bucket> getBucket(InputRow inputRow)
{
  final Optional<Interval> timeBucket = schema.getDataSchema().getGranularitySpec().bucketInterval(
      new DateTime(inputRow.getTimestampFromEpoch())
  );
  if (!timeBucket.isPresent()) {
    return Optional.absent();
  }
  final DateTime bucketStart = timeBucket.get().getStart();
  final ShardSpec actualSpec = shardSpecLookups.get(bucketStart.getMillis()).getShardSpec(
      rollupGran.bucketStart(inputRow.getTimestamp()).getMillis(),
      inputRow
  );
  final HadoopyShardSpec hadoopyShardSpec = hadoopShardSpecLookup.get(bucketStart.getMillis()).get(actualSpec);
  return Optional.of(new Bucket(hadoopyShardSpec.getShardNum(), bucketStart, actualSpec.getPartitionNum()));
}
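A minimal usage sketch, assuming a fully built HadoopDruidIndexerConfig named config and an InputRow named row are available in the surrounding mapper code:

// Illustrative usage only; "config" and "row" are assumed to exist in the caller.
final Optional<Bucket> bucket = config.getBucket(row);
if (bucket.isPresent()) {
  // The row maps to a known segment interval and shard.
  System.out.println("Row assigned to bucket " + bucket.get());
} else {
  // The row's timestamp falls outside every configured segment interval.
  System.out.println("Row rejected, timestamp " + row.getTimestampFromEpoch() + " has no bucket");
}

The returned Bucket ties the row to a segment interval start and to the shard (via the HadoopyShardSpec shard number) it will be written into.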
Use of io.druid.timeline.partition.ShardSpec in project druid by druid-io.
The class OrcIndexGeneratorJobTest, method loadShardSpecs:
private Map<Long, List<HadoopyShardSpec>> loadShardSpecs(Integer[][][] shardInfoForEachShard)
{
  Map<Long, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
  int shardCount = 0;
  int segmentNum = 0;
  for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
    List<ShardSpec> specs = Lists.newArrayList();
    for (Integer[] shardInfo : shardInfoForEachShard[segmentNum++]) {
      specs.add(new HashBasedNumberedShardSpec(shardInfo[0], shardInfo[1], null, HadoopDruidIndexerConfig.JSON_MAPPER));
    }
    List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(specs.size());
    for (ShardSpec spec : specs) {
      actualSpecs.add(new HadoopyShardSpec(spec, shardCount++));
    }
    shardSpecs.put(segmentGranularity.getStartMillis(), actualSpecs);
  }
  return shardSpecs;
}
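For reference, a hypothetical input for this helper, assuming config is set up with two segment granularity intervals; the (partitionNum, partitions) pairs are illustrative only and not taken from the actual test:

// Hypothetical test input: two segment intervals, each split into two hashed partitions.
Integer[][][] shardInfoForEachShard = new Integer[][][]{
    {{0, 2}, {1, 2}},  // first interval: partition 0 of 2 and partition 1 of 2
    {{0, 2}, {1, 2}}   // second interval: same layout
};
Map<Long, List<HadoopyShardSpec>> shardSpecs = loadShardSpecs(shardInfoForEachShard);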
Use of io.druid.timeline.partition.ShardSpec in project druid by druid-io.
The class DimFilterUtilsTest, method shardSpec:
private static ShardSpec shardSpec(String dimension, Range<String> range)
{
  ShardSpec shard = EasyMock.createMock(ShardSpec.class);
  EasyMock.expect(shard.getDomain()).andReturn(ImmutableMap.of(dimension, range)).anyTimes();
  return shard;
}
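The mock is returned in record mode; callers are expected to replay it before use. A minimal usage sketch with a made-up dimension name and range:

// Illustrative only: build, replay, and query a mocked ShardSpec.
ShardSpec shard = shardSpec("dim1", Range.closed("abc", "xyz"));
EasyMock.replay(shard);
Assert.assertEquals(ImmutableMap.of("dim1", Range.closed("abc", "xyz")), shard.getDomain());

In the surrounding test, such mocked shard specs are presumably passed to DimFilterUtils so that a filter's dimension ranges can be checked against each shard's getDomain().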