Use of io.druid.segment.loading.SegmentLoaderConfig in project druid by druid-io.
In class RealtimeIndexTaskTest, method makeToolbox:
private TaskToolbox makeToolbox(
    final Task task,
    final TaskStorage taskStorage,
    final IndexerMetadataStorageCoordinator mdc,
    final File directory
) {
  final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null, null);
  final TaskLockbox taskLockbox = new TaskLockbox(taskStorage);
  try {
    taskStorage.insert(task, TaskStatus.running(task.getId()));
  } catch (EntryExistsException e) {
    // suppress
  }
  taskLockbox.syncFromStorage();
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox,
      mdc,
      emitter,
      EasyMock.createMock(SupervisorManager.class)
  );
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox);
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(
                  new IntervalChunkingQueryRunnerDecorator(null, null, null) {
                    @Override
                    public <T> QueryRunner<T> decorate(QueryRunner<T> delegate, QueryToolChest<T, ? extends Query<T>> toolChest) {
                      return delegate;
                    }
                  }
              ),
              new TimeseriesQueryEngine(),
              new QueryWatcher() {
                @Override
                public void registerQuery(Query query, ListenableFuture future) {
                  // do nothing
                }
              }
          )
      )
  );
  handOffCallbacks = Maps.newConcurrentMap();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory() {
    @Override
    public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
      return new SegmentHandoffNotifier() {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
          handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
          return true;
        }

        @Override
        public void start() {
          // Noop
        }

        @Override
        public void close() {
          // Noop
        }

        Map<SegmentDescriptor, Pair<Executor, Runnable>> getHandOffCallbacks() {
          return handOffCallbacks;
        }
      };
    }
  };
  final TestUtils testUtils = new TestUtils();
  final TaskToolboxFactory toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      taskActionClientFactory,
      emitter,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      handoffNotifierFactory,
      conglomerate,
      MoreExecutors.sameThreadExecutor(), // queryExecutorService
      EasyMock.createMock(MonitorScheduler.class),
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig() {
                @Override
                public List<StorageLocationConfig> getLocations() {
                  return Lists.newArrayList();
                }
              },
              testUtils.getTestObjectMapper()
          )
      ),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexMerger(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      testUtils.getTestIndexMergerV9()
  );
  return toolboxFactory.build(task);
}
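The SegmentLoaderConfig usage is easy to miss inside that long TaskToolboxFactory call. Below is a minimal sketch, isolated from the constructor call above and using only types that already appear there: an anonymous SegmentLoaderConfig with no storage locations backs the SegmentLoaderLocalCacheManager handed to the SegmentLoaderFactory.

// Minimal sketch (not taken verbatim from the Druid sources): the override pattern used above,
// pulled out of the TaskToolboxFactory call for readability.
SegmentLoaderConfig noLocationsConfig = new SegmentLoaderConfig() {
  @Override
  public List<StorageLocationConfig> getLocations() {
    // the test needs no local segment-cache locations
    return Lists.newArrayList();
  }
};
SegmentLoaderFactory loaderFactory = new SegmentLoaderFactory(
    new SegmentLoaderLocalCacheManager(null, noLocationsConfig, testUtils.getTestObjectMapper())
);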
Use of io.druid.segment.loading.SegmentLoaderConfig in project druid by druid-io.
In class TaskToolboxTest, method testFetchSegments:
@Test
public void testFetchSegments() throws SegmentLoadingException, IOException {
  File expectedFile = temporaryFolder.newFile();
  EasyMock.expect(mockSegmentLoaderLocalCacheManager.getSegmentFiles((DataSegment) EasyMock.anyObject()))
          .andReturn(expectedFile).anyTimes();
  EasyMock.expect(mockSegmentLoaderLocalCacheManager.withConfig((SegmentLoaderConfig) EasyMock.anyObject()))
          .andReturn(mockSegmentLoaderLocalCacheManager).anyTimes();
  EasyMock.replay(mockSegmentLoaderLocalCacheManager);
  DataSegment dataSegment = DataSegment.builder()
                                       .dataSource("source")
                                       .interval(new Interval("2012-01-01/P1D"))
                                       .version("1")
                                       .size(1)
                                       .build();
  List<DataSegment> segments = ImmutableList.of(dataSegment);
  Map actualFetchedSegment = taskToolbox.build(task).fetchSegments(segments);
  Assert.assertEquals(expectedFile, actualFetchedSegment.get(dataSegment));
}
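The raw Map in the assertion above hides the types involved. A short hedged sketch of how a caller would consume the result of fetchSegments, assuming (as the assertion implies) that the returned map is keyed by DataSegment with the pulled local File as the value; the variable names here are illustrative, not from the test:

// Hypothetical consumer of the mapping returned by TaskToolbox#fetchSegments.
Map<DataSegment, File> fetched = taskToolbox.build(task).fetchSegments(segments);
File segmentDir = fetched.get(dataSegment); // the same File the mocked cache manager returned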
Use of io.druid.segment.loading.SegmentLoaderConfig in project druid by druid-io.
In class ZkCoordinatorTest, method setUp:
@Before
public void setUp() throws Exception {
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  try {
    infoDir = new File(File.createTempFile("blah", "blah2").getParent(), "ZkCoordinatorTest");
    infoDir.mkdirs();
    for (File file : infoDir.listFiles()) {
      file.delete();
    }
    log.info("Creating tmp test files in [%s]", infoDir);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  scheduledRunnable = Lists.newArrayList();
  segmentLoader = new CacheTestSegmentLoader();
  serverManager = new ServerManager(
      segmentLoader,
      new NoopQueryRunnerFactoryConglomerate(),
      new NoopServiceEmitter(),
      MoreExecutors.sameThreadExecutor(),
      MoreExecutors.sameThreadExecutor(),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig()
  );
  final ZkPathsConfig zkPaths = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return "/druid";
    }
  };
  segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
  announceCount = new AtomicInteger(0);
  announcer = new DataSegmentAnnouncer() {
    private final DataSegmentAnnouncer delegate = new BatchDataSegmentAnnouncer(
        me,
        new BatchDataSegmentAnnouncerConfig(),
        zkPaths,
        new Announcer(curator, Execs.singleThreaded("blah")),
        jsonMapper
    );

    @Override
    public void announceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.add(segment);
      announceCount.incrementAndGet();
      delegate.announceSegment(segment);
    }

    @Override
    public void unannounceSegment(DataSegment segment) throws IOException {
      segmentsAnnouncedByMe.remove(segment);
      announceCount.decrementAndGet();
      delegate.unannounceSegment(segment);
    }

    @Override
    public void announceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.add(segment);
      }
      announceCount.addAndGet(Iterables.size(segments));
      delegate.announceSegments(segments);
    }

    @Override
    public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.remove(segment);
      }
      announceCount.addAndGet(-Iterables.size(segments));
      delegate.unannounceSegments(segments);
    }

    @Override
    public boolean isAnnounced(DataSegment segment) {
      return segmentsAnnouncedByMe.contains(segment);
    }
  };
  zkCoordinator = new ZkCoordinator(
      jsonMapper,
      new SegmentLoaderConfig() {
        @Override
        public File getInfoDir() {
          return infoDir;
        }

        @Override
        public int getNumLoadingThreads() {
          return 5;
        }

        @Override
        public int getAnnounceIntervalMillis() {
          return 50;
        }

        @Override
        public int getDropSegmentDelayMillis() {
          return 0;
        }
      },
      zkPaths,
      me,
      announcer,
      curator,
      serverManager,
      new ScheduledExecutorFactory() {
        @Override
        public ScheduledExecutorService create(int corePoolSize, String nameFormat) {
          /*
             Override normal behavior by adding the runnable to a list so that you can make sure
             all the scheduled runnables are executed by explicitly calling run() on each item in the list
          */
          return new ScheduledThreadPoolExecutor(
              corePoolSize,
              new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build()
          ) {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
              scheduledRunnable.add(command);
              return null;
            }
          };
        }
      }
  );
}
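The least obvious part of this setup is the overridden ScheduledExecutorFactory: per the comment in the code, scheduled commands are captured in scheduledRunnable instead of running on a timer. A sketch of how the test body would then drive them; this is an assumption based on that comment, since the test methods themselves are not shown here.

// Assumed usage in the test body (not shown above): drain the captured runnables so that
// announce/drop work scheduled by the ZkCoordinator executes deterministically.
for (Runnable runnable : scheduledRunnable) {
  runnable.run();
}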
Use of io.druid.segment.loading.SegmentLoaderConfig in project druid by druid-io.
In class IngestSegmentFirehoseFactoryTest, method constructorFeeder:
@Parameterized.Parameters(name = "{1}")
public static Collection<Object[]> constructorFeeder() throws IOException {
  final IndexSpec indexSpec = new IndexSpec();
  final HeapMemoryTaskStorage ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {
  });
  final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
      .withQueryGranularity(Granularities.NONE)
      .withMinTimestamp(JodaUtils.MIN_INSTANT)
      .withDimensionsSpec(ROW_PARSER)
      .withMetrics(new AggregatorFactory[] {
          new LongSumAggregatorFactory(METRIC_LONG_NAME, DIM_LONG_NAME),
          new DoubleSumAggregatorFactory(METRIC_FLOAT_NAME, DIM_FLOAT_NAME)
      })
      .build();
  final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, MAX_ROWS * MAX_SHARD_NUMBER);
  for (Integer i = 0; i < MAX_ROWS; ++i) {
    index.add(ROW_PARSER.parse(buildRow(i.longValue())));
  }
  if (!persistDir.mkdirs() && !persistDir.exists()) {
    throw new IOException(String.format("Could not create directory at [%s]", persistDir.getAbsolutePath()));
  }
  INDEX_MERGER.persist(index, persistDir, indexSpec);
  final TaskLockbox tl = new TaskLockbox(ts);
  final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null) {
    private final Set<DataSegment> published = Sets.newHashSet();
    private final Set<DataSegment> nuked = Sets.newHashSet();

    @Override
    public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval) throws IOException {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUsedSegmentsForIntervals(String dataSource, List<Interval> interval) throws IOException {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUnusedSegmentsForInterval(String dataSource, Interval interval) {
      return ImmutableList.of();
    }

    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) {
      Set<DataSegment> added = Sets.newHashSet();
      for (final DataSegment segment : segments) {
        if (published.add(segment)) {
          added.add(segment);
        }
      }
      return ImmutableSet.copyOf(added);
    }

    @Override
    public void deleteSegments(Set<DataSegment> segments) {
      nuked.addAll(segments);
    }
  };
  final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(
      ts,
      new TaskActionToolbox(tl, mdc, newMockEmitter(), EasyMock.createMock(SupervisorManager.class))
  );
  SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  EasyMock.replay(notifierFactory);
  final TaskToolboxFactory taskToolboxFactory = new TaskToolboxFactory(
      new TaskConfig(tmpDir.getAbsolutePath(), null, null, 50000, null, false, null, null),
      tac,
      newMockEmitter(),
      new DataSegmentPusher() {
        @Deprecated
        @Override
        public String getPathForHadoop(String dataSource) {
          return getPathForHadoop();
        }

        @Override
        public String getPathForHadoop() {
          throw new UnsupportedOperationException();
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException {
          return segment;
        }
      },
      new DataSegmentKiller() {
        @Override
        public void kill(DataSegment segments) throws SegmentLoadingException {
        }

        @Override
        public void killAll() throws IOException {
          throw new UnsupportedOperationException("not implemented");
        }
      },
      new DataSegmentMover() {
        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException {
          return dataSegment;
        }
      },
      new DataSegmentArchiver() {
        @Override
        public DataSegment archive(DataSegment segment) throws SegmentLoadingException {
          return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) throws SegmentLoadingException {
          return segment;
        }
      },
      null, // segment announcer
      notifierFactory,
      null, // query runner factory conglomerate corporation unionized collective
      null, // query executor service
      null, // monitor scheduler
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig() {
                @Override
                public List<StorageLocationConfig> getLocations() {
                  return Lists.newArrayList();
                }
              },
              MAPPER
          )
      ),
      MAPPER,
      INDEX_MERGER,
      INDEX_IO,
      null,
      null,
      INDEX_MERGER_V9
  );
  Collection<Object[]> values = new LinkedList<>();
  for (InputRowParser parser : Arrays.<InputRowParser>asList(
      ROW_PARSER,
      new MapInputRowParser(
          new JSONParseSpec(
              new TimestampSpec(TIME_COLUMN, "auto", null),
              new DimensionsSpec(
                  DimensionsSpec.getDefaultSchemas(ImmutableList.<String>of()),
                  ImmutableList.of(DIM_FLOAT_NAME, DIM_LONG_NAME),
                  ImmutableList.<SpatialDimensionSchema>of()
              ),
              null,
              null
          )
      )
  )) {
    for (List<String> dim_names : Arrays.<List<String>>asList(null, ImmutableList.of(DIM_NAME))) {
      for (List<String> metric_names : Arrays.<List<String>>asList(null, ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME))) {
        values.add(new Object[] {
            new IngestSegmentFirehoseFactory(
                DATA_SOURCE_NAME,
                FOREVER,
                new SelectorDimFilter(DIM_NAME, DIM_VALUE, null),
                dim_names,
                metric_names,
                Guice.createInjector(new Module() {
                  @Override
                  public void configure(Binder binder) {
                    binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
                  }
                }),
                INDEX_IO
            ),
            String.format(
                "DimNames[%s]MetricNames[%s]ParserDimNames[%s]",
                dim_names == null ? "null" : "dims",
                metric_names == null ? "null" : "metrics",
                parser == ROW_PARSER ? "dims" : "null"
            ),
            parser
        });
      }
    }
  }
  return values;
}
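For orientation, @Parameterized.Parameters(name = "{1}") makes the second element of each Object[] row the display name of the generated test. A hypothetical skeleton of the class that would consume these rows follows; the class, field, and parameter names are illustrative assumptions, not the actual test source.

// Hypothetical skeleton only: constructorFeeder() above would serve as its
// @Parameterized.Parameters source; each Object[] row maps to one constructor call.
@RunWith(Parameterized.class)
public class IngestSegmentFirehoseFactoryTestSketch {
  private final IngestSegmentFirehoseFactory factory; // element {0}: the factory under test
  private final InputRowParser parser;                // element {2}: the row parser variant

  public IngestSegmentFirehoseFactoryTestSketch(
      IngestSegmentFirehoseFactory factory,
      String testName, // element {1}: used only as the test's display name
      InputRowParser parser
  ) {
    this.factory = factory;
    this.parser = parser;
  }
}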