Use of io.druid.segment.loading.DataSegmentPusher in project druid by druid-io.
From the class TaskLifecycleTest, method testRealtimeIndexTaskFailure.
@Test(timeout = 60_000L)
public void testRealtimeIndexTaskFailure() throws Exception {
  // Stub pusher whose push() always throws, forcing the realtime task into a failure state.
  dataSegmentPusher = new DataSegmentPusher() {

    @Deprecated
    @Override
    public String getPathForHadoop(String s) {
      return getPathForHadoop();
    }

    @Override
    public String getPathForHadoop() {
      throw new UnsupportedOperationException();
    }

    @Override
    public DataSegment push(File file, DataSegment dataSegment) throws IOException {
      throw new RuntimeException("FAILURE");
    }
  };
  tb = setUpTaskToolboxFactory(dataSegmentPusher, handoffNotifierFactory, mdc);
  taskRunner = setUpThreadPoolTaskRunner(tb);
  taskQueue = setUpTaskQueue(taskStorage, taskRunner);
  monitorScheduler.addMonitor(EasyMock.anyObject(Monitor.class));
  EasyMock.expectLastCall().atLeastOnce();
  monitorScheduler.removeMonitor(EasyMock.anyObject(Monitor.class));
  EasyMock.expectLastCall().anyTimes();
  EasyMock.replay(monitorScheduler, queryRunnerFactoryConglomerate);
  RealtimeIndexTask realtimeIndexTask = newRealtimeIndexTask();
  final String taskId = realtimeIndexTask.getId();
  taskQueue.start();
  taskQueue.add(realtimeIndexTask);
  // Wait for the realtime index task to fail
  while (tsqa.getStatus(taskId).get().isRunnable()) {
    Thread.sleep(10);
  }
  Assert.assertTrue("Task should be in Failure state", tsqa.getStatus(taskId).get().isFailure());
  EasyMock.verify(monitorScheduler, queryRunnerFactoryConglomerate);
}
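For contrast with the failing stub above, a pusher that simply records whatever it is asked to push can be sketched as follows. This is only an illustration, assuming the three methods overridden above are all that this Druid version's DataSegmentPusher interface requires; the variable names are placeholders and this is not the success-path pusher that TaskLifecycleTest actually uses.

// Hypothetical recording pusher, for illustration only.
// Assumes the usual imports: io.druid.timeline.DataSegment, java.io.File,
// java.io.IOException, java.util.List, java.util.concurrent.CopyOnWriteArrayList.
final List<DataSegment> pushedSegments = new CopyOnWriteArrayList<>();
DataSegmentPusher recordingPusher = new DataSegmentPusher() {

  @Deprecated
  @Override
  public String getPathForHadoop(String dataSource) {
    return getPathForHadoop();
  }

  @Override
  public String getPathForHadoop() {
    throw new UnsupportedOperationException("not needed for this sketch");
  }

  @Override
  public DataSegment push(File file, DataSegment dataSegment) throws IOException {
    // Record the segment instead of copying the file to deep storage.
    pushedSegments.add(dataSegment);
    return dataSegment;
  }
};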
Use of io.druid.segment.loading.DataSegmentPusher in project hive by apache.
From the class DruidStorageHandler, method loadDruidSegments.
protected void loadDruidSegments(Table table, boolean overwrite) throws MetaException {
  // At this point we have Druid segments from the reducers, but we still need to atomically
  // rename them and commit them to the metadata store.
  final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
  final List<DataSegment> segmentList = Lists.newArrayList();
  final Path tableDir = getSegmentDescriptorDir();
  // Read the created segments metadata from the table staging directory
  try {
    segmentList.addAll(DruidStorageHandlerUtils.getCreatedSegments(tableDir, getConf()));
  } catch (IOException e) {
    LOG.error("Failed to load segments descriptor from directory {}", tableDir.toString());
    Throwables.propagate(e);
    cleanWorkingDir();
  }
  // Moving Druid segments and committing to Druid metadata as one transaction.
  final HdfsDataSegmentPusherConfig hdfsSegmentPusherConfig = new HdfsDataSegmentPusherConfig();
  List<DataSegment> publishedDataSegmentList = Lists.newArrayList();
  final String segmentDirectory =
      table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY) != null
          ? table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY)
          : HiveConf.getVar(getConf(), HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY);
  LOG.info(String.format(
      "Moving [%s] Druid segments from staging directory [%s] to Deep storage [%s]",
      segmentList.size(), getStagingWorkingDir(), segmentDirectory));
  hdfsSegmentPusherConfig.setStorageDirectory(segmentDirectory);
  try {
    DataSegmentPusher dataSegmentPusher =
        new HdfsDataSegmentPusher(hdfsSegmentPusherConfig, getConf(), DruidStorageHandlerUtils.JSON_MAPPER);
    publishedDataSegmentList = DruidStorageHandlerUtils.publishSegmentsAndCommit(
        getConnector(), getDruidMetadataStorageTablesConfig(), dataSourceName, segmentList,
        overwrite, getConf(), dataSegmentPusher);
  } catch (CallbackFailedException | IOException e) {
    LOG.error("Failed to move segments from staging directory");
    if (e instanceof CallbackFailedException) {
      Throwables.propagate(e.getCause());
    }
    Throwables.propagate(e);
  } finally {
    cleanWorkingDir();
  }
  checkLoadStatus(publishedDataSegmentList);
}
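The helper above delegates the actual copy to the pusher, so the core interaction with DataSegmentPusher reduces to pushing each segment file to deep storage and then committing the metadata. A minimal sketch of just the push step, reusing only the calls already visible in the snippets above, might look like the following; the storage directory value and the segmentFile/segment variables are hypothetical placeholders, and the sketch assumes it runs inside the same handler context (getConf(), DruidStorageHandlerUtils.JSON_MAPPER).

// Minimal sketch of pushing a single segment to HDFS deep storage.
// "/druid/segments", segmentFile, and segment are placeholder values for illustration.
HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory("/druid/segments");
DataSegmentPusher pusher =
    new HdfsDataSegmentPusher(pusherConfig, getConf(), DruidStorageHandlerUtils.JSON_MAPPER);
// push() copies the local segment file to deep storage and returns the segment
// descriptor updated to point at its new location.
DataSegment published = pusher.push(segmentFile, segment);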