Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class VSizeIndexedInts, method readFromByteBuffer:
public static VSizeIndexedInts readFromByteBuffer(ByteBuffer buffer)
{
  byte versionFromBuffer = buffer.get();
  if (VERSION == versionFromBuffer) {
    int numBytes = buffer.get();
    int size = buffer.getInt();
    ByteBuffer bufferToUse = buffer.asReadOnlyBuffer();
    bufferToUse.limit(bufferToUse.position() + size);
    buffer.position(bufferToUse.limit());
    return new VSizeIndexedInts(bufferToUse, numBytes);
  }
  throw new IAE("Unknown version[%s]", versionFromBuffer);
}
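For orientation, the reader above consumes a fixed header before the payload: a one-byte version, a one-byte value width (numBytes), and a four-byte size, followed by size bytes of data. Below is a minimal, self-contained sketch of that layout inferred from the reader alone; the class name VSizeLayoutDemo and the VERSION value are hypothetical.

import java.nio.ByteBuffer;

public class VSizeLayoutDemo {
  static final byte VERSION = 0x0; // assumed value; readFromByteBuffer only checks equality

  // Layout consumed by readFromByteBuffer above:
  // [version: 1 byte][numBytes: 1 byte][size: 4 bytes][payload: size bytes]
  static ByteBuffer write(byte numBytes, byte[] payload) {
    ByteBuffer buf = ByteBuffer.allocate(1 + 1 + Integer.BYTES + payload.length);
    buf.put(VERSION);
    buf.put(numBytes);
    buf.putInt(payload.length);
    buf.put(payload);
    buf.flip(); // rewind so the buffer is ready for reading
    return buf;
  }

  public static void main(String[] args) {
    ByteBuffer buf = write((byte) 1, new byte[]{1, 2, 3});
    byte version = buf.get();
    if (VERSION != version) {
      // mirrors the IAE thrown above on an unrecognized version byte
      throw new IllegalArgumentException("Unknown version[" + version + "]");
    }
    int numBytes = buf.get();
    int size = buf.getInt();
    System.out.println("numBytes=" + numBytes + " size=" + size); // numBytes=1 size=3
  }
}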
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class GenericIndexed, method createVersionTwoGenericIndexed:
private static <T> GenericIndexed<T> createVersionTwoGenericIndexed(ByteBuffer byteBuffer, ObjectStrategy<T> strategy, SmooshedFileMapper fileMapper)
{
  if (fileMapper == null) {
    throw new IAE("SmooshedFileMapper can not be null for version 2.");
  }
  boolean allowReverseLookup = byteBuffer.get() == REVERSE_LOOKUP_ALLOWED;
  int logBaseTwoOfElementsPerValueFile = byteBuffer.getInt();
  int numElements = byteBuffer.getInt();
  String columnName;
  List<ByteBuffer> valueBuffersToUse;
  ByteBuffer headerBuffer;
  try {
    columnName = SERIALIZER_UTILS.readString(byteBuffer);
    valueBuffersToUse = Lists.newArrayList();
    int elementsPerValueFile = 1 << logBaseTwoOfElementsPerValueFile;
    int numberOfFilesRequired = getNumberOfFilesRequired(elementsPerValueFile, numElements);
    for (int i = 0; i < numberOfFilesRequired; i++) {
      valueBuffersToUse.add(
          fileMapper.mapFile(GenericIndexedWriter.generateValueFileName(columnName, i)).asReadOnlyBuffer()
      );
    }
    headerBuffer = fileMapper.mapFile(GenericIndexedWriter.generateHeaderFileName(columnName));
  } catch (IOException e) {
    throw new RuntimeException("File mapping failed.", e);
  }
  return new GenericIndexed<T>(valueBuffersToUse, headerBuffer, strategy, allowReverseLookup, logBaseTwoOfElementsPerValueFile, numElements);
}
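The helper getNumberOfFilesRequired is not shown on this page, but from its call site it must be a ceiling division of the element count by the per-file capacity (a power of two derived from the log value read earlier). A hedged sketch, where the method name matches the call above but the body is an assumption:

public class FileCountSketch {
  // Hypothetical body for the getNumberOfFilesRequired helper called above:
  // ceiling division of numElements by elementsPerValueFile.
  static int getNumberOfFilesRequired(int elementsPerValueFile, long numElements) {
    return (int) ((numElements + elementsPerValueFile - 1) / elementsPerValueFile);
  }

  public static void main(String[] args) {
    int elementsPerValueFile = 1 << 2; // logBaseTwoOfElementsPerValueFile = 2
    System.out.println(getNumberOfFilesRequired(elementsPerValueFile, 10)); // 3: files hold 4 + 4 + 2
  }
}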
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class DruidCoordinator, method moveSegment:
public void moveSegment(ImmutableDruidServer fromServer, ImmutableDruidServer toServer, String segmentName, final LoadPeonCallback callback)
{
  try {
    if (fromServer.getMetadata().equals(toServer.getMetadata())) {
      throw new IAE("Cannot move [%s] to and from the same server [%s]", segmentName, fromServer.getName());
    }
    final DataSegment segment = fromServer.getSegment(segmentName);
    if (segment == null) {
      throw new IAE("Unable to find segment [%s] on server [%s]", segmentName, fromServer.getName());
    }
    final LoadQueuePeon loadPeon = loadManagementPeons.get(toServer.getName());
    if (loadPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", toServer.getName());
    }
    final LoadQueuePeon dropPeon = loadManagementPeons.get(fromServer.getName());
    if (dropPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", fromServer.getName());
    }
    final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
    if (toHolder.getAvailableSize() < segment.getSize()) {
      throw new IAE("Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.", toServer.getName(), segment, segment.getSize(), toHolder.getAvailableSize());
    }
    final String toLoadQueueSegPath = ZKPaths.makePath(ZKPaths.makePath(zkPaths.getLoadQueuePath(), toServer.getName()), segmentName);
    final String toServedSegPath = ZKPaths.makePath(ZKPaths.makePath(serverInventoryView.getInventoryManagerConfig().getInventoryPath(), toServer.getName()), segmentName);
    loadPeon.loadSegment(segment, new LoadPeonCallback() {
      @Override
      public void execute()
      {
        try {
          if (curator.checkExists().forPath(toServedSegPath) != null &&
              curator.checkExists().forPath(toLoadQueueSegPath) == null &&
              !dropPeon.getSegmentsToDrop().contains(segment)) {
            dropPeon.dropSegment(segment, callback);
          } else if (callback != null) {
            callback.execute();
          }
        } catch (Exception e) {
          throw Throwables.propagate(e);
        }
      }
    });
  } catch (Exception e) {
    log.makeAlert(e, "Exception moving segment %s", segmentName).emit();
    if (callback != null) {
      callback.execute();
    }
  }
}
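Across all of these call sites, IAE behaves like an IllegalArgumentException that formats its message from String.format-style arguments (%s, %,d). A minimal sketch of such a class, which approximates the usage seen here and is not copied from the Druid source (the real class may format more leniently):

// Approximation of io.druid.java.util.common.IAE as used on this page.
public class IAE extends IllegalArgumentException {
  public IAE(String formatText, Object... arguments) {
    super(String.format(formatText, arguments));
  }

  public static void main(String[] args) {
    IAE e = new IAE("Not enough capacity on server [%s]. Required: %,d, available: %,d.", "s1", 1024, 512);
    // Prints: Not enough capacity on server [s1]. Required: 1,024, available: 512.
    System.out.println(e.getMessage());
  }
}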
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class CoordinatorDynamicConfigTest, method testSerdeWithKillAllDataSources:
@Test
public void testSerdeWithKillAllDataSources() throws Exception
{
  String jsonStr = "{\n"
      + "  \"millisToWaitBeforeDeleting\": 1,\n"
      + "  \"mergeBytesLimit\": 1,\n"
      + "  \"mergeSegmentsLimit\" : 1,\n"
      + "  \"maxSegmentsToMove\": 1,\n"
      + "  \"replicantLifetime\": 1,\n"
      + "  \"replicationThrottleLimit\": 1,\n"
      + "  \"balancerComputeThreads\": 2,\n"
      + "  \"emitBalancingStats\": true,\n"
      + "  \"killAllDataSources\": true\n"
      + "}\n";
  ObjectMapper mapper = TestHelper.getObjectMapper();
  CoordinatorDynamicConfig actual = mapper.readValue(
      mapper.writeValueAsString(mapper.readValue(jsonStr, CoordinatorDynamicConfig.class)),
      CoordinatorDynamicConfig.class
  );
  Assert.assertEquals(new CoordinatorDynamicConfig(1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true), actual);
  // ensure whitelist is empty when killAllDataSources is true
  try {
    jsonStr = "{\n"
        + "  \"killDataSourceWhitelist\": [\"test1\",\"test2\"],\n"
        + "  \"killAllDataSources\": true\n"
        + "}\n";
    mapper.readValue(jsonStr, CoordinatorDynamicConfig.class);
    Assert.fail("deserialization should fail.");
  } catch (JsonMappingException e) {
    Assert.assertTrue(e.getCause() instanceof IAE);
  }
}
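The test asserts that an IAE thrown during deserialization surfaces as the cause of a JsonMappingException. That is standard Jackson behavior for exceptions thrown inside a @JsonCreator constructor. Below is a self-contained sketch of that mechanism, with a hypothetical KillConfigDemo class standing in for CoordinatorDynamicConfig and a plain IllegalArgumentException (IAE's superclass) standing in for IAE:

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Set;

public class KillConfigDemo {
  public final Set<String> whitelist;
  public final boolean killAll;

  @JsonCreator
  public KillConfigDemo(
      @JsonProperty("killDataSourceWhitelist") Set<String> whitelist,
      @JsonProperty("killAllDataSources") boolean killAll
  ) {
    // Validation in the creator: Jackson wraps this exception in a
    // JsonMappingException whose getCause() is the original exception.
    if (killAll && whitelist != null && !whitelist.isEmpty()) {
      throw new IllegalArgumentException("killDataSourceWhitelist must be empty when killAllDataSources is true");
    }
    this.whitelist = whitelist;
    this.killAll = killAll;
  }

  public static void main(String[] args) throws Exception {
    String json = "{\"killDataSourceWhitelist\":[\"test1\"],\"killAllDataSources\":true}";
    try {
      new ObjectMapper().readValue(json, KillConfigDemo.class);
    } catch (JsonMappingException e) {
      System.out.println(e.getCause() instanceof IllegalArgumentException); // true
    }
  }
}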
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class HadoopConverterJob, method run:
public List<DataSegment> run() throws IOException
{
  final JobConf jobConf = new JobConf();
  jobConf.setKeepFailedTaskFiles(false);
  for (Map.Entry<String, String> entry : converterConfig.getHadoopProperties().entrySet()) {
    jobConf.set(entry.getKey(), entry.getValue(), "converterConfig.getHadoopProperties()");
  }
  final List<DataSegment> segments = converterConfig.getSegments();
  if (segments.isEmpty()) {
    throw new IAE("No segments found for datasource [%s]", converterConfig.getDataSource());
  }
  converterConfigIntoConfiguration(converterConfig, segments, jobConf);
  // Map only. Number of map tasks determined by input format
  jobConf.setNumReduceTasks(0);
  jobConf.setWorkingDirectory(new Path(converterConfig.getDistributedSuccessCache()));
  setJobName(jobConf, segments);
  if (converterConfig.getJobPriority() != null) {
    jobConf.setJobPriority(JobPriority.valueOf(converterConfig.getJobPriority()));
  }
  final Job job = Job.getInstance(jobConf);
  job.setInputFormatClass(ConfigInputFormat.class);
  job.setMapperClass(ConvertingMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setMapSpeculativeExecution(false);
  job.setOutputFormatClass(ConvertingOutputFormat.class);
  JobHelper.setupClasspath(
      JobHelper.distributedClassPath(jobConf.getWorkingDirectory()),
      JobHelper.distributedClassPath(getJobClassPathDir(job.getJobName(), jobConf.getWorkingDirectory())),
      job
  );
  Throwable throwable = null;
  try {
    job.submit();
    log.info("Job %s submitted, status available at %s", job.getJobName(), job.getTrackingURL());
    final boolean success = job.waitForCompletion(true);
    if (!success) {
      final TaskReport[] reports = job.getTaskReports(TaskType.MAP);
      if (reports != null) {
        for (final TaskReport report : reports) {
          log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
        }
      }
      return null;
    }
    try {
      loadedBytes = job.getCounters().findCounter(COUNTER_GROUP, COUNTER_LOADED).getValue();
      writtenBytes = job.getCounters().findCounter(COUNTER_GROUP, COUNTER_WRITTEN).getValue();
    } catch (IOException ex) {
      log.error(ex, "Could not fetch counters");
    }
    final JobID jobID = job.getJobID();
    final Path jobDir = getJobPath(jobID, job.getWorkingDirectory());
    final FileSystem fs = jobDir.getFileSystem(job.getConfiguration());
    final RemoteIterator<LocatedFileStatus> it = fs.listFiles(jobDir, true);
    final List<Path> goodPaths = new ArrayList<>();
    while (it.hasNext()) {
      final LocatedFileStatus locatedFileStatus = it.next();
      if (locatedFileStatus.isFile()) {
        final Path myPath = locatedFileStatus.getPath();
        if (ConvertingOutputFormat.DATA_SUCCESS_KEY.equals(myPath.getName())) {
          goodPaths.add(new Path(myPath.getParent(), ConvertingOutputFormat.DATA_FILE_KEY));
        }
      }
    }
    if (goodPaths.isEmpty()) {
      log.warn("No good data found at [%s]", jobDir);
      return null;
    }
    final List<DataSegment> returnList = ImmutableList.copyOf(
        Lists.transform(goodPaths, new Function<Path, DataSegment>() {
          @Nullable
          @Override
          public DataSegment apply(final Path input)
          {
            try {
              if (!fs.exists(input)) {
                throw new ISE("Somehow [%s] was found but [%s] is missing at [%s]", ConvertingOutputFormat.DATA_SUCCESS_KEY, ConvertingOutputFormat.DATA_FILE_KEY, jobDir);
              }
            } catch (final IOException e) {
              throw Throwables.propagate(e);
            }
            try (final InputStream stream = fs.open(input)) {
              return HadoopDruidConverterConfig.jsonMapper.readValue(stream, DataSegment.class);
            } catch (final IOException e) {
              throw Throwables.propagate(e);
            }
          }
        })
    );
    if (returnList.size() == segments.size()) {
      return returnList;
    } else {
      throw new ISE("Tasks reported success but result length did not match! Expected %d found %d at path [%s]", segments.size(), returnList.size(), jobDir);
    }
  } catch (InterruptedException | ClassNotFoundException e) {
    RuntimeException exception = Throwables.propagate(e);
    throwable = exception;
    throw exception;
  } catch (Throwable t) {
    throwable = t;
    throw t;
  } finally {
    try {
      cleanup(job);
    } catch (IOException e) {
      if (throwable != null) {
        throwable.addSuppressed(e);
      } else {
        log.error(e, "Could not clean up job [%s]", job.getJobID());
      }
    }
  }
}
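One subtle point in the method above is the bookkeeping around the throwable local: the primary failure is remembered so that an IOException from cleanup can be attached via addSuppressed instead of masking the original error. A minimal, hypothetical sketch of that pattern in isolation:

public class SuppressionSketch {
  static void runWithCleanup(Runnable work, Runnable cleanup) {
    Throwable primary = null;
    try {
      work.run();
    } catch (Throwable t) {
      primary = t; // remember the primary failure before rethrowing
      throw t;
    } finally {
      try {
        cleanup.run();
      } catch (RuntimeException e) {
        if (primary != null) {
          primary.addSuppressed(e); // surfaces as "Suppressed:" in the primary stack trace
        } else {
          e.printStackTrace();      // stand-in for log.error in the original
        }
      }
    }
  }

  public static void main(String[] args) {
    runWithCleanup(
        () -> { throw new IllegalStateException("job failed"); },
        () -> { throw new RuntimeException("cleanup failed"); }
    );
  }
}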