use of org.apache.druid.segment.loading.SegmentLoadingException in project druid by druid-io.
the class SegmentManagerTest method testDropSegment.
@Test
public void testDropSegment() throws SegmentLoadingException, ExecutionException, InterruptedException {
  for (DataSegment eachSegment : SEGMENTS) {
    Assert.assertTrue(segmentManager.loadSegment(eachSegment, false, SegmentLazyLoadFailCallback.NOOP));
  }
  final List<Future<Void>> futures = ImmutableList.of(SEGMENTS.get(0), SEGMENTS.get(2))
      .stream()
      .map(segment -> executor.submit(() -> {
        segmentManager.dropSegment(segment);
        return (Void) null;
      }))
      .collect(Collectors.toList());
  for (Future<Void> eachFuture : futures) {
    eachFuture.get();
  }
  assertResult(ImmutableList.of(SEGMENTS.get(1), SEGMENTS.get(3), SEGMENTS.get(4)));
}
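The test loads all fixture segments, drops two of them from separate executor threads, and then asserts that only the remaining three are still served. A minimal, self-contained sketch of that submit-and-wait harness (the segment list, executor, and drop call below are illustrative stand-ins, not the actual SegmentManagerTest fixtures):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

public class ConcurrentDropSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical stand-ins for the SEGMENTS fixture and segmentManager used in the test above.
    List<String> segmentsToDrop = Arrays.asList("segment-0", "segment-2");
    ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
      // Submit one drop task per segment, then wait on every future so any failure propagates.
      List<Future<Void>> futures = segmentsToDrop.stream()
          .map(segment -> executor.submit(() -> {
            System.out.println("dropping " + segment); // stands in for segmentManager.dropSegment(segment)
            return (Void) null;
          }))
          .collect(Collectors.toList());
      for (Future<Void> future : futures) {
        future.get();
      }
    } finally {
      executor.shutdown();
    }
  }
}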
use of org.apache.druid.segment.loading.SegmentLoadingException in project druid by druid-io.
the class SegmentLoadDropHandler method addSegment.
@Override
public void addSegment(DataSegment segment, @Nullable DataSegmentChangeCallback callback) {
  Status result = null;
  try {
    log.info("Loading segment %s", segment.getId());
    /*
      The lock below prevents a race with the scheduled runnable in removeSegment(): once its
      if (segmentsToDelete.remove(segment)) check returns true, the historical starts deleting segment files.
      Right after that check, addSegment() could be called and actually load the segment, so dropping and
      downloading the segment would happen at the same time.
     */
    if (segmentsToDelete.contains(segment)) {
      /*
        Both contains(segment) and remove(segment) could be moved inside the synchronized block, but then every
        call to addSegment() would have to wait for the lock to make progress, which would slow things down.
        Since segmentsToDelete.contains(segment) is false in most cases, doing the "contains" check outside the
        synchronized block avoids most of the cost of acquiring the lock.
       */
      synchronized (segmentDeleteLock) {
        segmentsToDelete.remove(segment);
      }
    }
    loadSegment(segment, DataSegmentChangeCallback.NOOP, false);
    // Announce the segment even if the segment file already exists.
    try {
      announcer.announceSegment(segment);
    } catch (IOException e) {
      throw new SegmentLoadingException(e, "Failed to announce segment[%s]", segment.getId());
    }
    result = Status.SUCCESS;
  } catch (Exception e) {
    log.makeAlert(e, "Failed to load segment for dataSource").addData("segment", segment).emit();
    result = Status.failed(e.getMessage());
  } finally {
    updateRequestStatus(new SegmentChangeRequestLoad(segment), result);
    if (null != callback) {
      callback.execute();
    }
  }
}
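The comments above describe a deliberate check-outside-lock optimization: addSegment() only takes segmentDeleteLock when the segment is actually pending deletion, so the common load path stays lock-free while the rare load-versus-delete race is still serialized. A minimal sketch of that pattern, with illustrative field names rather than the real SegmentLoadDropHandler members:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class CheckThenLockSketch {
  // Illustrative stand-ins for the fields referenced in the method above.
  private final Set<String> segmentsToDelete = ConcurrentHashMap.newKeySet();
  private final Object segmentDeleteLock = new Object();

  public void addSegment(String segmentId) {
    // Cheap, lock-free membership check first: in the common case the segment is not
    // scheduled for deletion, so no lock is taken at all.
    if (segmentsToDelete.contains(segmentId)) {
      // Only the rare "pending deletion" path synchronizes on the same lock the
      // delete side holds, so loading and deleting cannot interleave.
      synchronized (segmentDeleteLock) {
        segmentsToDelete.remove(segmentId);
      }
    }
    // ... proceed with loading the segment ...
  }
}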
use of org.apache.druid.segment.loading.SegmentLoadingException in project druid by druid-io.
the class S3DataSegmentKiller method kill.
@Override
public void kill(DataSegment segment) throws SegmentLoadingException {
  try {
    Map<String, Object> loadSpec = segment.getLoadSpec();
    String s3Bucket = MapUtils.getString(loadSpec, "bucket");
    String s3Path = MapUtils.getString(loadSpec, "key");
    String s3DescriptorPath = DataSegmentKiller.descriptorPath(s3Path);
    final ServerSideEncryptingAmazonS3 s3Client = this.s3ClientSupplier.get();
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
      log.info("Removing index file[s3://%s/%s] from s3!", s3Bucket, s3Path);
      s3Client.deleteObject(s3Bucket, s3Path);
    }
    // Descriptor files are not stored anymore, but we still delete them if they exist.
    if (s3Client.doesObjectExist(s3Bucket, s3DescriptorPath)) {
      log.info("Removing descriptor file[s3://%s/%s] from s3!", s3Bucket, s3DescriptorPath);
      s3Client.deleteObject(s3Bucket, s3DescriptorPath);
    }
  } catch (AmazonServiceException e) {
    throw new SegmentLoadingException(e, "Couldn't kill segment[%s]: [%s]", segment.getId(), e);
  }
}
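kill() resolves the segment's S3 location from its loadSpec and deletes both the index object and the legacy descriptor object stored next to it. A rough sketch of the shapes involved, assuming the usual {bucket, key} loadSpec layout; the descriptorPath helper below is an illustrative stand-in, not Druid's DataSegmentKiller.descriptorPath():

import java.util.HashMap;
import java.util.Map;

public class DescriptorPathSketch {
  // Illustrative stand-in: assume the descriptor lives next to the index file and is named descriptor.json.
  static String descriptorPath(String indexKey) {
    int lastSlash = indexKey.lastIndexOf('/');
    return indexKey.substring(0, lastSlash + 1) + "descriptor.json";
  }

  public static void main(String[] args) {
    // Hypothetical loadSpec of an S3-backed segment, as kill() would read it.
    Map<String, Object> loadSpec = new HashMap<>();
    loadSpec.put("type", "s3_zip");
    loadSpec.put("bucket", "my-druid-bucket");
    loadSpec.put("key", "druid/segments/wikipedia/2023-01-01/0/index.zip");

    String bucket = (String) loadSpec.get("bucket");
    String key = (String) loadSpec.get("key");
    System.out.println("index object:      s3://" + bucket + "/" + key);
    System.out.println("descriptor object: s3://" + bucket + "/" + descriptorPath(key));
  }
}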
use of org.apache.druid.segment.loading.SegmentLoadingException in project druid by druid-io.
the class S3DataSegmentPuller method getSegmentFiles.
FileUtils.FileCopyResult getSegmentFiles(final CloudObjectLocation s3Coords, final File outDir) throws SegmentLoadingException {
  log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);
  if (!isObjectInBucket(s3Coords)) {
    throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
  }
  try {
    FileUtils.mkdirp(outDir);
    final URI uri = s3Coords.toUri(S3StorageDruidModule.SCHEME);
    final ByteSource byteSource = new ByteSource() {
      @Override
      public InputStream openStream() throws IOException {
        try {
          return buildFileObject(uri).openInputStream();
        } catch (AmazonServiceException e) {
          if (e.getCause() != null) {
            if (S3Utils.S3RETRY.apply(e)) {
              throw new IOException("Recoverable exception", e);
            }
          }
          throw new RuntimeException(e);
        }
      }
    };
    if (CompressionUtils.isZip(s3Coords.getPath())) {
      final FileUtils.FileCopyResult result = CompressionUtils.unzip(byteSource, outDir, S3Utils.S3RETRY, false);
      log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outDir.getAbsolutePath());
      return result;
    }
    if (CompressionUtils.isGz(s3Coords.getPath())) {
      final String fname = Files.getNameWithoutExtension(uri.getPath());
      final File outFile = new File(outDir, fname);
      final FileUtils.FileCopyResult result = CompressionUtils.gunzip(byteSource, outFile, S3Utils.S3RETRY);
      log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(), outFile.getAbsolutePath());
      return result;
    }
    throw new IAE("Do not know how to load file type at [%s]", uri.toString());
  } catch (Exception e) {
    try {
      FileUtils.deleteDirectory(outDir);
    } catch (IOException ioe) {
      log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]", outDir.getAbsolutePath(), s3Coords.toString());
    }
    throw new SegmentLoadingException(e, e.getMessage());
  }
}
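Inside openStream(), retryable S3 failures are rethrown as IOException so the retry predicate handed to CompressionUtils.unzip()/gunzip() will retry the pull, while everything else becomes a RuntimeException and fails fast. A generic sketch of that classify-then-retry idea (the predicate and loop are illustrative, not S3Utils.S3RETRY or Druid's retry utilities):

import java.io.IOException;
import java.util.function.Predicate;

public class RetryClassificationSketch {
  // Illustrative predicate: only I/O-level failures are considered worth retrying.
  static final Predicate<Throwable> RETRYABLE = t -> t instanceof IOException;

  interface Attempt<T> {
    T run() throws Exception;
  }

  static <T> T withRetries(Attempt<T> attempt, int maxTries) throws Exception {
    for (int tries = 1; ; tries++) {
      try {
        return attempt.run();
      } catch (Exception e) {
        // Recoverable failures are retried up to maxTries; anything else fails fast.
        if (tries >= maxTries || !RETRYABLE.test(e)) {
          throw e;
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // A flaky pull that fails once with a "recoverable" IOException, then succeeds.
    int[] calls = {0};
    String result = withRetries(() -> {
      if (calls[0]++ == 0) {
        throw new IOException("Recoverable exception");
      }
      return "pulled";
    }, 3);
    System.out.println(result); // prints "pulled" after one retry
  }
}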
use of org.apache.druid.segment.loading.SegmentLoadingException in project druid by druid-io.
the class OssDataSegmentPuller method getSegmentFiles.
FileUtils.FileCopyResult getSegmentFiles(final CloudObjectLocation ossCoords, final File outDir) throws SegmentLoadingException {
  log.info("Pulling index at path[%s] to outDir[%s]", ossCoords, outDir);
  if (!isObjectInBucket(ossCoords)) {
    throw new SegmentLoadingException("IndexFile[%s] does not exist.", ossCoords);
  }
  try {
    FileUtils.mkdirp(outDir);
    final URI uri = ossCoords.toUri(OssStorageDruidModule.SCHEME);
    final ByteSource byteSource = new ByteSource() {
      @Override
      public InputStream openStream() throws IOException {
        try {
          return buildFileObject(uri).openInputStream();
        } catch (OSSException e) {
          if (e.getCause() != null) {
            if (OssUtils.RETRYABLE.apply(e)) {
              throw new IOException("Recoverable exception", e);
            }
          }
          throw new RuntimeException(e);
        }
      }
    };
    if (CompressionUtils.isZip(ossCoords.getPath())) {
      final FileUtils.FileCopyResult result = CompressionUtils.unzip(byteSource, outDir, OssUtils.RETRYABLE, false);
      log.info("Loaded %d bytes from [%s] to [%s]", result.size(), ossCoords.toString(), outDir.getAbsolutePath());
      return result;
    }
    if (CompressionUtils.isGz(ossCoords.getPath())) {
      final String fname = Files.getNameWithoutExtension(uri.getPath());
      final File outFile = new File(outDir, fname);
      final FileUtils.FileCopyResult result = CompressionUtils.gunzip(byteSource, outFile, OssUtils.RETRYABLE);
      log.info("Loaded %d bytes from [%s] to [%s]", result.size(), ossCoords.toString(), outFile.getAbsolutePath());
      return result;
    }
    throw new IAE("Do not know how to load file type at [%s]", uri.toString());
  } catch (Exception e) {
    try {
      FileUtils.deleteDirectory(outDir);
    } catch (IOException ioe) {
      log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]", outDir.getAbsolutePath(), ossCoords.toString());
    }
    throw new SegmentLoadingException(e, e.getMessage());
  }
}