Use of java.nio.file.DirectoryNotEmptyException in the elasticsearch project by elastic:
the deleteSnapshot method of the BlobStoreRepository class.
/**
 * Deletes a snapshot from this repository.
 * <p>
 * The snapshot is first removed from the repository index file (the source of truth for
 * active snapshots), then its snapshot blob, global metadata blob, per-index metadata and
 * shard data are deleted, and finally index folders that are no longer referenced by any
 * snapshot are cleaned up. Failures to read snapshot/metadata blobs or to delete individual
 * index/shard files are logged and skipped (best-effort cleanup) rather than failing the
 * whole deletion, so that a partially corrupted snapshot can still be removed.
 *
 * @param snapshotId        id of the snapshot to delete
 * @param repositoryStateId expected generation of the repository state, used to detect
 *                          concurrent modifications when writing the updated index file
 * @throws RepositoryException if the repository is read-only or the repository index
 *                             cannot be updated
 */
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
// Deleting is a write operation; refuse it outright on read-only repositories.
if (isReadOnly()) {
throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository");
}
final RepositoryData repositoryData = getRepositoryData();
// Best-effort read of the snapshot info; if the blob is unreadable (other than plain
// missing) we proceed with an empty index list so the rest of the cleanup still runs.
List<String> indices = Collections.emptyList();
SnapshotInfo snapshot = null;
try {
snapshot = getSnapshotInfo(snapshotId);
indices = snapshot.indices();
} catch (SnapshotMissingException ex) {
// A missing snapshot is a hard error for the caller — nothing to delete.
throw ex;
} catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
}
// Best-effort read of the global metadata; null metaData later disables shard-level cleanup.
MetaData metaData = null;
try {
if (snapshot != null) {
metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true);
} else {
// Snapshot info was unreadable: retry with an unknown version (null).
metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
}
} catch (IOException | SnapshotException ex) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
}
try {
// Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
writeIndexGen(updatedRepositoryData, repositoryStateId);
// delete the snapshot file
safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());
// delete the global metadata file
safeGlobalMetaDataBlobDelete(snapshot, snapshotId.getUUID());
// Now delete all indices
for (String index : indices) {
final IndexId indexId = repositoryData.resolveIndexId(index);
BlobPath indexPath = basePath().add("indices").add(indexId.getId());
BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try {
indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
} catch (IOException ex) {
// Log and continue: one undeletable index metadata blob must not abort the cleanup.
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
}
if (metaData != null) {
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData != null) {
// Delete shard-level data for every shard of this index.
// NOTE(review): snapshot is dereferenced here; indices is only non-empty when
// snapshot was read successfully above, so snapshot should be non-null — confirm.
for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
try {
delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
} catch (SnapshotException ex) {
// Lambda below needs an effectively-final copy of the loop variable.
final int finalShardId = shardId;
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
}
}
}
}
}
// cleanup indices that are no longer part of the repository
final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
for (final IndexId indexId : indicesToCleanUp) {
try {
indicesBlobContainer.deleteBlob(indexId.getId());
} catch (DirectoryNotEmptyException dnee) {
// if the directory isn't empty for some reason, it will fail to clean up;
// we'll ignore that and accept that cleanup didn't fully succeed.
// since we are using UUIDs for path names, this won't be an issue for
// snapshotting indices of the same name
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
} catch (IOException ioe) {
// a different IOException occurred while trying to delete - will just log the issue for now
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe);
}
}
} catch (IOException ex) {
// Failing to persist the updated repository index is fatal — the snapshot would
// otherwise still be listed as present.
throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);
}
}
Use of java.nio.file.DirectoryNotEmptyException in the elasticsearch project by elastic:
the testFileSystemExceptions method of the ExceptionSerializationTests class.
/**
 * Round-trips each {@link FileSystemException} subtype through serialization and verifies
 * that the concrete class and its file/otherFile/reason fields survive. Exception types
 * whose constructor takes only a file argument must deserialize with null otherFile/reason.
 */
public void testFileSystemExceptions() throws IOException {
    final List<FileSystemException> samples = Arrays.asList(
            new FileSystemException("a", "b", "c"),
            new NoSuchFileException("a", "b", "c"),
            new NotDirectoryException("a"),
            new DirectoryNotEmptyException("a"),
            new AtomicMoveNotSupportedException("a", "b", "c"),
            new FileAlreadyExistsException("a", "b", "c"),
            new AccessDeniedException("a", "b", "c"),
            new FileSystemLoopException("a"));
    for (FileSystemException original : samples) {
        FileSystemException roundTripped = serialize(original);
        assertEquals(roundTripped.getClass(), original.getClass());
        assertEquals("a", roundTripped.getFile());
        // These three types were constructed with the single-argument constructor.
        final boolean fileOnly = roundTripped.getClass() == NotDirectoryException.class
                || roundTripped.getClass() == FileSystemLoopException.class
                || roundTripped.getClass() == DirectoryNotEmptyException.class;
        if (fileOnly) {
            assertNull(roundTripped.getOtherFile());
            assertNull(roundTripped.getReason());
        } else {
            assertEquals(roundTripped.getClass().toString(), "b", roundTripped.getOtherFile());
            assertEquals(roundTripped.getClass().toString(), "c", roundTripped.getReason());
        }
    }
}
Use of java.nio.file.DirectoryNotEmptyException in the Terasology project by MovingBlocks:
the mergeChanges method of the SaveTransactionHelper class.
/**
 * Merges all outstanding changes into the save game. If this operation gets interrupted it can be started again
 * without any file corruption when the file system supports atomic moves.
 * <br><br>
 * The write lock for the save directory should be acquired before this method gets called.
 *
 * @throws IOException if walking the unmerged-changes tree, moving a file, or deleting a
 *                     visited directory fails
 */
public void mergeChanges() throws IOException {
final Path sourceDirectory = storagePathProvider.getUnmergedChangesPath();
final Path targetDirectory = storagePathProvider.getStoragePathDirectory();
// Walk the staging tree: mirror directories, move files over, then delete the
// (now empty) source directories on the way back up.
Files.walkFileTree(sourceDirectory, new SimpleFileVisitor<Path>() {
// Ensures the "atomic move not possible" warning is logged at most once per merge.
boolean atomicNotPossibleLogged;
@Override
public FileVisitResult preVisitDirectory(Path sourceSubDir, BasicFileAttributes attrs) throws IOException {
// Create the matching directory under the target before any files are moved into it.
Path targetSubDir = targetDirectory.resolve(sourceDirectory.relativize(sourceSubDir));
if (!Files.isDirectory(targetSubDir)) {
Files.createDirectory(targetSubDir);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path sourcePath, BasicFileAttributes attrs) throws IOException {
Path targetPath = targetDirectory.resolve(sourceDirectory.relativize(sourcePath));
try {
// Delete file, as behavior of atomic move is undefined if target file exists:
Files.deleteIfExists(targetPath);
Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE);
} catch (AtomicMoveNotSupportedException e) {
// Fall back to a non-atomic move (interruption safety is weaker on such file systems).
if (!atomicNotPossibleLogged) {
logger.warn("Atomic move was not possible, doing it non atomically...");
atomicNotPossibleLogged = true;
}
Files.move(sourcePath, targetPath);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
// All files of this directory have been moved out, so it should be empty and deletable.
try {
Files.delete(dir);
} catch (DirectoryNotEmptyException e) {
/*
 * Happens rarely for some players on Windows (see issue #2160). The exact reason
 * is unknown; possibly a background task (e.g. a scanner) briefly creates
 * temporary files inside newly created directories.
 */
logger.warn("The save job could not cleanup a temporarly created directory, it will retry once in one second");
try {
Thread.sleep(1000L);
} catch (InterruptedException e1) {
// Restore the interrupt flag so callers can observe it; still retry the delete once.
Thread.currentThread().interrupt();
}
// Single retry; if the directory is still not empty this throws to the caller.
Files.delete(dir);
}
return FileVisitResult.CONTINUE;
}
});
}
Use of java.nio.file.DirectoryNotEmptyException in the graal project by oracle:
the delete method of the MemoryFileSystem class.
/**
 * Removes the file or (empty) directory denoted by {@code path} from the in-memory
 * file system: the entry is dropped from its parent directory, its inode and data
 * blocks are released, and the updated parent directory is written back.
 *
 * @throws IOException                if the path is the root or the parent directory is not writable
 * @throws NoSuchFileException        if no entry with that name exists in the parent directory
 * @throws DirectoryNotEmptyException if the target is a directory that still has entries
 */
@Override
public void delete(Path path) throws IOException {
    final Path absolutePath = toAbsolutePath(path);
    final Path parentPath = absolutePath.getParent();
    // The root has no parent entry to remove it from.
    if (parentPath == null) {
        throw new IOException("Cannot delete root.");
    }
    final Map.Entry<Long, Map<String, Long>> parentEntry = readDir(parentPath);
    final long parentInode = parentEntry.getKey();
    final Map<String, Long> parentEntries = parentEntry.getValue();
    // Removing an entry mutates the parent directory, so it must be writable.
    if (!inodes.get(parentInode).permissions.contains(AccessMode.WRITE)) {
        throw new IOException("Read only dir: " + path);
    }
    final String name = absolutePath.getFileName().toString();
    final Long targetInode = parentEntries.get(name);
    if (targetInode == null) {
        throw new NoSuchFileException(path.toString());
    }
    // Only empty directories may be deleted.
    if (inodes.get(targetInode).isDirectory() && !readDir(targetInode).isEmpty()) {
        throw new DirectoryNotEmptyException(path.toString());
    }
    // Release the inode and its data, then persist the parent without the entry.
    inodes.remove(targetInode);
    blocks.remove(targetInode);
    parentEntries.remove(name);
    writeDir(parentInode, parentEntries);
}
Use of java.nio.file.DirectoryNotEmptyException in the cryptofs project by cryptomator:
the delete method of the CryptoFileSystemImpl class.
/**
 * Deletes the file or directory identified by the given cleartext path. First attempts to
 * delete the corresponding ciphertext file; if none exists, the path is treated as a
 * directory and its ciphertext directory (including any non-ciphertext stragglers), its
 * dir file, and its cached dir id are removed. Exceptions raised with ciphertext paths are
 * re-thrown carrying the cleartext path so callers never see internal paths.
 *
 * @throws NoSuchFileException        if neither a file nor a directory exists at the cleartext path
 * @throws DirectoryNotEmptyException if the directory cannot be emptied
 */
void delete(CryptoPath cleartextPath) throws IOException {
    Path encryptedFile = cryptoPathMapper.getCiphertextFilePath(cleartextPath, CiphertextFileType.FILE);
    if (Files.deleteIfExists(encryptedFile)) {
        // It was a regular file and has been removed — done.
        return;
    }
    // No ciphertext file exists; the cleartext path may denote a directory instead.
    Path encryptedDir = cryptoPathMapper.getCiphertextDirPath(cleartextPath);
    Path encryptedDirFile = cryptoPathMapper.getCiphertextFilePath(cleartextPath, CiphertextFileType.DIRECTORY);
    try {
        ciphertextDirDeleter.deleteCiphertextDirIncludingNonCiphertextFiles(encryptedDir, cleartextPath);
        if (!Files.deleteIfExists(encryptedDirFile)) {
            // Unexpected but harmless: the directory existed without its dir file.
            LOG.warn("Successfully deleted dir {}, but didn't find corresponding dir file {}", encryptedDir, encryptedDirFile);
        }
        dirIdProvider.delete(encryptedDirFile);
    } catch (NoSuchFileException e) {
        // Translate the ciphertext path back to the cleartext path for the caller.
        throw new NoSuchFileException(cleartextPath.toString());
    } catch (DirectoryNotEmptyException e) {
        // Translate the ciphertext path back to the cleartext path for the caller.
        throw new DirectoryNotEmptyException(cleartextPath.toString());
    }
}
Aggregations