Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class NativeAzureFileSystem, method delete.
/**
* Delete the specified file or folder. The parameter
* skipParentFolderLastModifidedTimeUpdate
* is used in the case of atomic folder rename redo. In that case, there is
* a lease on the parent folder, so (without reworking the code) modifying
* the parent folder update time will fail because of a conflict with the
* lease. Since we are going to delete the folder soon anyway so accurate
* modified time is not necessary, it's easier to just skip
* the modified time update.
*
* @param f file path to be deleted.
* @param recursive specify deleting recursively or not.
* @param skipParentFolderLastModifidedTimeUpdate If true, don't update the folder last
* modified time.
* @return true if and only if the file is deleted
 * @throws IOException Thrown when the file or directory cannot be deleted.
*/
public boolean delete(Path f, boolean recursive, boolean skipParentFolderLastModifidedTimeUpdate) throws IOException {
LOG.debug("Deleting file: {}", f.toString());
Path absolutePath = makeAbsolute(f);
performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.EXECUTE.toString(), "delete");
String key = pathToKey(absolutePath);
// Capture the metadata for the path.
//
FileMetadata metaFile = null;
try {
metaFile = store.retrieveMetadata(key);
} catch (IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
return false;
}
throw e;
}
if (null == metaFile) {
// The path to be deleted does not exist.
return false;
}
// The path exists. Determine whether it is a folder containing objects,
// an empty folder, or a simple file, and take the appropriate action.
if (!metaFile.isDir()) {
// The path specifies a file. We need to check the parent path
// to make sure it's a proper materialized directory before we
// delete the file. Otherwise we may get into a situation where
// the file we were deleting was the last one in an implicit directory
// (e.g. the blob store only contains the blob a/b and there's no
// corresponding directory blob a) and that would implicitly delete
// the directory as well, which is not correct.
Path parentPath = absolutePath.getParent();
if (parentPath.getParent() != null) {
// Not root
String parentKey = pathToKey(parentPath);
FileMetadata parentMetadata = null;
try {
parentMetadata = store.retrieveMetadata(parentKey);
} catch (IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
if (innerException instanceof StorageException) {
// The parent metadata cannot be retrieved; this is an invalid state, hence throwing an IOException
if (NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
throw new IOException("File " + f + " has a parent directory " + parentPath + " whose metadata cannot be retrieved. Can't resolve");
}
}
throw e;
}
// Invalid state: the parent metadata could not be retrieved, hence throwing an IOException
if (parentMetadata == null) {
throw new IOException("File " + f + " has a parent directory " + parentPath + " whose metadata cannot be retrieved. Can't resolve");
}
if (!parentMetadata.isDir()) {
// Invalid state: the parent path is actually a file. Throw.
throw new AzureException("File " + f + " has a parent directory " + parentPath + " which is also a file. Can't resolve.");
}
if (parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
LOG.debug("Found an implicit parent directory while trying to" + " delete the file {}. Creating the directory blob for" + " it in {}.", f, parentKey);
store.storeEmptyFolder(parentKey, createPermissionStatus(FsPermission.getDefault()));
} else {
if (!skipParentFolderLastModifidedTimeUpdate) {
updateParentFolderLastModifiedTime(key);
}
}
}
try {
if (store.delete(key)) {
instrumentation.fileDeleted();
} else {
return false;
}
} catch (IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
return false;
}
throw e;
}
} else {
// The path specifies a folder. Recursively delete all entries under the
// folder.
LOG.debug("Directory Delete encountered: {}", f.toString());
Path parentPath = absolutePath.getParent();
if (parentPath.getParent() != null) {
String parentKey = pathToKey(parentPath);
FileMetadata parentMetadata = null;
try {
parentMetadata = store.retrieveMetadata(parentKey);
} catch (IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
if (innerException instanceof StorageException) {
// The parent metadata cannot be retrieved; this is an invalid state, hence throwing an IOException
if (NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
throw new IOException("File " + f + " has a parent directory " + parentPath + " whose metadata cannot be retrieved. Can't resolve");
}
}
throw e;
}
// Invalid state: the parent metadata could not be retrieved, hence throwing an IOException
if (parentMetadata == null) {
throw new IOException("File " + f + " has a parent directory " + parentPath + " whose metadata cannot be retrieved. Can't resolve");
}
if (parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
LOG.debug("Found an implicit parent directory while trying to" + " delete the directory {}. Creating the directory blob for" + " it in {}. ", f, parentKey);
store.storeEmptyFolder(parentKey, createPermissionStatus(FsPermission.getDefault()));
}
}
// List all the blobs in the current folder.
String priorLastKey = null;
// Start time for list operation
long start = Time.monotonicNow();
ArrayList<FileMetadata> fileMetadataList = new ArrayList<FileMetadata>();
// List all the files in the folder with AZURE_UNBOUNDED_DEPTH depth.
do {
try {
PartialListing listing = store.listAll(key, AZURE_LIST_ALL, AZURE_UNBOUNDED_DEPTH, priorLastKey);
for (FileMetadata file : listing.getFiles()) {
fileMetadataList.add(file);
}
priorLastKey = listing.getPriorLastKey();
} catch (IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
return false;
}
throw e;
}
} while (priorLastKey != null);
long end = Time.monotonicNow();
LOG.debug("Time taken to list {} blobs for delete operation: {} ms", fileMetadataList.size(), (end - start));
final FileMetadata[] contents = fileMetadataList.toArray(new FileMetadata[fileMetadataList.size()]);
if (!recursive && contents.length > 0) {
// Fail the operation: recursive delete was not specified for a non-empty folder.
throw new IOException("Non-recursive delete of non-empty directory " + f.toString());
}
// Delete all files / folders in current directory stored as list in 'contents'.
AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
@Override
public boolean execute(FileMetadata file) throws IOException {
return deleteFile(file.getKey(), file.isDir());
}
};
AzureFileSystemThreadPoolExecutor executor = getThreadPoolExecutor(this.deleteThreadCount, "AzureBlobDeleteThread", "Delete", key, AZURE_DELETE_THREADS);
if (!executor.executeParallel(contents, task)) {
LOG.error("Failed to delete files / subfolders in blob {}", key);
return false;
}
// Delete the current directory
if (store.retrieveMetadata(metaFile.getKey()) != null && !deleteFile(metaFile.getKey(), metaFile.isDir())) {
LOG.error("Failed delete directory {}", f.toString());
return false;
}
// Update parent directory last modified time
Path parent = absolutePath.getParent();
if (parent != null && parent.getParent() != null) {
// not root
if (!skipParentFolderLastModifidedTimeUpdate) {
updateParentFolderLastModifiedTime(key);
}
}
}
// File or directory was successfully deleted.
LOG.debug("Delete Successful for : {}", f.toString());
return true;
}
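For context, a minimal sketch of how this overload might be called: during an atomic folder rename redo the parent folder is leased, so the last-modified-time update is skipped to avoid a lease conflict. The caller, path, and surrounding variables below are hypothetical illustrations, not taken from the Hadoop source; only the method signature above is real.

// Hypothetical caller, assuming fs is a NativeAzureFileSystem and the redo path
// currently holds a lease on the parent folder.
Path stale = new Path("/hbase/WALs/regionserver-1/old.log"); // illustrative path
boolean deleted = fs.delete(stale, true /* recursive */,
    true /* skipParentFolderLastModifidedTimeUpdate: parent is leased, skip its mtime update */);
if (!deleted) {
  // delete() returns false when the path no longer exists, which is acceptable during a redo.
  LOG.debug("Nothing to delete; path {} was already gone.", stale);
}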
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class NativeAzureFileSystem, method rename.
@Override
public boolean rename(Path src, Path dst) throws FileNotFoundException, IOException {
FolderRenamePending renamePending = null;
LOG.debug("Moving {} to {}", src, dst);
if (containsColon(dst)) {
throw new IOException("Cannot rename to file " + dst + " through WASB that has colons in the name");
}
Path absolutePath = makeAbsolute(src);
performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.EXECUTE.toString(), "rename");
String srcKey = pathToKey(absolutePath);
if (srcKey.length() == 0) {
// Cannot rename root of file system
return false;
}
// Figure out the final destination
Path absoluteDst = makeAbsolute(dst);
String dstKey = pathToKey(absoluteDst);
FileMetadata dstMetadata = null;
try {
dstMetadata = store.retrieveMetadata(dstKey);
} catch (IOException ex) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
// A BlobNotFound storage exception is only thrown from the retrieveMetadata API when there is a
// race condition: if another thread deletes the destination file or folder, this thread should
// still be able to continue with the rename gracefully. Hence the StorageException is swallowed here.
if (innerException instanceof StorageException) {
if (NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
LOG.debug("BlobNotFound exception encountered for Destination key : {}. " + "Swallowin the exception to handle race condition gracefully", dstKey);
}
} else {
throw ex;
}
}
if (dstMetadata != null && dstMetadata.isDir()) {
// It's an existing directory.
dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
LOG.debug("Destination {} " + " is a directory, adjusted the destination to be {}", dst, dstKey);
} else if (dstMetadata != null) {
// Attempting to overwrite a file using rename()
LOG.debug("Destination {}" + " is an already existing file, failing the rename.", dst);
return false;
} else {
// Check that the parent directory exists.
FileMetadata parentOfDestMetadata = null;
try {
parentOfDestMetadata = store.retrieveMetadata(pathToKey(absoluteDst.getParent()));
} catch (IOException ex) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
LOG.debug("Parent of destination {} doesn't exists. Failing rename", dst);
return false;
}
throw ex;
}
if (parentOfDestMetadata == null) {
LOG.debug("Parent of the destination {}" + " doesn't exist, failing the rename.", dst);
return false;
} else if (!parentOfDestMetadata.isDir()) {
LOG.debug("Parent of the destination {}" + " is a file, failing the rename.", dst);
return false;
}
}
FileMetadata srcMetadata = null;
try {
srcMetadata = store.retrieveMetadata(srcKey);
} catch (IOException ex) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
LOG.debug("Source {} doesn't exists. Failing rename", src);
return false;
}
throw ex;
}
if (srcMetadata == null) {
// Source doesn't exist
LOG.debug("Source {} doesn't exist, failing the rename.", src);
return false;
} else if (!srcMetadata.isDir()) {
LOG.debug("Source {} found as a file, renaming.", src);
try {
store.rename(srcKey, dstKey);
} catch (IOException ex) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
LOG.debug("BlobNotFoundException encountered. Failing rename", src);
return false;
}
throw ex;
}
} else {
// Prepare for, execute, and clean up after renaming all files in the folder and
// the root file, and update the last modified time of the source and
// target parent folders. The operation can be redone if it fails part
// way through, by applying the "Rename Pending" file.
// The following code (internally) only does atomic rename preparation
// and lease management for page blob folders, limiting the scope of the
// operation to HBase log file folders, where atomic rename is required.
// In the future, we could generalize it easily to all folders.
renamePending = prepareAtomicFolderRename(srcKey, dstKey);
renamePending.execute();
LOG.debug("Renamed {} to {} successfully.", src, dst);
renamePending.cleanup();
return true;
}
// Update the last-modified time of the parent folders of both source
// and destination.
updateParentFolderLastModifiedTime(srcKey);
updateParentFolderLastModifiedTime(dstKey);
LOG.debug("Renamed {} to {} successfully.", src, dst);
return true;
}
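Both delete() and rename() repeat the same unwrap-and-check sequence around every store call. As a non-authoritative sketch, the duplicated logic could be collected in a small private helper; the name isBlobNotFound is an invented illustration and is not part of the Hadoop source.

// Hypothetical helper (not in the Hadoop source): true when the IOException wraps a
// StorageException that the helper class classifies as "blob not found".
private static boolean isBlobNotFound(IOException e) {
  Throwable inner = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
  return inner instanceof StorageException
      && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) inner);
}
// Usage, e.g. around store.retrieveMetadata(key):
//   catch (IOException e) { if (isBlobNotFound(e)) { return false; } throw e; }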
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class AzureNativeFileSystemStore, method rename.
@Override
public void rename(String srcKey, String dstKey, boolean acquireLease, SelfRenewingLease existingLease) throws IOException {
LOG.debug("Moving {} to {}", srcKey, dstKey);
if (acquireLease && existingLease != null) {
throw new IOException("Cannot acquire new lease if one already exists.");
}
CloudBlobWrapper srcBlob = null;
CloudBlobWrapper dstBlob = null;
SelfRenewingLease lease = null;
try {
// Confirm that a storage session has been established with the Azure storage server.
if (null == storageInteractionLayer) {
final String errMsg = String.format("Storage session expected for URI '%s' but does not exist.", sessionUri);
throw new AssertionError(errMsg);
}
checkContainer(ContainerAccessType.ReadThenWrite);
// Get the source blob and assert its existence. If the source key
// needs to be normalized then normalize it.
//
srcBlob = getBlobReference(srcKey);
if (!srcBlob.exists(getInstrumentedContext())) {
throw new AzureException("Source blob " + srcKey + " does not exist.");
}
/**
* Conditionally get a lease on the source blob to prevent other writers
* from changing it. This is used for correctness in HBase when log files
* are renamed. It generally should do no harm other than take a little
* more time for other rename scenarios. When the HBase master renames a
* log file folder, the lease locks out other writers. This
* prevents a region server that the master thinks is dead, but is still
* alive, from committing additional updates. This is different than
* when HBase runs on HDFS, where the region server recovers the lease
* on a log file, to gain exclusive access to it, before it splits it.
*/
if (acquireLease) {
lease = srcBlob.acquireLease();
} else if (existingLease != null) {
lease = existingLease;
}
// Get the destination blob. The destination key always needs to be
// normalized.
//
dstBlob = getBlobReference(dstKey);
// Copy the source blob to the destination blob, then delete the source. The copy
// operation may be throttled by the storage service; if so, retry it with a more
// aggressive exponential backoff policy.
try {
dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
} catch (StorageException se) {
if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
int copyBlobMinBackoff = sessionConfiguration.getInt(KEY_COPYBLOB_MIN_BACKOFF_INTERVAL, DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL);
int copyBlobMaxBackoff = sessionConfiguration.getInt(KEY_COPYBLOB_MAX_BACKOFF_INTERVAL, DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL);
int copyBlobDeltaBackoff = sessionConfiguration.getInt(KEY_COPYBLOB_BACKOFF_INTERVAL, DEFAULT_COPYBLOB_BACKOFF_INTERVAL);
int copyBlobMaxRetries = sessionConfiguration.getInt(KEY_COPYBLOB_MAX_IO_RETRIES, DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
BlobRequestOptions options = new BlobRequestOptions();
options.setRetryPolicyFactory(new RetryExponentialRetry(copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff, copyBlobMaxRetries));
dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
} else {
throw se;
}
}
waitForCopyToComplete(dstBlob, getInstrumentedContext());
safeDelete(srcBlob, lease);
} catch (StorageException e) {
if (e.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
LOG.warn("Rename: CopyBlob: StorageException: ServerBusy: Retry complete, will attempt client side copy for page blob");
InputStream ipStream = null;
OutputStream opStream = null;
try {
if (srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB) {
ipStream = openInputStream(srcBlob);
opStream = openOutputStream(dstBlob);
byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE];
int len;
while ((len = ipStream.read(buffer)) != -1) {
opStream.write(buffer, 0, len);
}
opStream.flush();
opStream.close();
ipStream.close();
} else {
throw new AzureException(e);
}
safeDelete(srcBlob, lease);
} catch (StorageException se) {
LOG.warn("Rename: CopyBlob: StorageException: Failed");
throw new AzureException(se);
} finally {
IOUtils.closeStream(ipStream);
IOUtils.closeStream(opStream);
}
} else {
throw new AzureException(e);
}
} catch (URISyntaxException e) {
// Re-throw exception as an Azure storage exception.
throw new AzureException(e);
}
}
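A short sketch of how the acquireLease and existingLease parameters combine, derived only from the checks at the top of the method; the call sites themselves are illustrative assumptions rather than actual Hadoop call sites.

// Illustrative (hypothetical) call sites for AzureNativeFileSystemStore.rename:
store.rename(srcKey, dstKey, false, null);          // plain rename, no lease on the source blob
store.rename(srcKey, dstKey, true, null);           // take a new lease for the copy (HBase log folders)
store.rename(srcKey, dstKey, false, existingLease); // reuse a lease the caller already holds
// rename(srcKey, dstKey, true, existingLease) is rejected with an IOException,
// since a new lease cannot be acquired while an existing one is supplied.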
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class BlockBlobAppendStream, method updateBlobAppendMetadata.
/**
* Helper method to update the Blob metadata during Append lease operations.
* Blob metadata is updated to the holdLease value only if the current lease
* status is equal to testCondition and the last update of the blob metadata
* is less than 30 seconds old.
* @param holdLease value to which the append lease metadata should be set.
* @param testCondition expected current value of the append lease metadata.
* @return true if the lease metadata update was successful, false otherwise.
* @throws StorageException
*/
private boolean updateBlobAppendMetadata(boolean holdLease, boolean testCondition) throws StorageException {
SelfRenewingLease lease = null;
StorageException lastStorageException = null;
int leaseRenewalRetryCount = 0;
/*
* Updating the Blob metadata honours the following algorithm, based on:
* 1) If the append lease metadata is present
* 2) Last updated time of the append lease
* 3) Previous value of the Append lease metadata.
*
* The algorithm:
* 1) If the append lease metadata is not part of the Blob, this is the first
* client to append, so we update the metadata.
* 2) If the append lease metadata is present and the timeout has occurred,
* we update the metadata irrespective of the current value of the append lease.
* 3) If the append lease metadata is present, is equal to the testCondition value
* (passed as parameter) and the timeout has not occurred, we update the metadata.
* 4) If the append lease metadata is present, is not equal to the testCondition value
* (passed as parameter) and the timeout has not occurred, we do not update the
* metadata and return false.
*
*/
while (leaseRenewalRetryCount < MAX_LEASE_RENEWAL_RETRY_COUNT) {
lastStorageException = null;
synchronized (this) {
try {
final Calendar currentCalendar = Calendar.getInstance(Locale.US);
currentCalendar.setTimeZone(TimeZone.getTimeZone(UTC_STR));
long currentTime = currentCalendar.getTime().getTime();
// Acquire lease on the blob.
lease = new SelfRenewingLease(blob);
blob.downloadAttributes(opContext);
HashMap<String, String> metadata = blob.getMetadata();
if (metadata.containsKey(APPEND_LEASE) && currentTime - Long.parseLong(metadata.get(APPEND_LEASE_LAST_MODIFIED)) <= BlockBlobAppendStream.APPEND_LEASE_TIMEOUT && !metadata.get(APPEND_LEASE).equals(Boolean.toString(testCondition))) {
return false;
}
metadata.put(APPEND_LEASE, Boolean.toString(holdLease));
metadata.put(APPEND_LEASE_LAST_MODIFIED, Long.toString(currentTime));
blob.setMetadata(metadata);
AccessCondition accessCondition = new AccessCondition();
accessCondition.setLeaseID(lease.getLeaseID());
blob.uploadMetadata(accessCondition, null, opContext);
return true;
} catch (StorageException ex) {
lastStorageException = ex;
LOG.debug("Lease renewal for Blob : {} encountered Storage Exception : {} " + "Error Code : {}", key, ex, ex.getErrorCode());
leaseRenewalRetryCount++;
} finally {
if (lease != null) {
try {
lease.free();
} catch (StorageException ex) {
LOG.debug("Encountered Storage exception while releasing lease for Blob {} " + "during Append metadata operation. Storage Exception {} " + "Error Code : {} ", key, ex, ex.getErrorCode());
} finally {
lease = null;
}
}
}
}
if (leaseRenewalRetryCount == MAX_LEASE_RENEWAL_RETRY_COUNT) {
throw lastStorageException;
} else {
try {
Thread.sleep(LEASE_RENEWAL_RETRY_SLEEP_PERIOD);
} catch (InterruptedException ex) {
LOG.debug("Blob append metadata updated method interrupted");
Thread.currentThread().interrupt();
}
}
}
// This statement should be unreachable: every iteration of the while loop either
// returns or throws before the loop can exit normally. Returning false keeps the compiler satisfied.
return false;
}
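To make the holdLease/testCondition contract concrete, here is a minimal sketch of an acquire/release sequence inside the stream class; these call sites are illustrative assumptions based on the algorithm comment above, not the actual BlockBlobAppendStream callers.

// Hypothetical usage of the append-lease metadata protocol described above.
// Acquire: set the append lease to true only if it is currently false (or absent/stale).
if (!updateBlobAppendMetadata(true /* holdLease */, false /* testCondition */)) {
  throw new IOException("Another writer currently holds the append lease on this blob.");
}
try {
  // ... upload and commit blocks ...
} finally {
  // Release: set the append lease back to false, expecting that this writer still holds it.
  updateBlobAppendMetadata(false /* holdLease */, true /* testCondition */);
}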
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class BlockBlobAppendStream, method setBlocksCountAndBlockIdPrefix.
/**
* Helper method used to generate the block IDs. The algorithm used is similar to
* the one used by the Azure Storage SDK.
*/
private void setBlocksCountAndBlockIdPrefix() throws IOException {
try {
if (nextBlockCount == UNSET_BLOCKS_COUNT && blockIdPrefix == null) {
List<BlockEntry> blockEntries = blob.downloadBlockList(BlockListingFilter.COMMITTED, new BlobRequestOptions(), opContext);
String blockZeroBlockId = (blockEntries.size() > 0) ? blockEntries.get(0).getId() : "";
String prefix = UUID.randomUUID().toString() + "-";
String sampleNewerVersionBlockId = generateNewerVersionBlockId(prefix, 0);
if (blockEntries.size() > 0 && blockZeroBlockId.length() < sampleNewerVersionBlockId.length()) {
// If blob has already been created with 2.2.0, append subsequent blocks with older version (2.2.0) blockId
// compute nextBlockCount, the way it was done before; and don't use blockIdPrefix
this.blockIdPrefix = "";
nextBlockCount = (long) (sequenceGenerator.nextInt(Integer.MAX_VALUE)) + sequenceGenerator.nextInt(Integer.MAX_VALUE - MAX_BLOCK_COUNT);
nextBlockCount += blockEntries.size();
} else {
// If there are no existing blocks, create the first block with newer version (4.2.0) blockId
// If blob has already been created with 4.2.0, append subsequent blocks with newer version (4.2.0) blockId
this.blockIdPrefix = prefix;
nextBlockCount = blockEntries.size();
}
}
} catch (StorageException ex) {
LOG.debug("Encountered storage exception during setting next Block Count and BlockId prefix." + " StorageException : {} ErrorCode : {}", ex, ex.getErrorCode());
throw new IOException(ex);
}
}
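generateNewerVersionBlockId is not shown in this listing. As an assumption about its shape (the block blob API requires all block ids of a blob to be Base64 strings of equal length, which the length comparison above relies on), a plausible sketch is given below; it is not the verified Hadoop implementation.

// Plausible sketch only (an assumption, not the verified Hadoop implementation).
// Requires java.util.Base64 and java.nio.charset.StandardCharsets.
private String generateNewerVersionBlockId(String prefix, long id) {
  // Random UUID prefix plus a zero-padded sequence number, Base64-encoded so that
  // every block id generated for this blob has the same length.
  String suffix = String.format("%06d", id);
  byte[] raw = (prefix + suffix).getBytes(StandardCharsets.UTF_8);
  return Base64.getEncoder().encodeToString(raw);
}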