Use of com.microsoft.azure.storage.StorageException in project jackrabbit-oak by apache.
The class AzureBlobStoreBackend, method addMetadataRecordImpl.
private void addMetadataRecordImpl(final InputStream input, String name, long recordLength) throws DataStoreException {
    try {
        CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
        CloudBlockBlob blob = metaDir.getBlockBlobReference(name);
        blob.upload(input, recordLength);
    } catch (StorageException e) {
        LOG.info("Error adding metadata record. metadataName={} length={}", name, recordLength, e);
        throw new DataStoreException(e);
    } catch (URISyntaxException | IOException e) {
        throw new DataStoreException(e);
    }
}
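For context, a minimal standalone sketch of the same upload pattern is shown below. The connection string, container name ("oak-datastore"), metadata directory ("META"), and record name are illustrative assumptions, not values from Jackrabbit Oak; the SDK calls themselves (getDirectoryReference, getBlockBlobReference, upload) are the ones used in addMetadataRecordImpl.

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class MetadataUploadSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection string and container; replace with real values.
        CloudStorageAccount account = CloudStorageAccount.parse(System.getenv("AZURE_STORAGE_CONNECTION_STRING"));
        CloudBlobContainer container = account.createCloudBlobClient().getContainerReference("oak-datastore");

        byte[] payload = "record-contents".getBytes(StandardCharsets.UTF_8);

        // Same pattern as addMetadataRecordImpl: resolve the virtual directory,
        // get a block blob reference by name, and upload a stream of known length.
        CloudBlobDirectory metaDir = container.getDirectoryReference("META");
        CloudBlockBlob blob = metaDir.getBlockBlobReference("repository-001");
        blob.upload(new ByteArrayInputStream(payload), payload.length);
    }
}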
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class BlockBlobAppendStream, method commitAppendBlocks.
/**
 * Method to commit all the uncommitted blocks to Azure storage.
 * If the commit fails then blocks are automatically cleaned up
 * by Azure storage.
 * @throws IOException if committing the blocks fails.
 */
private synchronized void commitAppendBlocks() throws IOException {
    SelfRenewingLease lease = null;
    try {
        if (uncommittedBlockEntries.size() > 0) {
            // Acquiring lease on the blob.
            lease = new SelfRenewingLease(blob);
            // Downloading existing blocks
            List<BlockEntry> blockEntries = blob.downloadBlockList(BlockListingFilter.COMMITTED, new BlobRequestOptions(), opContext);
            // Adding uncommitted blocks.
            blockEntries.addAll(uncommittedBlockEntries);
            AccessCondition accessCondition = new AccessCondition();
            accessCondition.setLeaseID(lease.getLeaseID());
            blob.commitBlockList(blockEntries, accessCondition, new BlobRequestOptions(), opContext);
            uncommittedBlockEntries.clear();
        }
    } catch (StorageException ex) {
        LOG.error("Storage exception encountered during block commit phase of append for blob" + " : {} Storage Exception : {} Error Code: {}", key, ex, ex.getErrorCode());
        throw new IOException("Encountered Exception while committing append blocks", ex);
    } finally {
        if (lease != null) {
            try {
                lease.free();
            } catch (StorageException ex) {
                LOG.debug("Exception encountered while releasing lease for " + "blob : {} StorageException : {} ErrorCode : {}", key, ex, ex.getErrorCode());
                // Swallowing exception here as the lease is cleaned up by the SelfRenewingLease object.
            }
        }
    }
}
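Stripped of Hadoop's wrapper types, the underlying stage-and-commit pattern looks roughly like the sketch below. The helper name, block-id scheme, and block numbering are illustrative assumptions; uploadBlock, downloadBlockList, commitBlockList, and the lease AccessCondition are standard calls in this generation of the Azure Storage SDK.

import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.blob.BlockEntry;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.List;

public class BlockCommitSketch {
    /** Appends one block to an existing block blob while holding a lease. */
    static void appendBlock(CloudBlockBlob blob, byte[] data, String leaseId, int blockNumber) throws Exception {
        // Block IDs must be Base64-encoded and of equal length within one blob.
        String blockId = Base64.getEncoder().encodeToString(String.format("%08d", blockNumber).getBytes(StandardCharsets.UTF_8));

        AccessCondition lease = AccessCondition.generateLeaseCondition(leaseId);

        // Stage the new block; it stays uncommitted until commitBlockList runs.
        blob.uploadBlock(blockId, new ByteArrayInputStream(data), data.length, lease, null, null);

        // Fetch the currently committed block list, append the new block, and re-commit.
        List<BlockEntry> blocks = blob.downloadBlockList();
        blocks.add(new BlockEntry(blockId));
        blob.commitBlockList(blocks, lease, null, null);
    }
}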
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class LocalSASKeyGeneratorImpl, method getRelativeBlobSASUri.
/**
 * Implementation for generating a relative-path blob SAS URI.
 */
@Override
public URI getRelativeBlobSASUri(String accountName, String container, String relativePath) throws SASKeyGenerationException {
    CloudBlobContainer sc = null;
    CloudBlobClient client = null;
    try {
        CloudStorageAccount account = getSASKeyBasedStorageAccountInstance(accountName);
        client = account.createCloudBlobClient();
        sc = client.getContainerReference(container);
    } catch (URISyntaxException uriSyntaxEx) {
        throw new SASKeyGenerationException("Encountered URISyntaxException " + "while getting container references for container " + container + " inside storage account : " + accountName, uriSyntaxEx);
    } catch (StorageException stoEx) {
        throw new SASKeyGenerationException("Encountered StorageException while " + "getting container references for container " + container + " inside storage account : " + accountName, stoEx);
    }
    CloudBlockBlob blob = null;
    try {
        blob = sc.getBlockBlobReference(relativePath);
    } catch (URISyntaxException uriSyntaxEx) {
        throw new SASKeyGenerationException("Encountered URISyntaxException while " + "getting Block Blob references for container " + container + " inside storage account : " + accountName, uriSyntaxEx);
    } catch (StorageException stoEx) {
        throw new SASKeyGenerationException("Encountered StorageException while " + "getting Block Blob references for container " + container + " inside storage account : " + accountName, stoEx);
    }
    try {
        return client.getCredentials().transformUri(blob.getUri());
    } catch (StorageException stoEx) {
        throw new SASKeyGenerationException("Encountered StorageException while " + "generating SAS key for Blob: " + relativePath + " inside " + "container : " + container + " in Storage Account : " + accountName, stoEx);
    } catch (URISyntaxException uriSyntaxEx) {
        throw new SASKeyGenerationException("Encountered URISyntaxException " + "while generating SAS key for Blob: " + relativePath + " inside " + "container: " + container + " in Storage Account : " + accountName, uriSyntaxEx);
    }
}
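A consumer of the returned SAS URI can then address the blob without any account key. The sketch below is an illustration rather than code from either project (the helper name and download usage are assumptions); the CloudBlockBlob(URI) constructor and the download call are standard SDK APIs.

import com.microsoft.azure.storage.blob.CloudBlockBlob;

import java.io.ByteArrayOutputStream;
import java.net.URI;

public class SasUriUsageSketch {
    /** Reads a blob using only its SAS-decorated URI, without an account key. */
    static byte[] downloadWithSas(URI blobSasUri) throws Exception {
        // The SAS token embedded in the query string authorizes the request.
        CloudBlockBlob blob = new CloudBlockBlob(blobSasUri);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        blob.download(out);
        return out.toByteArray();
    }
}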
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class PageBlobInputStream, method ensureDataInBuffer.
/**
 * Check our buffer and download more from the server if needed.
 * If data is not available in the buffer, the method downloads up to the
 * maximum page blob download size (4 MB), or all remaining pages if fewer
 * than 4 MB are left.
 * If we are on the last page, the method returns true even if we have
 * reached the end of the stream.
 * @return true if there's more data in the buffer, false if the buffer is
 *         empty and we have reached the end of the blob.
 * @throws IOException if the range download fails.
 */
private synchronized boolean ensureDataInBuffer() throws IOException {
    if (dataAvailableInBuffer()) {
        // We still have some data in our buffer.
        return true;
    }
    currentBuffer = null;
    if (numberOfPagesRemaining == 0) {
        // No more data to read.
        return false;
    }
    final long pagesToRead = Math.min(MAX_PAGES_PER_DOWNLOAD, numberOfPagesRemaining);
    final int bufferSize = (int) (pagesToRead * PAGE_SIZE);
    // Download page to current buffer.
    try {
        // Create a byte array output stream to capture the results of the
        // download.
        ByteArrayOutputStream baos = new ByteArrayOutputStream(bufferSize);
        blob.downloadRange(currentOffsetInBlob, bufferSize, baos, withMD5Checking(), opContext);
        currentBuffer = baos.toByteArray();
    } catch (StorageException e) {
        throw new IOException(e);
    }
    numberOfPagesRemaining -= pagesToRead;
    currentOffsetInBlob += bufferSize;
    currentOffsetInBuffer = PAGE_HEADER_SIZE;
    // Since we just downloaded a new buffer, validate its consistency.
    validateCurrentBufferConsistency();
    return true;
}
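The underlying range download, without Hadoop's wrapper or the page-header bookkeeping, is roughly the sketch below. The helper name, constants, and in-memory buffering are assumptions; downloadRange(offset, length, stream) is the SDK call used above.

import com.microsoft.azure.storage.blob.CloudPageBlob;

import java.io.ByteArrayOutputStream;

public class PageRangeDownloadSketch {
    // Azure page blobs are addressed in 512-byte pages; 4 MB is the cap used above.
    static final int PAGE_SIZE = 512;
    static final long MAX_DOWNLOAD_BYTES = 4L * 1024 * 1024;

    /** Downloads up to MAX_DOWNLOAD_BYTES starting at the given page-aligned offset. */
    static byte[] readChunk(CloudPageBlob blob, long offsetInBlob, long bytesRemaining) throws Exception {
        long length = Math.min(MAX_DOWNLOAD_BYTES, bytesRemaining);
        ByteArrayOutputStream buffer = new ByteArrayOutputStream((int) length);
        // Same call as in ensureDataInBuffer, minus the wrapper and request options.
        blob.downloadRange(offsetInBlob, length, buffer);
        return buffer.toByteArray();
    }
}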
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache.
The class PageBlobOutputStream, method conditionalExtendFile.
/**
* Extend the page blob file if we are close to the end.
*/
private void conditionalExtendFile() {
    // maximum allowed size of an Azure page blob (1 terabyte)
    final long MAX_PAGE_BLOB_SIZE = 1024L * 1024L * 1024L * 1024L;
    // If blob is already at the maximum size, then don't try to extend it.
    if (currentBlobSize == MAX_PAGE_BLOB_SIZE) {
        return;
    }
    // If we are within the maximum write size of the end of the file,
    if (currentBlobSize - currentBlobOffset <= MAX_RAW_BYTES_PER_REQUEST) {
        // Extend the file. Retry up to 3 times with back-off.
        CloudPageBlob cloudPageBlob = (CloudPageBlob) blob.getBlob();
        long newSize = currentBlobSize + configuredPageBlobExtensionSize;
        // Make sure we don't exceed maximum blob size.
        if (newSize > MAX_PAGE_BLOB_SIZE) {
            newSize = MAX_PAGE_BLOB_SIZE;
        }
        final int MAX_RETRIES = 3;
        int retries = 1;
        boolean resizeDone = false;
        while (!resizeDone && retries <= MAX_RETRIES) {
            try {
                cloudPageBlob.resize(newSize);
                resizeDone = true;
                currentBlobSize = newSize;
            } catch (StorageException e) {
                LOG.warn("Failed to extend size of " + cloudPageBlob.getUri());
                try {
                    // sleep 2, 8, 18 seconds for up to 3 retries
                    Thread.sleep(2000 * retries * retries);
                } catch (InterruptedException e1) {
                    // Restore the interrupted status
                    Thread.currentThread().interrupt();
                }
            } finally {
                retries++;
            }
        }
    }
}
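The retry policy above can be factored into a small standalone helper, sketched below under the assumption of a plain CloudPageBlob reference and a caller-supplied target size; resize is the only SDK call involved, and page blob sizes must stay 512-byte aligned.

import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudPageBlob;

public class ResizeRetrySketch {
    /** Attempts to resize a page blob, retrying with a quadratic back-off similar to the loop above. */
    static boolean resizeWithRetries(CloudPageBlob pageBlob, long newSize) throws InterruptedException {
        final int maxRetries = 3;
        for (int attempt = 1; attempt <= maxRetries; attempt++) {
            try {
                pageBlob.resize(newSize); // newSize must be a multiple of 512 bytes
                return true;
            } catch (StorageException e) {
                if (attempt == maxRetries) {
                    return false;
                }
                // Sleep 2, then 8 seconds before the second and third attempts.
                Thread.sleep(2000L * attempt * attempt);
            }
        }
        return false;
    }
}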