Use of com.microsoft.azure.storage.RetryExponentialRetry in project hadoop by apache.
Class AzureNativeFileSystemStore, method rename.
@Override
public void rename(String srcKey, String dstKey, boolean acquireLease,
    SelfRenewingLease existingLease) throws IOException {
  LOG.debug("Moving {} to {}", srcKey, dstKey);
  if (acquireLease && existingLease != null) {
    throw new IOException("Cannot acquire new lease if one already exists.");
  }
  CloudBlobWrapper srcBlob = null;
  CloudBlobWrapper dstBlob = null;
  SelfRenewingLease lease = null;
  try {
    // A rename may be attempted before any streams are opened, so first
    // check that a session has been established with the Azure
    // storage server.
    if (null == storageInteractionLayer) {
      final String errMsg = String.format(
          "Storage session expected for URI '%s' but does not exist.",
          sessionUri);
      throw new AssertionError(errMsg);
    }
    checkContainer(ContainerAccessType.ReadThenWrite);
    // Get the source blob and assert its existence. If the source key
    // needs to be normalized then normalize it.
    //
    srcBlob = getBlobReference(srcKey);
    if (!srcBlob.exists(getInstrumentedContext())) {
      throw new AzureException("Source blob " + srcKey + " does not exist.");
    }
    /*
     * Conditionally get a lease on the source blob to prevent other writers
     * from changing it. This is used for correctness in HBase when log files
     * are renamed. It generally should do no harm other than take a little
     * more time for other rename scenarios. When the HBase master renames a
     * log file folder, the lease locks out other writers. This prevents a
     * region server that the master thinks is dead, but is still alive, from
     * committing additional updates. This is different than when HBase runs
     * on HDFS, where the region server recovers the lease on a log file, to
     * gain exclusive access to it, before it splits it.
     */
    if (acquireLease) {
      lease = srcBlob.acquireLease();
    } else if (existingLease != null) {
      lease = existingLease;
    }
    // Get the destination blob. The destination key always needs to be
    // normalized.
    //
    dstBlob = getBlobReference(dstKey);
    // Rename the source blob to the destination by copying it server-side
    // and then deleting the source. The CopyBlob request may fail with
    // HTTP 503 (server busy) when the storage service is being throttled.
    try {
      dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
    } catch (StorageException se) {
      if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
        int copyBlobMinBackoff = sessionConfiguration.getInt(
            KEY_COPYBLOB_MIN_BACKOFF_INTERVAL,
            DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL);
        int copyBlobMaxBackoff = sessionConfiguration.getInt(
            KEY_COPYBLOB_MAX_BACKOFF_INTERVAL,
            DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL);
        int copyBlobDeltaBackoff = sessionConfiguration.getInt(
            KEY_COPYBLOB_BACKOFF_INTERVAL,
            DEFAULT_COPYBLOB_BACKOFF_INTERVAL);
        int copyBlobMaxRetries = sessionConfiguration.getInt(
            KEY_COPYBLOB_MAX_IO_RETRIES,
            DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
        BlobRequestOptions options = new BlobRequestOptions();
        options.setRetryPolicyFactory(new RetryExponentialRetry(
            copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
            copyBlobMaxRetries));
        dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
      } else {
        throw se;
      }
    }
    waitForCopyToComplete(dstBlob, getInstrumentedContext());
    safeDelete(srcBlob, lease);
  } catch (StorageException e) {
    if (e.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
      LOG.warn("Rename: CopyBlob: StorageException: ServerBusy: Retry complete, will attempt client side copy for page blob");
      InputStream ipStream = null;
      OutputStream opStream = null;
      try {
        if (srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB) {
          ipStream = openInputStream(srcBlob);
          opStream = openOutputStream(dstBlob);
          byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE];
          int len;
          while ((len = ipStream.read(buffer)) != -1) {
            opStream.write(buffer, 0, len);
          }
          opStream.flush();
          opStream.close();
          ipStream.close();
        } else {
          throw new AzureException(e);
        }
        safeDelete(srcBlob, lease);
      } catch (StorageException se) {
        LOG.warn("Rename: CopyBlob: StorageException: Failed");
        throw new AzureException(se);
      } finally {
        IOUtils.closeStream(ipStream);
        IOUtils.closeStream(opStream);
      }
    } else {
      throw new AzureException(e);
    }
  } catch (URISyntaxException e) {
    // Re-throw exception as an Azure storage exception.
    throw new AzureException(e);
  }
}
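Note the two-stage retry strategy: the first startCopyFromBlob runs under the client's default retry policy, and only an HTTP 503 (server busy) response triggers a second attempt under a dedicated, more patient RetryExponentialRetry built from the copy-blob settings. Below is a minimal standalone sketch of the same pattern against the storage SDK directly; the class name, helper name, and backoff values are illustrative assumptions, not Hadoop's code or defaults, and the exact startCopyFromBlob overloads vary by SDK version (later versions rename it startCopy).

import java.net.HttpURLConnection;
import java.net.URISyntaxException;
import com.microsoft.azure.storage.RetryExponentialRetry;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

public final class ServerBusyCopy {
  // Copy src to dst, falling back to a patient exponential retry policy
  // only when the service reports HTTP 503 (server busy).
  static String copyWithServerBusyFallback(CloudBlockBlob src, CloudBlockBlob dst)
      throws StorageException, URISyntaxException {
    try {
      // First attempt: whatever retry policy the client defaults to.
      return dst.startCopyFromBlob(src);
    } catch (StorageException se) {
      if (se.getHttpStatusCode() != HttpURLConnection.HTTP_UNAVAILABLE) {
        throw se;
      }
      // Second attempt: request-scoped exponential backoff.
      // min 3 s, delta 30 s, cap 90 s, 15 attempts -- illustrative values only.
      BlobRequestOptions options = new BlobRequestOptions();
      options.setRetryPolicyFactory(
          new RetryExponentialRetry(3000, 30000, 90000, 15));
      return dst.startCopyFromBlob(src, null /* source access condition */,
          null /* destination access condition */, options, null /* context */);
    }
  }
}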
Use of com.microsoft.azure.storage.RetryExponentialRetry in project hadoop by apache.
Class AzureNativeFileSystemStore, method getDownloadOptions.
private BlobRequestOptions getDownloadOptions() {
  BlobRequestOptions options = new BlobRequestOptions();
  options.setRetryPolicyFactory(
      new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
  options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
  return options;
}
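These options are request-scoped: they take effect only when handed to an individual operation, where they override the client-wide policy. A hedged sketch of typical consumption, written as if inside the same class (the local file name is illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

// Download a blob under the retry/MD5 options built above.
void downloadWithOptions(CloudBlockBlob blob)
    throws IOException, StorageException {
  try (OutputStream out = new FileOutputStream("blob.local")) {
    blob.download(out, null /* access condition */, getDownloadOptions(),
        new OperationContext());
  }
}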
Use of com.microsoft.azure.storage.RetryExponentialRetry in project hadoop by apache.
Class AzureNativeFileSystemStore, method configureAzureStorageSession.
/**
 * Set the configuration parameters for this client storage session with
 * Azure.
 *
 * @throws AzureException
 */
private void configureAzureStorageSession() throws AzureException {
  // Assertion: Target session URI already should have been captured.
  if (sessionUri == null) {
    throw new AssertionError(
        "Expected a non-null session URI when configuring storage session");
  }
  // Assertion: A client session already should have been established with
  // Azure.
  if (storageInteractionLayer == null) {
    throw new AssertionError(String.format(
        "Cannot configure storage session for URI '%s' "
            + "if storage session has not been established.",
        sessionUri.toString()));
  }
  // Determine whether or not reads are allowed concurrent with OOB writes.
  tolerateOobAppends = sessionConfiguration.getBoolean(
      KEY_READ_TOLERATE_CONCURRENT_APPEND,
      DEFAULT_READ_TOLERATE_CONCURRENT_APPEND);
  // Retrieve configuration for the minimum stream read and write block size.
  //
  this.downloadBlockSizeBytes = sessionConfiguration.getInt(
      KEY_STREAM_MIN_READ_SIZE, DEFAULT_DOWNLOAD_BLOCK_SIZE);
  this.uploadBlockSizeBytes = sessionConfiguration.getInt(
      KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
  // The job may want to specify a timeout to use when engaging the
  // storage service. The default is currently 90 seconds. It may
  // be necessary to increase this value for long latencies in larger
  // jobs. If the timeout specified is greater than zero seconds use
  // it, otherwise use the default service client timeout.
  int storageConnectionTimeout = sessionConfiguration.getInt(
      KEY_STORAGE_CONNECTION_TIMEOUT, 0);
  if (0 < storageConnectionTimeout) {
    storageInteractionLayer.setTimeoutInMs(storageConnectionTimeout * 1000);
  }
  // Set the concurrency values equal to those specified in the
  // configuration file. If they are absent, default to double the number
  // of CPU cores on the client machine. The effective concurrency value is
  // the minimum of double the cores and the read/write property.
  int cpuCores = 2 * Runtime.getRuntime().availableProcessors();
  concurrentWrites = sessionConfiguration.getInt(
      KEY_CONCURRENT_CONNECTION_VALUE_OUT,
      Math.min(cpuCores, DEFAULT_CONCURRENT_WRITES));
  // Set up the exponential retry policy.
  //
  minBackoff = sessionConfiguration.getInt(
      KEY_MIN_BACKOFF_INTERVAL, DEFAULT_MIN_BACKOFF_INTERVAL);
  maxBackoff = sessionConfiguration.getInt(
      KEY_MAX_BACKOFF_INTERVAL, DEFAULT_MAX_BACKOFF_INTERVAL);
  deltaBackoff = sessionConfiguration.getInt(
      KEY_BACKOFF_INTERVAL, DEFAULT_BACKOFF_INTERVAL);
  maxRetries = sessionConfiguration.getInt(
      KEY_MAX_IO_RETRIES, DEFAULT_MAX_RETRY_ATTEMPTS);
  storageInteractionLayer.setRetryPolicyFactory(
      new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
  // Read the self-throttling config.
  selfThrottlingEnabled = sessionConfiguration.getBoolean(
      KEY_SELF_THROTTLE_ENABLE, DEFAULT_SELF_THROTTLE_ENABLE);
  selfThrottlingReadFactor = sessionConfiguration.getFloat(
      KEY_SELF_THROTTLE_READ_FACTOR, DEFAULT_SELF_THROTTLE_READ_FACTOR);
  selfThrottlingWriteFactor = sessionConfiguration.getFloat(
      KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
  OperationContext.setLoggingEnabledByDefault(
      sessionConfiguration.getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
  LOG.debug("AzureNativeFileSystemStore init. Settings={},{},{},{{},{},{},{}},{{},{},{}}",
      concurrentWrites, tolerateOobAppends,
      ((storageConnectionTimeout > 0) ? storageConnectionTimeout
          : STORAGE_CONNECTION_TIMEOUT_DEFAULT),
      minBackoff, deltaBackoff, maxBackoff, maxRetries,
      selfThrottlingEnabled, selfThrottlingReadFactor, selfThrottlingWriteFactor);
}
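The four values handed to RetryExponentialRetry define an exponentially growing, capped delay between attempts. As a rough illustration only (this approximates the SDK's schedule and ignores the random jitter it applies around deltaBackoff), the sketch below prints the delays a given configuration would produce:

// Approximate delay before retry n: min(minBackoff + (2^n - 1) * delta, max).
static void printApproxBackoffSchedule(int minBackoffMs, int deltaBackoffMs,
    int maxBackoffMs, int maxRetries) {
  for (int attempt = 1; attempt <= maxRetries; attempt++) {
    long delay = Math.min(
        minBackoffMs + ((1L << attempt) - 1) * deltaBackoffMs, maxBackoffMs);
    System.out.printf("retry %d: ~%d ms%n", attempt, delay);
  }
}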
Use of com.microsoft.azure.storage.RetryExponentialRetry in project hadoop by apache.
Class AzureNativeFileSystemStore, method getUploadOptions.
private BlobRequestOptions getUploadOptions() {
  BlobRequestOptions options = new BlobRequestOptions();
  options.setStoreBlobContentMD5(
      sessionConfiguration.getBoolean(KEY_STORE_BLOB_MD5, false));
  options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
  options.setConcurrentRequestCount(concurrentWrites);
  options.setRetryPolicyFactory(
      new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
  return options;
}
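As with the download options these are request-scoped, and setConcurrentRequestCount additionally allows the SDK to upload multiple blocks in parallel. A short usage sketch, again written as if inside the same class (the stream and length parameters are illustrative):

import java.io.IOException;
import java.io.InputStream;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

// Upload a stream of known length under the per-request options built above.
void uploadWithOptions(CloudBlockBlob blob, InputStream data, long length)
    throws IOException, StorageException {
  blob.upload(data, length, null /* access condition */, getUploadOptions(),
      new OperationContext());
}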