Use of com.microsoft.azure.storage.StorageException in project hadoop by apache:
class LocalSASKeyGeneratorImpl, method getContainerSASUri.
/**
 * Generates a SAS-key-based URI granting access to the given container.
 *
 * @param accountName name of the storage account owning the container
 * @param container   name of the container to generate the SAS URI for
 * @return the container URI transformed to embed the SAS credentials
 * @throws SASKeyGenerationException if the storage service or URI
 *         construction fails while building the key
 */
@Override
public URI getContainerSASUri(String accountName, String container) throws SASKeyGenerationException {
  try {
    // Resolve a storage-account handle backed by SAS credentials.
    CloudStorageAccount sasAccount = getSASKeyBasedStorageAccountInstance(accountName);
    CloudBlobClient blobClient = sasAccount.createCloudBlobClient();
    // Embed the SAS token into the container's base URI.
    URI containerUri = blobClient.getContainerReference(container).getUri();
    return blobClient.getCredentials().transformUri(containerUri);
  } catch (StorageException stoEx) {
    throw new SASKeyGenerationException("Encountered StorageException while generating SAS Key for container " + container + " inside storage account " + accountName, stoEx);
  } catch (URISyntaxException uriSyntaxEx) {
    throw new SASKeyGenerationException("Encountered URISyntaxException while generating SAS Key for container " + container + " inside storage account " + accountName, uriSyntaxEx);
  }
}
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache:
class AzureNativeFileSystemStore, method connectUsingAnonymousCredentials.
/**
 * Connect to Azure storage using anonymous credentials.
 *
 * @param uri
 *          - URI to target blob (R/O access to public blob)
 *
 * @throws StorageException
 *           raised on errors communicating with Azure storage.
 * @throws IOException
 *           raised on errors performing I/O or setting up the session.
 * @throws URISyntaxException
 *           raised on creating mal-formed URI's.
 */
private void connectUsingAnonymousCredentials(final URI uri) throws StorageException, IOException, URISyntaxException {
  // Use an HTTP scheme since the URI specifies a publicly accessible
  // container. Explicitly create a storage URI corresponding to the URI
  // parameter for use in creating the service client.
  String accountName = getAccountFromAuthority(uri);
  URI storageUri = new URI(getHTTPScheme() + ":" + PATH_DELIMITER + PATH_DELIMITER + accountName);
  // Create the service client with anonymous credentials.
  String containerName = getContainerFromAuthority(uri);
  storageInteractionLayer.createBlobClient(storageUri);
  suppressRetryPolicyInClientIfNeeded();
  // Capture the container reference.
  container = storageInteractionLayer.getContainerReference(containerName);
  rootDirectory = container.getDirectoryReference("");
  // Check for container existence, and our ability to access it.
  try {
    if (!container.exists(getInstrumentedContext())) {
      // FIX: corrected "anoynomous" typo in the user-facing message.
      throw new AzureException("Container " + containerName + " in account " + accountName + " not found, and we can't create" + " it using anonymous credentials, and no credentials found for them" + " in the configuration.");
    }
  } catch (StorageException ex) {
    // FIX: removed doubled space ("them  in") from the message; cause is preserved.
    throw new AzureException("Unable to access container " + containerName + " in account " + accountName + " using anonymous credentials, and no credentials found for them" + " in the configuration.", ex);
  }
  // Accessing the storage server unauthenticated using
  // anonymous credentials.
  isAnonymousCredentials = true;
  // Configure Azure storage session.
  configureAzureStorageSession();
}
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache:
class AzureNativeFileSystemStore, method storeEmptyFolder.
/**
 * Creates an empty "folder" blob at the given key, recording the supplied
 * permission status and the folder-marker attribute on the blob.
 *
 * @param key blob key at which the empty folder marker is stored
 * @param permissionStatus owner/group/permission metadata to record
 * @throws AzureException wrapping any storage, URI, or I/O failure
 */
@Override
public void storeEmptyFolder(String key, PermissionStatus permissionStatus) throws AzureException {
  if (null == storageInteractionLayer) {
    final String errMsg = String.format("Storage session expected for URI '%s' but does not exist.", sessionUri);
    throw new AssertionError(errMsg);
  }
  // been authenticated and all access is anonymous.
  if (!isAuthenticatedAccess()) {
    // allowed to anonymous accounts.
    // FIX: removed doubled "to to" from the message.
    throw new AzureException("Uploads to public accounts using anonymous access is prohibited.");
  }
  try {
    checkContainer(ContainerAccessType.PureWrite);
    CloudBlobWrapper blob = getBlobReference(key);
    storePermissionStatus(blob, permissionStatus);
    storeFolderAttribute(blob);
    openOutputStream(blob).close();
  } catch (StorageException | URISyntaxException e) {
    // FIX: collapsed two identical catch arms into a multi-catch.
    throw new AzureException(e);
  } catch (IOException e) {
    Throwable t = e.getCause();
    // FIX: instanceof already rejects null, so the explicit null check was redundant.
    if (t instanceof StorageException) {
      StorageException se = (StorageException) t;
      // If we got this exception, the blob should have already been created.
      // FIX: constant-first equals avoids an NPE when getErrorCode() is null.
      if (!"LeaseIdMissing".equals(se.getErrorCode())) {
        throw new AzureException(e);
      }
    } else {
      throw new AzureException(e);
    }
  }
}
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache:
class NativeAzureFileSystemBaseTest, method testSelfRenewingLease.
/**
 * Acquires a self-renewing lease, sleeps past the keep-alive interval to
 * verify the lease renews itself, then frees it and confirms the release
 * by re-acquiring the lease directly through the Azure blob API.
 */
@Test
public // timeout, to make sure the lease renews itself.
void testSelfRenewingLease() throws IllegalArgumentException, IOException, InterruptedException, StorageException {
  SelfRenewingLease lease;
  final String FILE_KEY = "file";
  fs.create(new Path(FILE_KEY));
  NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
  String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
  AzureNativeFileSystemStore store = nfs.getStore();
  lease = store.acquireLease(fullKey);
  // FIX: assertNotNull gives a clearer failure message than assertTrue(!= null).
  assertNotNull(lease.getLeaseID());
  // The sleep time for the keep-alive thread is 40 seconds, so sleep just
  // a little beyond that, to make sure the keep-alive thread wakes up
  // and renews the lease.
  Thread.sleep(42000);
  lease.free();
  // Check that the lease is really freed.
  CloudBlob blob = lease.getCloudBlob();
  // Try to acquire it again, using direct Azure blob access.
  // If that succeeds, then the lease was already freed.
  String differentLeaseID = null;
  try {
    differentLeaseID = blob.acquireLease(15, null);
  } catch (Exception e) {
    e.printStackTrace();
    fail("Caught exception trying to directly re-acquire lease from Azure");
  }
  // FIX: verification moved out of the old finally block. Previously the
  // assert ran even after fail(), masking the original failure and then
  // NPE-ing in releaseLease with a null lease ID.
  assertNotNull(differentLeaseID);
  AccessCondition accessCondition = AccessCondition.generateEmptyCondition();
  accessCondition.setLeaseID(differentLeaseID);
  blob.releaseLease(accessCondition);
}
Use of com.microsoft.azure.storage.StorageException in project hadoop by apache:
class NativeAzureFileSystem, method updateParentFolderLastModifiedTime.
/**
 * Update the last-modified time of the parent folder of the file
 * identified by key.
 * @param key blob key whose parent folder's timestamp is refreshed
 * @throws IOException if the parent no longer exists (as
 *         {@link FileNotFoundException}) or the store update fails
 */
private void updateParentFolderLastModifiedTime(String key) throws IOException {
  Path parent = makeAbsolute(keyToPath(key)).getParent();
  if (parent != null && parent.getParent() != null) {
    // not root
    String parentKey = pathToKey(parent);
    // ensure the parent is a materialized folder
    FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
    // file is renamed; so we can safely ignore the null pointer case.
    if (parentMetadata != null) {
      if (parentMetadata.isDir() && parentMetadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
        store.storeEmptyFolder(parentKey, createPermissionStatus(FsPermission.getDefault()));
      }
      if (store.isAtomicRenameKey(parentKey)) {
        SelfRenewingLease lease = null;
        try {
          lease = leaseSourceFolder(parentKey);
          store.updateFolderLastModifiedTime(parentKey, lease);
        } catch (AzureException e) {
          // FIX: replaced try/catch-around-cast with an instanceof check, and
          // made the comparison null-safe (getErrorCode() may return null,
          // which previously could NPE on errorCode.equals(...)).
          Throwable cause = e.getCause();
          String errorCode = (cause instanceof StorageException)
              ? ((StorageException) cause).getErrorCode()
              : null;
          if ("BlobNotFound".equals(errorCode)) {
            throw new FileNotFoundException("Folder does not exist: " + parentKey);
          }
          LOG.warn("Got unexpected exception trying to get lease on {}. {}", parentKey, e.getMessage());
          throw e;
        } finally {
          try {
            if (lease != null) {
              lease.free();
            }
          } catch (Exception e) {
            // Best-effort release; failure to free the lease must not mask
            // the primary outcome of the update.
            LOG.error("Unable to free lease on {}", parentKey, e);
          }
        }
      } else {
        store.updateFolderLastModifiedTime(parentKey, null);
      }
    }
  }
}
Aggregations