Use of org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper in project hadoop by apache.
Class AzureNativeFileSystemStore, method retrieve.
@Override
public DataInputStream retrieve(String key) throws AzureException, IOException {
  try {
    // Ensure a storage session has been established with the Azure storage server.
    if (null == storageInteractionLayer) {
      final String errMsg = String.format(
          "Storage session expected for URI '%s' but does not exist.", sessionUri);
      throw new AssertionError(errMsg);
    }
    checkContainer(ContainerAccessType.PureRead);

    // Get the blob reference and open a buffered input stream over it.
    CloudBlobWrapper blob = getBlobReference(key);
    BufferedInputStream inBufStream = new BufferedInputStream(openInputStream(blob));

    // Return a data input stream.
    DataInputStream inDataStream = new DataInputStream(inBufStream);
    return inDataStream;
  } catch (Exception e) {
    // Re-throw as an Azure storage exception.
    throw new AzureException(e);
  }
}
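As a usage sketch (not taken from the Hadoop source), the stream returned by retrieve could be drained and closed by a caller that already holds an initialized AzureNativeFileSystemStore; the store parameter and the readBlob helper below are illustrative assumptions.

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;

// Hypothetical helper: reads the whole blob addressed by 'key' via retrieve().
static byte[] readBlob(AzureNativeFileSystemStore store, String key) throws IOException {
  try (DataInputStream in = store.retrieve(key)) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buffer = new byte[8 * 1024];
    int read;
    while ((read = in.read(buffer)) != -1) {
      out.write(buffer, 0, read);
    }
    return out.toByteArray();
  }
}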
Use of org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper in project hadoop by apache.
Class AzureNativeFileSystemStore, method storeEmptyLinkFile.
/**
 * Stores an empty blob that's linking to the temporary file where we're
 * uploading the initial data.
 */
@Override
public void storeEmptyLinkFile(String key, String tempBlobKey,
    PermissionStatus permissionStatus) throws AzureException {
  if (null == storageInteractionLayer) {
    final String errMsg = String.format(
        "Storage session expected for URI '%s' but does not exist.", sessionUri);
    throw new AssertionError(errMsg);
  }
  // Check whether the account has been authenticated; if not, all access is anonymous.
  if (!isAuthenticatedAccess()) {
    // Uploads are not allowed to anonymous accounts.
    throw new AzureException(
        "Uploads to public accounts using anonymous access is prohibited.");
  }
  try {
    checkContainer(ContainerAccessType.PureWrite);
    CloudBlobWrapper blob = getBlobReference(key);
    storePermissionStatus(blob, permissionStatus);
    storeLinkAttribute(blob, tempBlobKey);
    openOutputStream(blob).close();
  } catch (Exception e) {
    // Re-throw as an Azure storage exception.
    throw new AzureException(e);
  }
}
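A hedged sketch of the intended calling pattern: create the empty link blob under the final key before the data is uploaded to the temporary blob. The store instance, the key names, and the PermissionStatus values below are illustrative, not taken from the Hadoop source.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

// Illustrative keys; real callers derive these from the WASB path being written.
String finalKey = "data/output.txt";
String tempBlobKey = finalKey + "._COPYING_";

PermissionStatus perms =
    new PermissionStatus("hadoopuser", "supergroup", FsPermission.getFileDefault());

// Publish the placeholder first, then stream the actual bytes under tempBlobKey.
store.storeEmptyLinkFile(finalKey, tempBlobKey, perms);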
Use of org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper in project hadoop by apache.
Class AzureNativeFileSystemStore, method getLinkInFileMetadata.
/**
 * If the blob with the given key exists and has a link in its metadata to a
 * temporary file (see storeEmptyLinkFile), this method returns the key to
 * that temporary file. Otherwise, returns null.
 */
@Override
public String getLinkInFileMetadata(String key) throws AzureException {
  if (null == storageInteractionLayer) {
    final String errMsg = String.format(
        "Storage session expected for URI '%s' but does not exist.", sessionUri);
    throw new AssertionError(errMsg);
  }
  try {
    checkContainer(ContainerAccessType.PureRead);
    CloudBlobWrapper blob = getBlobReference(key);
    blob.downloadAttributes(getInstrumentedContext());
    return getLinkAttributeValue(blob);
  } catch (Exception e) {
    // Re-throw as an Azure storage exception.
    throw new AzureException(e);
  }
}
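To show how the lookup pairs with storeEmptyLinkFile, here is a minimal sketch, again assuming an illustrative store instance and key, that checks whether a key is still a link placeholder.

// Illustrative key matching the sketch above.
String key = "data/output.txt";

String linkedTempKey = store.getLinkInFileMetadata(key);
if (linkedTempKey != null) {
  // The blob is only a placeholder; the data is being written under linkedTempKey.
} else {
  // No link attribute: the blob (if it exists) holds real data.
}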
Use of org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper in project hadoop by apache.
Class AzureNativeFileSystemStore, method retrieveAppendStream.
@Override
public DataOutputStream retrieveAppendStream(String key, int bufferSize)
    throws IOException {
  try {
    if (isPageBlobKey(key)) {
      throw new UnsupportedOperationException("Append not supported for Page Blobs");
    }
    CloudBlobWrapper blob = this.container.getBlockBlobReference(key);
    BlockBlobAppendStream appendStream = new BlockBlobAppendStream(
        (CloudBlockBlobWrapper) blob, key, bufferSize, getInstrumentedContext());
    appendStream.initialize();
    return new DataOutputStream(appendStream);
  } catch (Exception ex) {
    throw new AzureException(ex);
  }
}
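A sketch of appending through the returned stream, assuming a hypothetical store instance and an arbitrary buffer size; a page-blob key would be rejected as shown above.

import java.io.DataOutputStream;
import java.nio.charset.StandardCharsets;

byte[] extra = "appended line\n".getBytes(StandardCharsets.UTF_8);

// The 4 MB bufferSize is an illustrative choice, not a recommended value.
try (DataOutputStream append = store.retrieveAppendStream("logs/app.log", 4 * 1024 * 1024)) {
  append.write(extra);
}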
Use of org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper in project hadoop by apache.
Class AzureNativeFileSystemStore, method storefile.
@Override
public DataOutputStream storefile(String key, PermissionStatus permissionStatus)
    throws AzureException {
  try {
    // Ensure a storage session has been established with the Azure storage server.
    if (null == storageInteractionLayer) {
      final String errMsg = String.format(
          "Storage session expected for URI '%s' but does not exist.", sessionUri);
      throw new AzureException(errMsg);
    }

    // Check whether the account has been authenticated; if not, all access is anonymous.
    if (!isAuthenticatedAccess()) {
      // Uploads are not allowed to anonymous accounts.
      throw new AzureException(new IOException(
          "Uploads to public accounts using anonymous access is prohibited."));
    }
    checkContainer(ContainerAccessType.PureWrite);

    // Block writes to the $root container; only non-root containers may be updated.
    if (AZURE_ROOT_CONTAINER.equals(getContainerFromAuthority(sessionUri))) {
      final String errMsg = String.format(
          "Writes to '%s' container for URI '%s' are prohibited, "
              + "only updates on non-root containers permitted.",
          AZURE_ROOT_CONTAINER, sessionUri.toString());
      throw new AzureException(errMsg);
    }

    // Get the blob reference from the store's container and
    // record the permission status on it.
    CloudBlobWrapper blob = getBlobReference(key);
    storePermissionStatus(blob, permissionStatus);

    // Create the output stream for the Azure blob.
    OutputStream outputStream = openOutputStream(blob);
    DataOutputStream dataOutStream = new SyncableDataOutputStream(outputStream);
    return dataOutStream;
  } catch (Exception e) {
    // Re-throw as an Azure storage exception.
    throw new AzureException(e);
  }
}
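A closing sketch of writing a new blob through storefile, again with an illustrative store instance, key, and payload; closing the returned stream is what flushes the buffered bytes to the block blob.

import java.io.DataOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

byte[] payload = "hello wasb".getBytes(StandardCharsets.UTF_8);
PermissionStatus perms =
    new PermissionStatus("hadoopuser", "supergroup", FsPermission.getFileDefault());

// Writing and then closing the stream uploads the payload as a block blob under the key.
try (DataOutputStream out = store.storefile("data/hello.txt", perms)) {
  out.write(payload);
}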