Example usage of com.microsoft.azure.storage.StorageException in the Apache Hadoop project, from the class TestBlobDataValidation, method testStoreBlobMd5.
/**
 * Writes a small file, checks whether the blob's Content-MD5 property was
 * stored (or not) as configured, then corrupts the blob content behind the
 * file system's back and verifies that reading it back fails with an
 * INVALID_MD5 error exactly when an MD5 was stored.
 *
 * @param expectMd5Stored whether the store is configured to persist MD5s
 */
private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
  assumeNotNull(testAccount);
  // Write a test file. try-with-resources guarantees the stream is closed
  // even if the write throws (the original leaked it on that path).
  String testFileKey = "testFile";
  Path testFilePath = new Path("/" + testFileKey);
  try (OutputStream outStream = testAccount.getFileSystem().create(testFilePath)) {
    outStream.write(new byte[] { 5, 15 });
  }
  // Check that we stored/didn't store the MD5 field as configured.
  CloudBlockBlob blob = testAccount.getBlobReference(testFileKey);
  blob.downloadAttributes();
  String obtainedMd5 = blob.getProperties().getContentMD5();
  if (expectMd5Stored) {
    assertNotNull("Expected an MD5 to be stored on the blob.", obtainedMd5);
  } else {
    assertNull("Expected no MD5, found: " + obtainedMd5, obtainedMd5);
  }
  // Mess with the content so it doesn't match the stored MD5 (if any).
  String newBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
  blob.uploadBlock(newBlockId, new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
  blob.commitBlockList(Arrays.asList(
      new BlockEntry[] { new BlockEntry(newBlockId, BlockSearchMode.UNCOMMITTED) }));
  // Now read back the content. If we stored the MD5 for the blob content
  // we should get a data corruption error. The stream is closed by
  // try-with-resources on both the success and the corruption path.
  try (InputStream inStream = testAccount.getFileSystem().open(testFilePath)) {
    byte[] inBuf = new byte[100];
    while (inStream.read(inBuf) > 0) {
      // Drain the stream; we only care whether reading throws.
    }
    if (expectMd5Stored) {
      fail("Should've thrown because of data corruption.");
    }
  } catch (IOException ex) {
    if (!expectMd5Stored) {
      // No MD5 was stored, so no corruption check is possible; any
      // IOException here is a genuine failure — propagate it.
      throw ex;
    }
    StorageException cause = (StorageException) ex.getCause();
    assertNotNull("Expected a StorageException as the cause.", cause);
    assertTrue("Unexpected cause: " + cause,
        cause.getErrorCode().equals(StorageErrorCodeStrings.INVALID_MD5));
  }
}
Example usage of com.microsoft.azure.storage.StorageException in the Apache Hadoop project, from the class TestNativeAzureFileSystemLive, method testDeleteThrowsExceptionWithLeaseExistsErrorMessage.
/**
 * Tests the fs.delete() function to delete a blob when another process is
 * holding a lease on it. A delete called without a lease should retry while
 * another process holds the lease, and ultimately succeed once it is freed.
 * This is a scenario that would happen in HMaster startup when it tries to
 * clean up the temp dirs while the HMaster process which was killed earlier
 * held a lease on the blob while doing some DDL operation.
 */
@Test
public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() throws Exception {
  LOG.info("Starting test");
  final String FILE_KEY = "fileWithLease";
  // Create the file. Close the stream returned by create() so it isn't
  // leaked (the original dropped it on the floor).
  Path path = new Path(FILE_KEY);
  fs.create(path).close();
  assertTrue("Test file was not created.", fs.exists(path));
  NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
  final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
  final AzureNativeFileSystemStore store = nfs.getStore();
  // Latches coordinating the background lease holder with the main thread:
  // one signals "lease attempt finished", the other "delete is starting".
  final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
  final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
  Thread t = new Thread() {
    @Override
    public void run() {
      // Acquire the lease and then signal the main test thread.
      SelfRenewingLease lease = null;
      try {
        lease = store.acquireLease(fullKey);
        LOG.info("Lease acquired: " + lease.getLeaseID());
      } catch (AzureException e) {
        LOG.warn("Lease acquisition thread unable to acquire lease", e);
      } finally {
        // Count down even on failure so the main thread never hangs.
        leaseAttemptComplete.countDown();
      }
      // Wait for the main test thread to signal it will attempt the delete.
      try {
        beginningDeleteAttempt.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      // Hold the lease across several retry intervals so the test covers
      // the case of delete retrying to acquire the lease.
      try {
        Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
      } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
      }
      try {
        if (lease != null) {
          LOG.info("Freeing lease");
          lease.free();
        }
      } catch (StorageException se) {
        LOG.warn("Unable to free lease.", se);
      }
    }
  };
  // Start the background thread and wait for it to signal the lease is held.
  t.start();
  try {
    leaseAttemptComplete.await();
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }
  // Try to delete the same file while the lease is still held; delete
  // should retry internally until the lease is released.
  beginningDeleteAttempt.countDown();
  store.delete(fullKey);
  // At this point the file SHOULD BE DELETED.
  assertFalse("File should have been deleted.", fs.exists(path));
  // Don't let the lease-holder thread outlive the test.
  t.join();
}
Example usage of com.microsoft.azure.storage.StorageException in the Apache Druid project, from the class AzureDataSegmentKiller, method kill.
@Override
public void kill(DataSegment segment) throws SegmentLoadingException {
  log.info("Killing segment [%s]", segment);
  // Pull the blob coordinates out of the segment's load spec and delete
  // everything under the blob's parent directory.
  final Map<String, Object> loadSpec = segment.getLoadSpec();
  final String container = MapUtils.getString(loadSpec, "containerName");
  final String blob = MapUtils.getString(loadSpec, "blobPath");
  final String parentDir = Paths.get(blob).getParent().toString();
  try {
    azureStorage.emptyCloudBlobDirectory(container, parentDir);
  } catch (StorageException e) {
    // Surface the Azure error message when extended error info is available.
    final String detail = e.getExtendedErrorInformation() == null
        ? null
        : e.getExtendedErrorInformation().getErrorMessage();
    throw new SegmentLoadingException(e, "Couldn't kill segment[%s]: [%s]", segment.getIdentifier(), detail);
  } catch (URISyntaxException e) {
    throw new SegmentLoadingException(e, "Couldn't kill segment[%s]: [%s]", segment.getIdentifier(), e.getReason());
  }
}
Example usage of com.microsoft.azure.storage.StorageException in the Apache Druid project, from the class AzureTaskLogs, method streamTaskLog.
/**
 * Streams a task's log from Azure blob storage, starting at the requested
 * offset. A positive offset counts from the head of the log, a negative
 * offset counts back from the tail, and an out-of-range offset falls back
 * to the beginning.
 *
 * @param taskid the task whose log to stream
 * @param offset byte offset to start from (may be negative)
 * @return an absent Optional if the log blob does not exist
 * @throws IOException on any storage failure
 */
@Override
public Optional<ByteSource> streamTaskLog(final String taskid, final long offset) throws IOException {
  final String container = config.getContainer();
  final String taskKey = getTaskLogKey(taskid);
  try {
    if (!azureStorage.getBlobExists(container, taskKey)) {
      return Optional.absent();
    }
    return Optional.<ByteSource>of(new ByteSource() {
      @Override
      public InputStream openStream() throws IOException {
        try {
          // Translate the requested offset into an absolute start position.
          final long start;
          final long length = azureStorage.getBlobLength(container, taskKey);
          if (offset > 0 && offset < length) {
            start = offset;
          } else if (offset < 0 && (-1 * offset) < length) {
            start = length + offset;
          } else {
            start = 0;
          }
          InputStream stream = azureStorage.getBlobInputStream(container, taskKey);
          try {
            // InputStream.skip() may skip fewer bytes than requested, so
            // loop until the full start offset is consumed (the original
            // issued a single skip() and ignored its return value).
            long remaining = start;
            while (remaining > 0) {
              long skipped = stream.skip(remaining);
              if (skipped <= 0) {
                break;
              }
              remaining -= skipped;
            }
          } catch (Exception e) {
            // Don't leak the blob stream if skipping fails.
            stream.close();
            throw e;
          }
          return stream;
        } catch (Exception e) {
          throw new IOException(e);
        }
      }
    });
  } catch (StorageException | URISyntaxException e) {
    throw new IOException(String.format("Failed to stream logs from: %s", taskKey), e);
  }
}
Example usage of com.microsoft.azure.storage.StorageException in the Apache Druid project, from the class AzureByteSourceTest, method openStreamWithRecoverableErrorTest.
@Test(expected = IOException.class)
public void openStreamWithRecoverableErrorTest() throws URISyntaxException, StorageException, IOException {
  final String container = "container";
  final String path = "/path/to/file";
  // Simulate a transient server-side failure (HTTP 500) from Azure storage.
  AzureStorage storage = createMock(AzureStorage.class);
  expect(storage.getBlobInputStream(container, path))
      .andThrow(new StorageException("", "", 500, null, null));
  replayAll();
  // openStream() is expected to wrap the StorageException in an IOException,
  // which satisfies the @Test(expected = ...) annotation above.
  AzureByteSource source = new AzureByteSource(storage, container, path);
  source.openStream();
  verifyAll();
}
Aggregations