Search in sources :

Example 1 with BlockEntry

use of com.microsoft.azure.storage.blob.BlockEntry in project camel by apache.

From the class BlobServiceProducer, method getBlobBlockList:

private void getBlobBlockList(Exchange exchange) throws Exception {
    // Download the blob's block list and place it on the response message.
    CloudBlockBlob blobClient = BlobServiceUtil.createBlockBlobClient(getConfiguration());
    BlobServiceRequestOptions requestOptions = BlobServiceUtil.getRequestOptions(exchange);
    LOG.trace("Getting the blob block list [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
    // A listing filter may be supplied in the message body; default to committed blocks only.
    BlockListingFilter listingFilter = exchange.getIn().getBody(BlockListingFilter.class);
    if (listingFilter == null) {
        listingFilter = BlockListingFilter.COMMITTED;
    }
    List<BlockEntry> entries = blobClient.downloadBlockList(
            listingFilter, requestOptions.getAccessCond(), requestOptions.getRequestOpts(), requestOptions.getOpContext());
    ExchangeUtil.getMessageForResponse(exchange).setBody(entries);
}
Also used : BlockListingFilter(com.microsoft.azure.storage.blob.BlockListingFilter) BlockEntry(com.microsoft.azure.storage.blob.BlockEntry) CloudBlockBlob(com.microsoft.azure.storage.blob.CloudBlockBlob)

Example 2 with BlockEntry

use of com.microsoft.azure.storage.blob.BlockEntry in project camel by apache.

From the class BlobServiceProducer, method uploadBlobBlocks:

/**
 * Uploads the exchange body (a single {@code BlobBlock} or a {@code List} of them)
 * as uncommitted blocks, and commits the block list unless the
 * {@code COMMIT_BLOCK_LIST_LATER} header is set to {@code true}.
 *
 * @param exchange the Camel exchange carrying the block payload(s)
 * @throws IllegalArgumentException if the body is neither a BlobBlock nor a non-empty List of BlobBlock
 */
@SuppressWarnings("unchecked")
private void uploadBlobBlocks(Exchange exchange) throws Exception {
    Object object = exchange.getIn().getMandatoryBody();
    List<BlobBlock> blobBlocks = null;
    if (object instanceof List) {
        // BUGFIX: the original cast the still-null local ("(List<BlobBlock>) blobBlocks")
        // instead of the body, so a List payload always failed the null check below.
        blobBlocks = (List<BlobBlock>) object;
    } else if (object instanceof BlobBlock) {
        blobBlocks = Collections.singletonList((BlobBlock) object);
    }
    if (blobBlocks == null || blobBlocks.isEmpty()) {
        throw new IllegalArgumentException("Illegal storageBlocks payload");
    }
    CloudBlockBlob client = BlobServiceUtil.createBlockBlobClient(getConfiguration());
    configureCloudBlobForWrite(client);
    BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
    LOG.trace("Putting a blob [{}] from blocks from exchange [{}]...", getConfiguration().getBlobName(), exchange);
    // Remember every uploaded blockId so the commit (now or later) covers all of them.
    List<BlockEntry> blockEntries = new LinkedList<BlockEntry>();
    for (BlobBlock blobBlock : blobBlocks) {
        blockEntries.add(blobBlock.getBlockEntry());
        // -1 length: let the SDK read the stream to its end.
        client.uploadBlock(blobBlock.getBlockEntry().getId(), blobBlock.getBlockStream(), -1, opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
    }
    Boolean commitBlockListLater = exchange.getIn().getHeader(BlobServiceConstants.COMMIT_BLOCK_LIST_LATER, Boolean.class);
    if (Boolean.TRUE != commitBlockListLater) {
        client.commitBlockList(blockEntries, opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
    }
}
Also used : BlockEntry(com.microsoft.azure.storage.blob.BlockEntry) LinkedList(java.util.LinkedList) List(java.util.List) CloudBlockBlob(com.microsoft.azure.storage.blob.CloudBlockBlob) LinkedList(java.util.LinkedList)

Example 3 with BlockEntry

use of com.microsoft.azure.storage.blob.BlockEntry in project hadoop by apache.

From the class BlockBlobAppendStream, method setBlocksCountAndBlockIdPrefix:

/**
 * Lazily initializes {@code nextBlockCount} and {@code blockIdPrefix}. The blockId
 * generation algorithm is similar to the Azure storage SDK: blobs whose first
 * committed block carries a shorter (2.2.0-era) blockId keep the old scheme with
 * no prefix, otherwise a fresh UUID-based (4.2.0-era) prefix is used.
 *
 * @throws IOException if the committed block list cannot be downloaded
 */
private void setBlocksCountAndBlockIdPrefix() throws IOException {
    if (nextBlockCount != UNSET_BLOCKS_COUNT || blockIdPrefix != null) {
        // Already initialized by an earlier call.
        return;
    }
    try {
        List<BlockEntry> committedBlocks =
                blob.downloadBlockList(BlockListingFilter.COMMITTED, new BlobRequestOptions(), opContext);
        String firstBlockId = "";
        if (committedBlocks.size() > 0) {
            firstBlockId = committedBlocks.get(0).getId();
        }
        String candidatePrefix = UUID.randomUUID().toString() + "-";
        String newerVersionSample = generateNewerVersionBlockId(candidatePrefix, 0);
        if (committedBlocks.size() > 0 && firstBlockId.length() < newerVersionSample.length()) {
            // Blob was created with 2.2.0: append subsequent blocks with the older
            // (2.2.0) blockId scheme, computing nextBlockCount the way it was done
            // before, and don't use blockIdPrefix.
            this.blockIdPrefix = "";
            nextBlockCount = (long) (sequenceGenerator.nextInt(Integer.MAX_VALUE)) + sequenceGenerator.nextInt(Integer.MAX_VALUE - MAX_BLOCK_COUNT);
            nextBlockCount += committedBlocks.size();
        } else {
            // No existing blocks, or the blob already uses the newer (4.2.0) blockId
            // scheme: append subsequent blocks with the newer-version blockId.
            this.blockIdPrefix = candidatePrefix;
            nextBlockCount = committedBlocks.size();
        }
    } catch (StorageException ex) {
        LOG.debug("Encountered storage exception during setting next Block Count and BlockId prefix." + " StorageException : {} ErrorCode : {}", ex, ex.getErrorCode());
        throw new IOException(ex);
    }
}
Also used : BlobRequestOptions(com.microsoft.azure.storage.blob.BlobRequestOptions) BlockEntry(com.microsoft.azure.storage.blob.BlockEntry) IOException(java.io.IOException) StorageException(com.microsoft.azure.storage.StorageException)

Example 4 with BlockEntry

use of com.microsoft.azure.storage.blob.BlockEntry in project hadoop by apache.

From the class BlockBlobAppendStream, method uploadBlockToStorage:

/**
 * Queues an asynchronous upload of the given payload as a new uncommitted block
 * on the I/O thread pool.
 *
 * @param payload the block contents to upload to azure storage
 * @throws IOException declared for compatibility with the write path
 */
private synchronized void uploadBlockToStorage(byte[] payload) throws IOException {
    String newBlockId = generateBlockId();
    // Uploads run on parallel threads, so register the blockId as uncommitted up
    // front; if the upload of the block later fails, its blockId is never committed.
    BlockEntry entry = new BlockEntry(newBlockId);
    entry.setSize(payload.length);
    uncommittedBlockEntries.add(entry);
    ioThreadPool.execute(new WriteRequest(payload, newBlockId));
}
Also used : BlockEntry(com.microsoft.azure.storage.blob.BlockEntry)

Example 5 with BlockEntry

use of com.microsoft.azure.storage.blob.BlockEntry in project hadoop by apache.

From the class TestBlobDataValidation, method testStoreBlobMd5:

private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
    assumeNotNull(testAccount);
    // Create a small test file through the file system under test.
    String blobKey = "testFile";
    Path filePath = new Path("/" + blobKey);
    OutputStream out = testAccount.getFileSystem().create(filePath);
    out.write(new byte[] { 5, 15 });
    out.close();
    // Verify the MD5 property was stored (or not) according to configuration.
    CloudBlockBlob blob = testAccount.getBlobReference(blobKey);
    blob.downloadAttributes();
    String storedMd5 = blob.getProperties().getContentMD5();
    if (expectMd5Stored) {
        assertNotNull(storedMd5);
    } else {
        assertNull("Expected no MD5, found: " + storedMd5, storedMd5);
    }
    // Overwrite the blob content behind the file system's back so the stored
    // MD5 (if any) no longer matches the data.
    String corruptingBlockId = Base64.encode(new byte[] { 55, 44, 33, 22 });
    blob.uploadBlock(corruptingBlockId, new ByteArrayInputStream(new byte[] { 6, 45 }), 2);
    blob.commitBlockList(Arrays.asList(new BlockEntry[] { new BlockEntry(corruptingBlockId, BlockSearchMode.UNCOMMITTED) }));
    // Reading back should raise a data-corruption error exactly when the MD5 was stored.
    InputStream in = testAccount.getFileSystem().open(filePath);
    try {
        byte[] buffer = new byte[100];
        while (in.read(buffer) > 0) {
            // Drain the stream; the read itself is what may fail.
        }
        in.close();
        if (expectMd5Stored) {
            fail("Should've thrown because of data corruption.");
        }
    } catch (IOException ex) {
        if (!expectMd5Stored) {
            throw ex;
        }
        StorageException cause = (StorageException) ex.getCause();
        assertNotNull(cause);
        assertTrue("Unexpected cause: " + cause, cause.getErrorCode().equals(StorageErrorCodeStrings.INVALID_MD5));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ByteArrayInputStream(java.io.ByteArrayInputStream) BlockEntry(com.microsoft.azure.storage.blob.BlockEntry) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) OutputStream(java.io.OutputStream) IOException(java.io.IOException) CloudBlockBlob(com.microsoft.azure.storage.blob.CloudBlockBlob) StorageException(com.microsoft.azure.storage.StorageException)

Aggregations

BlockEntry (com.microsoft.azure.storage.blob.BlockEntry)7 CloudBlockBlob (com.microsoft.azure.storage.blob.CloudBlockBlob)4 StorageException (com.microsoft.azure.storage.StorageException)3 IOException (java.io.IOException)3 BlobRequestOptions (com.microsoft.azure.storage.blob.BlobRequestOptions)2 LinkedList (java.util.LinkedList)2 List (java.util.List)2 AccessCondition (com.microsoft.azure.storage.AccessCondition)1 BlockListingFilter (com.microsoft.azure.storage.blob.BlockListingFilter)1 ByteArrayInputStream (java.io.ByteArrayInputStream)1 InputStream (java.io.InputStream)1 OutputStream (java.io.OutputStream)1 Path (org.apache.hadoop.fs.Path)1