Example 36 with BlobProperties

use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

the class ServerHardDeleteTest method getAndVerify.

/**
 * Fetches the blob (for all MessageFormatFlags) and verifies the content
 * @param channel the {@link BlockingChannel} to use to send and receive data
 * @param blobsCount the total number of blobs to fetch and verify
 * @throws Exception
 */
void getAndVerify(BlockingChannel channel, int blobsCount) throws Exception {
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
    ArrayList<BlobId> ids = new ArrayList<>();
    for (int i = 0; i < blobsCount; i++) {
        ids.add(blobIdList.get(i));
    }
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIdList.get(0).getPartition(), ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    ArrayList<MessageFormatFlags> flags = new ArrayList<>();
    flags.add(MessageFormatFlags.BlobProperties);
    flags.add(MessageFormatFlags.BlobUserMetadata);
    flags.add(MessageFormatFlags.Blob);
    for (MessageFormatFlags flag : flags) {
        GetRequest getRequest = new GetRequest(1, "clientid2", flag, partitionRequestInfoList, GetOption.Include_All);
        channel.send(getRequest);
        InputStream stream = channel.receive().getInputStream();
        GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
        if (flag == MessageFormatFlags.BlobProperties) {
            for (int i = 0; i < blobsCount; i++) {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                Assert.assertEquals(properties.get(i).getBlobSize(), propertyOutput.getBlobSize());
                Assert.assertEquals("serviceid1", propertyOutput.getServiceId());
                Assert.assertEquals("AccountId mismatch", properties.get(i).getAccountId(), propertyOutput.getAccountId());
                Assert.assertEquals("ContainerId mismatch", properties.get(i).getContainerId(), propertyOutput.getContainerId());
            }
        } else if (flag == MessageFormatFlags.BlobUserMetadata) {
            for (int i = 0; i < blobsCount; i++) {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                Assert.assertArrayEquals(userMetadataOutput.array(), usermetadata.get(i));
            }
        } else if (flag == MessageFormatFlags.Blob) {
            for (int i = 0; i < blobsCount; i++) {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                Assert.assertEquals(properties.get(i).getBlobSize(), blobData.getSize());
                byte[] dataOutput = new byte[(int) blobData.getSize()];
                blobData.getStream().read(dataOutput);
                Assert.assertArrayEquals(dataOutput, data.get(i));
            }
        } else {
            throw new IllegalArgumentException("Unrecognized message format flag " + flag);
        }
    }
}
Also used : MessageFormatFlags(com.github.ambry.messageformat.MessageFormatFlags) DataInputStream(java.io.DataInputStream) CrcInputStream(com.github.ambry.utils.CrcInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) DataInputStream(java.io.DataInputStream) GetResponse(com.github.ambry.protocol.GetResponse) ByteBuffer(java.nio.ByteBuffer) GetRequest(com.github.ambry.protocol.GetRequest) BlobProperties(com.github.ambry.messageformat.BlobProperties) BlobData(com.github.ambry.messageformat.BlobData) BlobId(com.github.ambry.commons.BlobId)
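
The loop above can be distilled into a small helper for a single flag: send one GetRequest for a batch of blob IDs (all from the same partition) and deserialize one record per ID, in request order, from the response stream. The sketch below is hypothetical and not part of the test; the correlation ID and client ID are arbitrary, and the imports (in particular the package of BlockingChannel) are inferred from the surrounding example.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.commons.BlobId;
import com.github.ambry.messageformat.BlobProperties;
import com.github.ambry.messageformat.MessageFormatFlags;
import com.github.ambry.messageformat.MessageFormatRecord;
import com.github.ambry.network.BlockingChannel;
import com.github.ambry.protocol.GetOption;
import com.github.ambry.protocol.GetRequest;
import com.github.ambry.protocol.GetResponse;
import com.github.ambry.protocol.PartitionRequestInfo;
import java.io.DataInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

/**
 * Hypothetical helper (not part of the test): fetches the {@link BlobProperties} of every id in
 * {@code ids} with a single GetRequest. All ids are assumed to belong to the same partition, and
 * one BlobProperties record is deserialized per id, in request order, as in getAndVerify above.
 */
List<BlobProperties> fetchBlobProperties(BlockingChannel channel, List<BlobId> ids, ClusterMap clusterMap)
    throws Exception {
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
    partitionRequestInfoList.add(new PartitionRequestInfo(ids.get(0).getPartition(), new ArrayList<>(ids)));
    GetRequest getRequest =
        new GetRequest(1, "clientid", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
    channel.send(getRequest);
    InputStream stream = channel.receive().getInputStream();
    GetResponse response = GetResponse.readFrom(new DataInputStream(stream), clusterMap);
    List<BlobProperties> result = new ArrayList<>();
    for (int i = 0; i < ids.size(); i++) {
        result.add(MessageFormatRecord.deserializeBlobProperties(response.getInputStream()));
    }
    return result;
}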

Example 37 with BlobProperties

use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

the class NonBlockingRouterTest method setOperationParams.

private void setOperationParams() {
    putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), testEncryption);
    putUserMetadata = new byte[10];
    random.nextBytes(putUserMetadata);
    putContent = new byte[PUT_CONTENT_SIZE];
    random.nextBytes(putContent);
    putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
}
Also used : ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) BlobProperties(com.github.ambry.messageformat.BlobProperties)
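
For readability, here is the same constructor call annotated argument by argument. The parameter names are inferred from context and from the getters used elsewhere on this page (getBlobSize, getServiceId, getTimeToLiveInSeconds, getAccountId, getContainerId, isEncrypted); check them against the BlobProperties javadoc before relying on the exact positions.

putBlobProperties = new BlobProperties(
    -1,                                      // blob size, not known up front in this test
    "serviceId",                             // service id
    "memberId",                              // owner id (assumed from context)
    "contentType",                           // content type (assumed from context)
    false,                                   // private flag (assumed from context)
    Utils.Infinite_Time,                     // time to live in seconds, i.e. never expires
    Utils.getRandomShort(TestUtils.RANDOM),  // account id
    Utils.getRandomShort(TestUtils.RANDOM),  // container id
    testEncryption);                         // whether the blob should be encrypted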

Example 38 with BlobProperties

use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

the class NonBlockingRouterTest method testCompositeBlobDataChunksDelete.

/**
 * Test that if a composite blob is deleted, the data chunks are eventually deleted. Also check the service IDs used
 * for delete operations.
 */
@Test
public void testCompositeBlobDataChunksDelete() throws Exception {
    // Ensure there are 4 chunks.
    maxPutChunkSize = PUT_CONTENT_SIZE / 4;
    Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties((props));
    RouterConfig routerConfig = new RouterConfig(verifiableProperties);
    MockClusterMap mockClusterMap = new MockClusterMap();
    MockTime mockTime = new MockTime();
    MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
    // metadata blob + data chunks.
    final AtomicReference<CountDownLatch> deletesDoneLatch = new AtomicReference<>();
    final Map<String, String> blobsThatAreDeleted = new HashMap<>();
    LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {

        @Override
        public void onBlobDeleted(String blobId, String serviceId) {
            blobsThatAreDeleted.put(blobId, serviceId);
            deletesDoneLatch.get().countDown();
        }
    };
    router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap), new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, mockTime);
    setOperationParams();
    String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
    String deleteServiceId = "delete-service";
    Set<String> blobsToBeDeleted = getBlobsInServers(mockServerLayout);
    int getRequestCount = mockServerLayout.getCount(RequestOrResponseType.GetRequest);
    // The third iteration is to test the case where the blob has expired.
    for (int i = 0; i < 3; i++) {
        if (i == 2) {
            // Create a clean cluster and put another blob that immediately expires.
            setOperationParams();
            putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false);
            blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
            Set<String> allBlobsInServer = getBlobsInServers(mockServerLayout);
            allBlobsInServer.removeAll(blobsToBeDeleted);
            blobsToBeDeleted = allBlobsInServer;
        }
        blobsThatAreDeleted.clear();
        deletesDoneLatch.set(new CountDownLatch(5));
        router.deleteBlob(blobId, deleteServiceId, null).get();
        Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
        Assert.assertTrue("All blobs in server are deleted", blobsThatAreDeleted.keySet().containsAll(blobsToBeDeleted));
        Assert.assertTrue("Only blobs in server are deleted", blobsToBeDeleted.containsAll(blobsThatAreDeleted.keySet()));
        for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
            String expectedServiceId = blobIdAndServiceId.getKey().equals(blobId) ? deleteServiceId : BackgroundDeleteRequest.SERVICE_ID_PREFIX + deleteServiceId;
            Assert.assertEquals("Unexpected service ID for deleted blob", expectedServiceId, blobIdAndServiceId.getValue());
        }
        // For 1 chunk deletion attempt, 1 background operation for Get is initiated which results in 2 Get Requests at
        // the servers.
        getRequestCount += 2;
        Assert.assertEquals("Only one attempt of chunk deletion should have been done", getRequestCount, mockServerLayout.getCount(RequestOrResponseType.GetRequest));
    }
    deletesDoneLatch.set(new CountDownLatch(5));
    router.deleteBlob(blobId, null, null).get();
    Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    router.close();
    assertClosed();
    Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) CountDownLatch(java.util.concurrent.CountDownLatch) RouterConfig(com.github.ambry.config.RouterConfig) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobProperties(com.github.ambry.messageformat.BlobProperties) HashMap(java.util.HashMap) Map(java.util.Map) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) MockTime(com.github.ambry.utils.MockTime) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
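
The CountDownLatch(5) above follows from the chunk arithmetic at the top of the test: with maxPutChunkSize = PUT_CONTENT_SIZE / 4 (and PUT_CONTENT_SIZE assumed to be an exact multiple of 4), the put produces 4 data chunks plus 1 metadata blob, so 5 delete notifications are expected per iteration. A minimal sketch with a hypothetical helper name:

// Hypothetical helper, not part of the test: delete notifications expected when a composite
// blob and all of its data chunks are deleted.
static int expectedDeleteNotifications(int putContentSize, int maxPutChunkSize) {
    int dataChunks = (putContentSize + maxPutChunkSize - 1) / maxPutChunkSize; // ceiling division
    return dataChunks + 1; // the data chunks plus the metadata (composite) blob
}

// With maxPutChunkSize = PUT_CONTENT_SIZE / 4 this evaluates to 4 + 1 = 5, matching CountDownLatch(5).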

Example 39 with BlobProperties

use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

the class MockReadableStreamChannel method verifyCompositeBlob.

/**
 * Verify the composite blob's content and userMetadata.
 * @param properties {@link BlobProperties} of the blob
 * @param originalPutContent original put content
 * @param originalUserMetadata original user-metadata
 * @param dataBlobIds {@link List} of {@link StoreKey}s of the composite blob in context
 * @param request {@link com.github.ambry.protocol.PutRequest.ReceivedPutRequest} to fetch info from
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @throws Exception
 */
private void verifyCompositeBlob(BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata, List<StoreKey> dataBlobIds, PutRequest.ReceivedPutRequest request, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
    StoreKey lastKey = dataBlobIds.get(dataBlobIds.size() - 1);
    byte[] content = new byte[(int) request.getBlobProperties().getBlobSize()];
    AtomicInteger offset = new AtomicInteger(0);
    for (StoreKey key : dataBlobIds) {
        PutRequest.ReceivedPutRequest dataBlobPutRequest = deserializePutRequest(serializedRequests.get(key.getID()));
        AtomicInteger dataBlobLength = new AtomicInteger((int) dataBlobPutRequest.getBlobSize());
        InputStream dataBlobStream = dataBlobPutRequest.getBlobStream();
        if (!properties.isEncrypted()) {
            Utils.readBytesFromStream(dataBlobStream, content, offset.get(), dataBlobLength.get());
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, dataBlobPutRequest.getUsermetadata().array());
        } else {
            byte[] dataBlobContent = Utils.readBytesFromStream(dataBlobStream, dataBlobLength.get());
            // The reason to call run() directly instead of spinning up a thread (via start()) is that any exceptions
            // or assertion failures in a non-main thread would not fail the test.
            new DecryptJob(dataBlobPutRequest.getBlobId(), dataBlobPutRequest.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(dataBlobContent), dataBlobPutRequest.getUsermetadata().duplicate(), cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                Assert.assertNull("Exception should not be thrown", exception);
                Assert.assertEquals("BlobId mismatch", dataBlobPutRequest.getBlobId(), result.getBlobId());
                Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
                dataBlobLength.set(result.getDecryptedBlobContent().remaining());
                result.getDecryptedBlobContent().get(content, offset.get(), dataBlobLength.get());
            }).run();
        }
        if (key != lastKey) {
            Assert.assertEquals("all chunks except last should be fully filled", chunkSize, dataBlobLength.get());
        } else {
            Assert.assertEquals("Last chunk should be of non-zero length and equal to the length of the remaining bytes", (originalPutContent.length - 1) % chunkSize + 1, dataBlobLength.get());
        }
        offset.addAndGet(dataBlobLength.get());
        Assert.assertEquals("dataBlobStream should have no more data", -1, dataBlobStream.read());
        notificationSystem.verifyNotification(key.getID(), NotificationBlobType.DataChunk, dataBlobPutRequest.getBlobProperties());
    }
    Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
}
Also used : DataInputStream(java.io.DataInputStream) Arrays(java.util.Arrays) ServerErrorCode(com.github.ambry.commons.ServerErrorCode) BlobProperties(com.github.ambry.messageformat.BlobProperties) DataNodeId(com.github.ambry.clustermap.DataNodeId) RunWith(org.junit.runner.RunWith) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) HashMap(java.util.HashMap) Random(java.util.Random) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Future(java.util.concurrent.Future) GeneralSecurityException(java.security.GeneralSecurityException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) After(org.junit.After) SystemTime(com.github.ambry.utils.SystemTime) PutRequest(com.github.ambry.protocol.PutRequest) Parameterized(org.junit.runners.Parameterized) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Set(java.util.Set) MetadataContentSerDe(com.github.ambry.messageformat.MetadataContentSerDe) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) CryptoServiceConfig(com.github.ambry.config.CryptoServiceConfig) TimeUnit(java.util.concurrent.TimeUnit) RouterConfig(com.github.ambry.config.RouterConfig) CountDownLatch(java.util.concurrent.CountDownLatch) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) MockTime(com.github.ambry.utils.MockTime) KMSConfig(com.github.ambry.config.KMSConfig) NotificationBlobType(com.github.ambry.notification.NotificationBlobType) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobType(com.github.ambry.messageformat.BlobType) Assert(org.junit.Assert) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) BlobId(com.github.ambry.commons.BlobId) CompositeBlobInfo(com.github.ambry.messageformat.CompositeBlobInfo) InputStream(java.io.InputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DataInputStream(java.io.DataInputStream) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) InputStream(java.io.InputStream) PutRequest(com.github.ambry.protocol.PutRequest) StoreKey(com.github.ambry.store.StoreKey)
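
The last-chunk assertion above relies on the identity (length - 1) % chunkSize + 1: unlike length % chunkSize, it yields chunkSize rather than 0 when the content length is an exact multiple of the chunk size. A small stand-alone sketch (the helper name is hypothetical):

// Size of the final chunk when totalLength bytes are split into chunks of chunkSize bytes.
static int lastChunkSize(int totalLength, int chunkSize) {
    return (totalLength - 1) % chunkSize + 1;
}

// lastChunkSize(40, 10) == 10  (exact multiple: the last chunk is full)
// lastChunkSize(42, 10) == 2   (otherwise: the last chunk holds the leftover bytes)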

Example 40 with BlobProperties

use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

the class AmbryBlobStorageService method handlePost.

@Override
public void handlePost(RestRequest restRequest, RestResponseChannel restResponseChannel) {
    long processingStartTime = System.currentTimeMillis();
    long preProcessingTime = 0;
    handlePrechecks(restRequest, restResponseChannel);
    boolean sslUsed = restRequest.getSSLSession() != null;
    RestRequestMetrics metrics = frontendMetrics.postRequestMetricsGroup.getRestRequestMetrics(sslUsed, false);
    restRequest.getMetricsTracker().injectMetrics(metrics);
    try {
        logger.trace("Handling POST request - {}", restRequest.getUri());
        checkAvailable();
        // TODO: make this non-blocking once all handling of individual methods is moved to their own classes
        securityService.preProcessRequest(restRequest).get();
        long propsBuildStartTime = System.currentTimeMillis();
        accountAndContainerInjector.injectAccountAndContainerForPostRequest(restRequest);
        BlobProperties blobProperties = RestUtils.buildBlobProperties(restRequest.getArgs());
        if (blobProperties.getTimeToLiveInSeconds() + TimeUnit.MILLISECONDS.toSeconds(blobProperties.getCreationTimeInMs()) > Integer.MAX_VALUE) {
            logger.debug("TTL set to very large value in POST request with BlobProperties {}", blobProperties);
            frontendMetrics.ttlTooLargeError.inc();
        }
        // inject encryption metrics if applicable
        if (blobProperties.isEncrypted()) {
            metrics = frontendMetrics.postRequestMetricsGroup.getRestRequestMetrics(sslUsed, true);
            restRequest.getMetricsTracker().injectMetrics(metrics);
        }
        byte[] usermetadata = RestUtils.buildUsermetadata(restRequest.getArgs());
        frontendMetrics.blobPropsBuildTimeInMs.update(System.currentTimeMillis() - propsBuildStartTime);
        logger.trace("Blob properties of blob being POSTed - {}", blobProperties);
        PostCallback routerCallback = new PostCallback(restRequest, restResponseChannel, new BlobInfo(blobProperties, usermetadata));
        preProcessingTime = System.currentTimeMillis() - processingStartTime;
        SecurityProcessRequestCallback securityCallback = new SecurityProcessRequestCallback(restRequest, restResponseChannel, blobProperties, usermetadata, routerCallback);
        securityService.processRequest(restRequest, securityCallback);
    } catch (Exception e) {
        submitResponse(restRequest, restResponseChannel, null, extractExecutionExceptionCause(e));
    } finally {
        frontendMetrics.postPreProcessingTimeInMs.update(preProcessingTime);
    }
}
Also used : RestRequestMetrics(com.github.ambry.rest.RestRequestMetrics) BlobProperties(com.github.ambry.messageformat.BlobProperties) BlobInfo(com.github.ambry.messageformat.BlobInfo) IOException(java.io.IOException) RouterException(com.github.ambry.router.RouterException) ExecutionException(java.util.concurrent.ExecutionException) RestServiceException(com.github.ambry.rest.RestServiceException)
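
The TTL check in handlePost() guards against an absolute expiry time, in seconds since the epoch, that would overflow an int; when it triggers, the frontend only logs and increments the ttlTooLargeError metric rather than failing the request. A minimal sketch of the same condition with a hypothetical helper name:

import com.github.ambry.messageformat.BlobProperties;
import java.util.concurrent.TimeUnit;

// Hypothetical helper mirroring the check in handlePost(): true if the blob's absolute expiry,
// expressed in seconds since the epoch, does not fit in a signed 32-bit int.
static boolean ttlTooLarge(BlobProperties blobProperties) {
    long expiryEpochSeconds = blobProperties.getTimeToLiveInSeconds()
        + TimeUnit.MILLISECONDS.toSeconds(blobProperties.getCreationTimeInMs());
    return expiryEpochSeconds > Integer.MAX_VALUE;
}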

Aggregations

BlobProperties (com.github.ambry.messageformat.BlobProperties): 79 usages
BlobId (com.github.ambry.commons.BlobId): 35 usages
ArrayList (java.util.ArrayList): 35 usages
DataInputStream (java.io.DataInputStream): 26 usages
Test (org.junit.Test): 25 usages
ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel): 24 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 24 usages
ByteBuffer (java.nio.ByteBuffer): 24 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 20 usages
GetResponse (com.github.ambry.protocol.GetResponse): 20 usages
PutRequest (com.github.ambry.protocol.PutRequest): 20 usages
IOException (java.io.IOException): 20 usages
Properties (java.util.Properties): 20 usages
InMemAccountService (com.github.ambry.account.InMemAccountService): 19 usages
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 19 usages
LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem): 18 usages
ByteBuf (io.netty.buffer.ByteBuf): 18 usages
GetRequest (com.github.ambry.protocol.GetRequest): 17 usages
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 17 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages