use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class ServerHardDeleteTest method getAndVerify.
/**
 * Fetches the blob (for all MessageFormatFlags) and verifies the content.
 * @param channel the {@link BlockingChannel} to use to send and receive data
 * @param blobsCount the total number of blobs that need to be verified
 * @throws Exception
 */
void getAndVerify(BlockingChannel channel, int blobsCount) throws Exception {
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
  ArrayList<BlobId> ids = new ArrayList<>();
  for (int i = 0; i < blobsCount; i++) {
    ids.add(blobIdList.get(i));
  }
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIdList.get(0).getPartition(), ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  ArrayList<MessageFormatFlags> flags = new ArrayList<>();
  flags.add(MessageFormatFlags.BlobProperties);
  flags.add(MessageFormatFlags.BlobUserMetadata);
  flags.add(MessageFormatFlags.Blob);
  for (MessageFormatFlags flag : flags) {
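    // GetOption.Include_All returns blobs even if they are deleted or expired, which this
    // hard-delete test relies on to fetch blobs after delete operations have been issued.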
    GetRequest getRequest = new GetRequest(1, "clientid2", flag, partitionRequestInfoList, GetOption.Include_All);
    channel.send(getRequest);
    InputStream stream = channel.receive().getInputStream();
    GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
    if (flag == MessageFormatFlags.BlobProperties) {
      for (int i = 0; i < blobsCount; i++) {
        BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
        Assert.assertEquals(properties.get(i).getBlobSize(), propertyOutput.getBlobSize());
        Assert.assertEquals("serviceid1", propertyOutput.getServiceId());
        Assert.assertEquals("AccountId mismatch", properties.get(i).getAccountId(), propertyOutput.getAccountId());
        Assert.assertEquals("ContainerId mismatch", properties.get(i).getContainerId(), propertyOutput.getContainerId());
      }
    } else if (flag == MessageFormatFlags.BlobUserMetadata) {
      for (int i = 0; i < blobsCount; i++) {
        ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
        Assert.assertArrayEquals(userMetadataOutput.array(), usermetadata.get(i));
      }
    } else if (flag == MessageFormatFlags.Blob) {
      for (int i = 0; i < blobsCount; i++) {
        BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
        Assert.assertEquals(properties.get(i).getBlobSize(), blobData.getSize());
        byte[] dataOutput = new byte[(int) blobData.getSize()];
        blobData.getStream().read(dataOutput);
        Assert.assertArrayEquals(dataOutput, data.get(i));
      }
    } else {
      throw new IllegalArgumentException("Unrecognized message format flag " + flag);
    }
  }
}
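For context, a typical call site for this helper might look like the following; a hedged sketch, assuming the test has already put the blobs in blobIdList through an established channel:

getAndVerify(channel, blobIdList.size());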
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class NonBlockingRouterTest method setOperationParams.
private void setOperationParams() {
  putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), testEncryption);
  putUserMetadata = new byte[10];
  random.nextBytes(putUserMetadata);
  putContent = new byte[PUT_CONTENT_SIZE];
  random.nextBytes(putContent);
  putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
}
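For readability, here is the same constructor call with each positional argument annotated; a hedged sketch inferred from the call sites on this page rather than from the authoritative signature:

putBlobProperties = new BlobProperties(
    -1,                                      // blob size (-1 in these tests)
    "serviceId",                             // service ID
    "memberId",                              // owner ID
    "contentType",                           // content type
    false,                                   // isPrivate
    Utils.Infinite_Time,                     // time to live, in seconds
    Utils.getRandomShort(TestUtils.RANDOM),  // account ID
    Utils.getRandomShort(TestUtils.RANDOM),  // container ID
    testEncryption);                         // isEncrypted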
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class NonBlockingRouterTest method testCompositeBlobDataChunksDelete.
/**
* Test that if a composite blob is deleted, the data chunks are eventually deleted. Also check the service IDs used
* for delete operations.
*/
@Test
public void testCompositeBlobDataChunksDelete() throws Exception {
  // Ensure there are 4 chunks.
  maxPutChunkSize = PUT_CONTENT_SIZE / 4;
  Properties props = getNonBlockingRouterProperties("DC1");
  VerifiableProperties verifiableProperties = new VerifiableProperties(props);
  RouterConfig routerConfig = new RouterConfig(verifiableProperties);
  MockClusterMap mockClusterMap = new MockClusterMap();
  MockTime mockTime = new MockTime();
  MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
  // Track deletes of the metadata blob + data chunks.
  final AtomicReference<CountDownLatch> deletesDoneLatch = new AtomicReference<>();
  final Map<String, String> blobsThatAreDeleted = new HashMap<>();
  LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
    @Override
    public void onBlobDeleted(String blobId, String serviceId) {
      blobsThatAreDeleted.put(blobId, serviceId);
      deletesDoneLatch.get().countDown();
    }
  };
  router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap), new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, mockTime);
  setOperationParams();
  String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
  String deleteServiceId = "delete-service";
  Set<String> blobsToBeDeleted = getBlobsInServers(mockServerLayout);
  int getRequestCount = mockServerLayout.getCount(RequestOrResponseType.GetRequest);
  // The third iteration is to test the case where the blob has expired.
  for (int i = 0; i < 3; i++) {
    if (i == 2) {
      // Create a clean cluster and put another blob that immediately expires.
      setOperationParams();
      putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false);
      blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
      Set<String> allBlobsInServer = getBlobsInServers(mockServerLayout);
      allBlobsInServer.removeAll(blobsToBeDeleted);
      blobsToBeDeleted = allBlobsInServer;
    }
    blobsThatAreDeleted.clear();
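    // Expect 5 delete notifications: 1 metadata blob + 4 data chunks.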
    deletesDoneLatch.set(new CountDownLatch(5));
    router.deleteBlob(blobId, deleteServiceId, null).get();
    Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Assert.assertTrue("All blobs in server are deleted", blobsThatAreDeleted.keySet().containsAll(blobsToBeDeleted));
    Assert.assertTrue("Only blobs in server are deleted", blobsToBeDeleted.containsAll(blobsThatAreDeleted.keySet()));
    for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
      String expectedServiceId = blobIdAndServiceId.getKey().equals(blobId) ? deleteServiceId : BackgroundDeleteRequest.SERVICE_ID_PREFIX + deleteServiceId;
      Assert.assertEquals("Unexpected service ID for deleted blob", expectedServiceId, blobIdAndServiceId.getValue());
    }
    // Each chunk deletion attempt initiates 1 background GET operation, which results in 2 GetRequests
    // at the servers.
    getRequestCount += 2;
    Assert.assertEquals("Only one attempt of chunk deletion should have been done", getRequestCount, mockServerLayout.getCount(RequestOrResponseType.GetRequest));
  }
  deletesDoneLatch.set(new CountDownLatch(5));
  router.deleteBlob(blobId, null, null).get();
  Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
  router.close();
  assertClosed();
  Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class MockReadableStreamChannel method verifyCompositeBlob.
/**
 * Verifies a composite blob's content and user metadata, chunk by chunk.
 * @param properties {@link BlobProperties} of the blob
 * @param originalPutContent original put content
 * @param originalUserMetadata original user metadata
 * @param dataBlobIds {@link List} of {@link StoreKey}s of the composite blob in context
 * @param request {@link com.github.ambry.protocol.PutRequest.ReceivedPutRequest} to fetch info from
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 * @throws Exception
 */
private void verifyCompositeBlob(BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata, List<StoreKey> dataBlobIds, PutRequest.ReceivedPutRequest request, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  StoreKey lastKey = dataBlobIds.get(dataBlobIds.size() - 1);
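  // Reassembly buffer for the full blob; the composite (metadata) put request carries the total blob size.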
  byte[] content = new byte[(int) request.getBlobProperties().getBlobSize()];
  AtomicInteger offset = new AtomicInteger(0);
  for (StoreKey key : dataBlobIds) {
    PutRequest.ReceivedPutRequest dataBlobPutRequest = deserializePutRequest(serializedRequests.get(key.getID()));
    AtomicInteger dataBlobLength = new AtomicInteger((int) dataBlobPutRequest.getBlobSize());
    InputStream dataBlobStream = dataBlobPutRequest.getBlobStream();
    if (!properties.isEncrypted()) {
      Utils.readBytesFromStream(dataBlobStream, content, offset.get(), dataBlobLength.get());
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, dataBlobPutRequest.getUsermetadata().array());
    } else {
      byte[] dataBlobContent = Utils.readBytesFromStream(dataBlobStream, dataBlobLength.get());
      // run() is called directly instead of spinning up a thread via start() because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(dataBlobPutRequest.getBlobId(), dataBlobPutRequest.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(dataBlobContent), dataBlobPutRequest.getUsermetadata().duplicate(), cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
        Assert.assertNull("Exception should not be thrown", exception);
        Assert.assertEquals("BlobId mismatch", dataBlobPutRequest.getBlobId(), result.getBlobId());
        Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
        dataBlobLength.set(result.getDecryptedBlobContent().remaining());
        result.getDecryptedBlobContent().get(content, offset.get(), dataBlobLength.get());
      }).run();
    }
    if (key != lastKey) {
      Assert.assertEquals("all chunks except last should be fully filled", chunkSize, dataBlobLength.get());
    } else {
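      // (length - 1) % chunkSize + 1 gives the size of the final chunk; e.g. 10 bytes with a
      // chunkSize of 4 yields chunks of 4, 4 and 2, since (10 - 1) % 4 + 1 = 2.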
Assert.assertEquals("Last chunk should be of non-zero length and equal to the length of the remaining bytes", (originalPutContent.length - 1) % chunkSize + 1, dataBlobLength.get());
}
offset.addAndGet(dataBlobLength.get());
Assert.assertEquals("dataBlobStream should have no more data", -1, dataBlobStream.read());
notificationSystem.verifyNotification(key.getID(), NotificationBlobType.DataChunk, dataBlobPutRequest.getBlobProperties());
}
Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
}
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class AmbryBlobStorageService method handlePost.
@Override
public void handlePost(RestRequest restRequest, RestResponseChannel restResponseChannel) {
  long processingStartTime = System.currentTimeMillis();
  long preProcessingTime = 0;
  handlePrechecks(restRequest, restResponseChannel);
  boolean sslUsed = restRequest.getSSLSession() != null;
  RestRequestMetrics metrics = frontendMetrics.postRequestMetricsGroup.getRestRequestMetrics(sslUsed, false);
  restRequest.getMetricsTracker().injectMetrics(metrics);
  try {
    logger.trace("Handling POST request - {}", restRequest.getUri());
    checkAvailable();
    // TODO: make this non-blocking once all handling of individual methods is moved to their own classes
    securityService.preProcessRequest(restRequest).get();
    long propsBuildStartTime = System.currentTimeMillis();
    accountAndContainerInjector.injectAccountAndContainerForPostRequest(restRequest);
    BlobProperties blobProperties = RestUtils.buildBlobProperties(restRequest.getArgs());
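    // Flag TTLs whose absolute expiry time (creation time + TTL, in seconds) would overflow an int.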
    if (blobProperties.getTimeToLiveInSeconds() + TimeUnit.MILLISECONDS.toSeconds(blobProperties.getCreationTimeInMs()) > Integer.MAX_VALUE) {
      logger.debug("TTL set to very large value in POST request with BlobProperties {}", blobProperties);
      frontendMetrics.ttlTooLargeError.inc();
    }
    // inject encryption metrics if applicable
    if (blobProperties.isEncrypted()) {
      metrics = frontendMetrics.postRequestMetricsGroup.getRestRequestMetrics(sslUsed, true);
      restRequest.getMetricsTracker().injectMetrics(metrics);
    }
    byte[] usermetadata = RestUtils.buildUsermetadata(restRequest.getArgs());
    frontendMetrics.blobPropsBuildTimeInMs.update(System.currentTimeMillis() - propsBuildStartTime);
    logger.trace("Blob properties of blob being POSTed - {}", blobProperties);
    PostCallback routerCallback = new PostCallback(restRequest, restResponseChannel, new BlobInfo(blobProperties, usermetadata));
    preProcessingTime = System.currentTimeMillis() - processingStartTime;
    SecurityProcessRequestCallback securityCallback = new SecurityProcessRequestCallback(restRequest, restResponseChannel, blobProperties, usermetadata, routerCallback);
    securityService.processRequest(restRequest, securityCallback);
  } catch (Exception e) {
    submitResponse(restRequest, restResponseChannel, null, extractExecutionExceptionCause(e));
  } finally {
    frontendMetrics.postPreProcessingTimeInMs.update(preProcessingTime);
  }
}