use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class ChunkFillTest method fillChunksAndAssertSuccess.
/**
 * Create a {@link PutOperation} and pass in a channel with the blobSize set by the caller; and test the chunk
 * filling flow for puts.
 * Note that this test is for the chunk filling flow, not for the ChunkFiller thread (which never gets exercised,
 * as we do not even instantiate the {@link PutManager})
 */
private void fillChunksAndAssertSuccess() throws Exception {
  VerifiableProperties vProps = getNonBlockingRouterProperties();
  MockClusterMap mockClusterMap = new MockClusterMap();
  RouterConfig routerConfig = new RouterConfig(vProps);
  routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
  ResponseHandler responseHandler = new ResponseHandler(mockClusterMap);
  Random random = new Random();
  short accountId = Utils.getRandomShort(random);
  short containerId = Utils.getRandomShort(random);
  BlobProperties putBlobProperties =
      new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId,
          containerId, testEncryption, null, null, null);
  byte[] putUserMetadata = new byte[10];
  random.nextBytes(putUserMetadata);
  putContent = new byte[blobSize];
  random.nextBytes(putContent);
  final ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
  FutureResult<String> futureResult = new FutureResult<String>();
  MockTime time = new MockTime();
  MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
  if (testEncryption) {
    kms = new MockKeyManagementService(new KMSConfig(vProps),
        TestUtils.getRandomKey(SingleKeyManagementServiceTest.DEFAULT_KEY_SIZE_CHARS));
    cryptoService = new MockCryptoService(new CryptoServiceConfig(vProps));
    cryptoJobHandler = new CryptoJobHandler(CryptoJobHandlerTest.DEFAULT_THREAD_COUNT);
  }
  MockRouterCallback routerCallback =
      new MockRouterCallback(networkClientFactory.getNetworkClient(), Collections.EMPTY_LIST);
  PutOperation op =
      PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(),
          new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult,
          null, routerCallback, null, kms, cryptoService, cryptoJobHandler, time, putBlobProperties,
          MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
  op.startOperation();
  numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
  compositeBuffers = new ByteBuf[numChunks];
  compositeEncryptionKeys = new ByteBuffer[numChunks];
  compositeBlobIds = new BlobId[numChunks];
  final AtomicReference<Exception> operationException = new AtomicReference<Exception>(null);
  int chunksLeftToBeFilled = numChunks;
  do {
    if (testEncryption) {
      int chunksPerBatch = Math.min(routerConfig.routerMaxInMemPutChunks, chunksLeftToBeFilled);
      CountDownLatch onPollLatch = new CountDownLatch(chunksPerBatch);
      routerCallback.setOnPollLatch(onPollLatch);
      op.fillChunks();
      Assert.assertTrue("Latch should have been zeroed out", onPollLatch.await(1000, TimeUnit.MILLISECONDS));
      chunksLeftToBeFilled -= chunksPerBatch;
    } else {
      op.fillChunks();
    }
    // All filled chunks should be in the Ready state at this point, since the channel is ByteBuffer based.
    for (PutOperation.PutChunk putChunk : op.putChunks) {
      if (putChunk.isFree()) {
        continue;
      }
      Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
      ByteBuf buf = putChunk.buf.retainedDuplicate();
      totalSizeWritten += buf.readableBytes();
      compositeBuffers[putChunk.getChunkIndex()] = buf;
      if (testEncryption) {
        compositeEncryptionKeys[putChunk.getChunkIndex()] = putChunk.encryptedPerBlobKey.duplicate();
        compositeBlobIds[putChunk.getChunkIndex()] = putChunk.chunkBlobId;
      }
      putChunk.clear();
    }
  } while (!op.isChunkFillingDone());
  if (!testEncryption) {
    Assert.assertEquals("Total size written out should match the blob size", blobSize, totalSizeWritten);
  }
  // For the encrypted path, the size is implicitly verified via assertDataIdentity.
  Exception exception = operationException.get();
  if (exception != null) {
    throw exception;
  }
  assertDataIdentity(mockClusterMap);
}
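
The assertDataIdentity helper called at the end is not shown in this excerpt. A hedged sketch (not from the test class) of what the unencrypted half of such a check could look like, using the compositeBuffers and putContent fields populated above:

// Hypothetical helper: stitch the captured chunk buffers together in chunk-index order
// and compare them byte-for-byte against the original content.
private void assertUnencryptedDataIdentity() {
  ByteBuffer reconstructed = ByteBuffer.allocate(blobSize);
  for (ByteBuf chunkBuf : compositeBuffers) {
    byte[] chunkBytes = new byte[chunkBuf.readableBytes()];
    chunkBuf.readBytes(chunkBytes);
    reconstructed.put(chunkBytes);
    chunkBuf.release();  // release the retained duplicate captured during chunk filling
  }
  Assert.assertArrayEquals("Filled chunks should reproduce the original content", putContent,
      reconstructed.array());
}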
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class NonBlockingRouterTest method testCompositeBlobDataChunksDeleteMaxDeleteOperation.
protected void testCompositeBlobDataChunksDeleteMaxDeleteOperation(int maxDeleteOperation) throws Exception {
  try {
    // Ensure there are 4 chunks.
    maxPutChunkSize = PUT_CONTENT_SIZE / 4;
    Properties props = getNonBlockingRouterProperties("DC1");
    if (maxDeleteOperation != 0) {
      props.setProperty(RouterConfig.ROUTER_BACKGROUND_DELETER_MAX_CONCURRENT_OPERATIONS,
          Integer.toString(maxDeleteOperation));
    }
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
    RouterConfig routerConfig = new RouterConfig(verifiableProperties);
    MockClusterMap mockClusterMap = new MockClusterMap();
    MockTime mockTime = new MockTime();
    MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
    // The latch counts one delete per blob: the metadata blob + the data chunks.
    final AtomicReference<CountDownLatch> deletesDoneLatch = new AtomicReference<>();
    final Map<String, String> blobsThatAreDeleted = new HashMap<>();
    LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
      @Override
      public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
        blobsThatAreDeleted.put(blobId, serviceId);
        deletesDoneLatch.get().countDown();
      }
    };
    NonBlockingRouterMetrics localMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
    router = new NonBlockingRouter(routerConfig, localMetrics,
        new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
            CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms,
        cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
    setOperationParams();
    String blobId =
        router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
    String deleteServiceId = "delete-service";
    Set<String> blobsToBeDeleted = getBlobsInServers(mockServerLayout);
    int getRequestCount = mockServerLayout.getCount(RequestOrResponseType.GetRequest);
    // The third iteration is to test the case where the blob has expired.
    for (int i = 0; i < 3; i++) {
      if (i == 2) {
        // Create a clean cluster and put another blob that immediately expires.
        setOperationParams();
        putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, 0,
            Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
        blobId =
            router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
        Set<String> allBlobsInServer = getBlobsInServers(mockServerLayout);
        allBlobsInServer.removeAll(blobsToBeDeleted);
        blobsToBeDeleted = allBlobsInServer;
      }
      blobsThatAreDeleted.clear();
      deletesDoneLatch.set(new CountDownLatch(5));
      router.deleteBlob(blobId, deleteServiceId).get();
      Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS,
          deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
      Assert.assertTrue("All blobs in server are deleted",
          blobsThatAreDeleted.keySet().containsAll(blobsToBeDeleted));
      Assert.assertTrue("Only blobs in server are deleted",
          blobsToBeDeleted.containsAll(blobsThatAreDeleted.keySet()));
      for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
        String expectedServiceId = blobIdAndServiceId.getKey().equals(blobId) ? deleteServiceId
            : BackgroundDeleteRequest.SERVICE_ID_PREFIX + deleteServiceId;
        Assert.assertEquals("Unexpected service ID for deleted blob", expectedServiceId,
            blobIdAndServiceId.getValue());
      }
      // For 1 chunk deletion attempt, 1 background GET operation is initiated, which results in 2 GetRequests at
      // the servers.
      getRequestCount += 2;
      Assert.assertEquals("Only one attempt of chunk deletion should have been done", getRequestCount,
          mockServerLayout.getCount(RequestOrResponseType.GetRequest));
    }
    deletesDoneLatch.set(new CountDownLatch(5));
    router.deleteBlob(blobId, null).get();
    Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS,
        deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    Assert.assertEquals("Get should NOT have been skipped", 0, localMetrics.skippedGetBlobCount.getCount());
  } finally {
    if (router != null) {
      router.close();
      assertClosed();
      Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
    }
  }
}
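
The hard-coded latch count of 5 follows directly from the chunking arithmetic set up at the top of the test. A small illustrative computation (assuming PUT_CONTENT_SIZE is a multiple of 4, as the "Ensure there are 4 chunks" comment implies):

// Illustration only: why the CountDownLatch is initialized to 5.
int numDataChunks = PUT_CONTENT_SIZE / maxPutChunkSize;  // = 4, since maxPutChunkSize = PUT_CONTENT_SIZE / 4
int expectedDeletes = numDataChunks + 1;                 // 4 data chunks + 1 metadata blob
deletesDoneLatch.set(new CountDownLatch(expectedDeletes));  // equivalent to new CountDownLatch(5)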
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class GetBlobOperationTest method doDirectPut.
/**
 * Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
 * @param blobType the {@link BlobType} for the blob to upload.
 * @param blobContent the raw content for the blob to upload (i.e. this can be serialized composite blob metadata or
 *                    an encrypted blob).
 */
private void doDirectPut(BlobType blobType, ByteBuf blobContent) throws Exception {
  List<PartitionId> writablePartitionIds = mockClusterMap.getWritablePartitionIds(null);
  PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
  blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE,
      mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(),
      partitionId, blobProperties.isEncrypted(),
      blobType == BlobType.MetadataBlob ? BlobId.BlobDataType.METADATA : BlobId.BlobDataType.DATACHUNK);
  blobIdStr = blobId.getID();
  Iterator<MockServer> servers = partitionId.getReplicaIds()
      .stream()
      .map(ReplicaId::getDataNodeId)
      .map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort()))
      .iterator();
  ByteBuffer blobEncryptionKey = null;
  ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
  if (blobProperties.isEncrypted()) {
    FutureResult<EncryptJob.EncryptJobResult> futureResult = new FutureResult<>();
    cryptoJobHandler.submitJob(new EncryptJob(blobProperties.getAccountId(), blobProperties.getContainerId(),
        blobType == BlobType.MetadataBlob ? null : blobContent.retainedDuplicate(), userMetadataBuf.duplicate(),
        kms.getRandomKey(), cryptoService, kms, null, new CryptoJobMetricsTracker(routerMetrics.encryptJobMetrics),
        futureResult::done));
    EncryptJob.EncryptJobResult result = futureResult.get(5, TimeUnit.SECONDS);
    blobEncryptionKey = result.getEncryptedKey();
    if (blobType != BlobType.MetadataBlob) {
      blobContent.release();
      blobContent = result.getEncryptedBlobContent();
    }
    userMetadataBuf = result.getEncryptedUserMetadata();
  }
  while (servers.hasNext()) {
    MockServer server = servers.next();
    PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties,
        userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), blobType,
        blobEncryptionKey == null ? null : blobEncryptionKey.duplicate());
    // Make sure we release the BoundedNettyByteBufReceive.
    server.send(request).release();
    request.release();
  }
  blobContent.release();
}
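
A hypothetical call site for this helper (the literal size and the Netty Unpooled usage are illustrative, not from the original test):

// Sketch: directly put a simple data blob onto every replica of a random writable partition.
byte[] rawContent = new byte[100];  // hypothetical size
random.nextBytes(rawContent);
doDirectPut(BlobType.DataBlob, Unpooled.wrappedBuffer(rawContent));
// The blobId/blobIdStr fields are now set and can be used to issue gets against the mock servers.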
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class MockReadableStreamChannel method testChannelSizeNotSizeInPropertiesPutSuccess.
/**
 * Test that the size in BlobProperties is ignored for puts, by attempting puts with varying values for size in
 * BlobProperties.
 */
@Test
public void testChannelSizeNotSizeInPropertiesPutSuccess() throws Exception {
  int[] actualBlobSizes = {0, chunkSize - 1, chunkSize, chunkSize + 1, chunkSize * 2, chunkSize * 2 + 1};
  for (int actualBlobSize : actualBlobSizes) {
    int[] sizesInProperties = {actualBlobSize - 1, actualBlobSize + 1};
    for (int sizeInProperties : sizesInProperties) {
      requestAndResultsList.clear();
      RequestAndResult requestAndResult = new RequestAndResult(0);
      // Change the actual content size.
      requestAndResult.putContent = new byte[actualBlobSize];
      requestAndResult.putBlobProperties =
          new BlobProperties(sizeInProperties, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
              Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false,
              EXTERNAL_ASSET_TAG, null, null);
      random.nextBytes(requestAndResult.putContent);
      requestAndResultsList.add(requestAndResult);
      submitPutsAndAssertSuccess(true);
    }
  }
}
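
The point of the test, shown as a minimal standalone sketch (not part of the original class; constructor arguments follow the patterns used elsewhere in this document): the router reads until the channel is exhausted, so a mismatched size in BlobProperties is harmless.

// Illustration only: the channel content, not the size passed to BlobProperties, determines what is stored.
byte[] data = new byte[123];
ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(data));
BlobProperties propsWithWrongSize =
    new BlobProperties(7 /* deliberately wrong */, "serviceId", "memberId", "contentType", false,
        Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false,
        null, null, null);
// A put through the router with these arguments would succeed and store all 123 bytes.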
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class PutOperationTest method testSendIncomplete.
/**
 * Ensure that if any of the requests associated with the buffer of a PutChunk is not completely read out even
 * after the associated chunk is complete, the buffer is not reused even though the PutChunk is reused.
 */
@Test
public void testSendIncomplete() throws Exception {
  int numChunks = routerConfig.routerMaxInMemPutChunks + 1;
  BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false,
      Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false,
      null, null, null);
  byte[] userMetadata = new byte[10];
  byte[] content = new byte[chunkSize * numChunks];
  random.nextBytes(content);
  ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
  FutureResult<String> future = new FutureResult<>();
  MockNetworkClient mockNetworkClient = new MockNetworkClient();
  PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap,
      new LoggingNotificationSystem(), new InMemAccountService(true, false), userMetadata, channel,
      PutBlobOptions.DEFAULT, future, null, new RouterCallback(mockNetworkClient, new ArrayList<>()), null, null,
      null, null, time, blobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
  op.startOperation();
  List<RequestInfo> requestInfos = new ArrayList<>();
  requestRegistrationCallback.setRequestsToSend(requestInfos);
  // Since this channel is in memory, one call to fill chunks would end up filling the maximum number of PutChunks.
  op.fillChunks();
  Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled",
      mockNetworkClient.getAndClearWokenUpStatus());
  // A poll should therefore return requestParallelism number of requests from each chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(routerConfig.routerMaxInMemPutChunks * requestParallelism, requestInfos.size());
  // There are routerMaxInMemPutChunks + 1 data chunks for this blob (and a metadata chunk).
  // Once the first chunk is completely sent out, the first PutChunk will be reused. What the test verifies is that
  // the buffer of the first PutChunk does not get reused. It does this as follows:
  // For the first chunk,
  // 1. use the first request to succeed the chunk (the successTarget is set to 1).
  // 2. read and store the data from the second request for comparing later.
  // 3. read from the third request after the first PutChunk gets reused and ensure that the data from the third is
  //    the same as what was saved off from the second. This means that the buffer was not reused by the first
  //    PutChunk.
  // 1.
  ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
  PutResponse putResponse = responseInfo.getError() == null
      ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  requestInfos.get(0).getRequest().release();
  responseInfo.release();
  // 2.
  PutRequest putRequest = (PutRequest) requestInfos.get(1).getRequest();
  ByteBuffer buf = ByteBuffer.allocate((int) putRequest.sizeInBytes());
  ByteBufferChannel bufChannel = new ByteBufferChannel(buf);
  // Read it out (which also marks this request as complete).
  putRequest.writeTo(bufChannel);
  putRequest.release();
  byte[] expectedRequestContent = buf.array();
  // 3.
  // First, save the third request.
  PutRequest savedRequest = (PutRequest) requestInfos.get(2).getRequest();
  // Succeed all the other requests.
  for (int i = 3; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null
        ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  // Fill the first PutChunk with the last chunk.
  op.fillChunks();
  // Verify that the last chunk was filled.
  requestInfos.clear();
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  // Verify that the buffer of the third request is not affected.
  buf = ByteBuffer.allocate((int) savedRequest.sizeInBytes());
  bufChannel = new ByteBufferChannel(buf);
  savedRequest.writeTo(bufChannel);
  savedRequest.release();
  byte[] savedRequestContent = buf.array();
  // Reset the correlation ids, as they will be different between the two requests.
  resetCorrelationId(expectedRequestContent);
  resetCorrelationId(savedRequestContent);
  Assert.assertArrayEquals("Underlying buffer should not have been reused", expectedRequestContent,
      savedRequestContent);
  // Succeed the requests for the last data chunk; the release of its buffer is internal to the chunk (though this
  // can be verified via coverage).
  for (int i = 0; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null
        ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  requestInfos.clear();
  // This should return requests for the metadata chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  Assert.assertFalse("Operation should not be complete yet", op.isOperationComplete());
  // Once the metadata request succeeds, it should complete the operation.
  responseInfo = getResponseInfo(requestInfos.get(0));
  putResponse = responseInfo.getError() == null
      ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  responseInfo.release();
  requestInfos.forEach(info -> info.getRequest().release());
  Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
}
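
The resetCorrelationId helper is not shown in this excerpt. A plausible sketch, assuming a request layout in which the correlation id is the 4-byte int that follows the total size (long), request type (short), and version (short):

// Hedged sketch of the helper: zero out the correlation id so that two otherwise identical
// serialized requests can be compared byte-for-byte.
private void resetCorrelationId(byte[] request) {
  // Layout assumption: size (8 bytes) + type (2 bytes) + version (2 bytes) precede the correlation id.
  int correlationIdOffset = 8 + 2 + 2;
  ByteBuffer.wrap(request).putInt(correlationIdOffset, 0);
}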