use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class NonBlockingRouterTestBase method setOperationParams.
/**
* Setup test suite to perform a {@link Router#putBlob} call.
* @param putContentSize the size of the content to put.
* @param ttlSecs the TTL in seconds for the blob.
*/
protected void setOperationParams(int putContentSize, long ttlSecs) {
putBlobProperties =
    new BlobProperties(-1, "serviceId", "memberId", "contentType", false, ttlSecs,
        Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), testEncryption, null, null,
        null);
putUserMetadata = new byte[USER_METADATA_SIZE];
random.nextBytes(putUserMetadata);
putContent = new byte[putContentSize];
random.nextBytes(putContent);
putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
}
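A test would typically call this helper and then hand the prepared fields to the router. A minimal usage sketch (the router instance and the TTL/timeout constants are assumptions, not part of the base class):
// Hypothetical usage; `router`, TTL_SECS and AWAIT_TIMEOUT_MS are assumed to exist in the test.
setOperationParams(1024, TTL_SECS);
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel,
    new PutBlobOptionsBuilder().build()).get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);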
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class ChunkFillTest method testChunkNumAndSizeCalculations.
/**
* Test the calculation of number of chunks and the size of each chunk, using a very large blob size. No content
* comparison is done. This test does not consume memory more than chunkSize.
*/
@Test
public void testChunkNumAndSizeCalculations() throws Exception {
chunkSize = 4 * 1024 * 1024;
// a large blob greater than Integer.MAX_VALUE and not at chunk size boundary.
final long blobSize = ((long) Integer.MAX_VALUE / chunkSize + 1) * chunkSize + random.nextInt(chunkSize - 1) + 1;
VerifiableProperties vProps = getNonBlockingRouterProperties();
MockClusterMap mockClusterMap = new MockClusterMap();
RouterConfig routerConfig = new RouterConfig(vProps);
NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
Random random = new Random();
short accountId = Utils.getRandomShort(random);
short containerId = Utils.getRandomShort(random);
BlobProperties putBlobProperties =
    new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId,
        containerId, false, null, null, null);
byte[] putUserMetadata = new byte[10];
random.nextBytes(putUserMetadata);
final MockReadableStreamChannel putChannel = new MockReadableStreamChannel(blobSize, false);
FutureResult<String> futureResult = new FutureResult<String>();
MockTime time = new MockTime();
MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
PutOperation op =
    PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(),
        new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult,
        null, new RouterCallback(networkClientFactory.getNetworkClient(), new ArrayList<>()), null, null, null,
        null, new MockTime(), putBlobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
op.startOperation();
numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
// blobSize is not a multiple of chunkSize, so expect one extra, partially filled chunk.
int expectedNumChunks = (int) (blobSize / chunkSize + 1);
Assert.assertEquals("numChunks should be as expected", expectedNumChunks, numChunks);
int lastChunkSize = (int) (blobSize % chunkSize);
final AtomicReference<Exception> channelException = new AtomicReference<Exception>(null);
int chunkIndex = 0;
// The write to the MockReadableStreamChannel blocks until the data is read as part of the chunk filling,
// so create a thread that fills the MockReadableStreamChannel.
Utils.newThread(new Runnable() {
@Override
public void run() {
try {
byte[] writeBuf = new byte[chunkSize];
long written = 0;
while (written < blobSize) {
int toWrite = (int) Math.min(chunkSize, blobSize - written);
putChannel.write(ByteBuffer.wrap(writeBuf, 0, toWrite));
written += toWrite;
}
} catch (Exception e) {
channelException.set(e);
}
}
}, false).start();
// Do the chunk filling.
boolean fillingComplete = false;
do {
op.fillChunks();
// Examine each chunk after filling; fills are deterministic since the channel is ByteBuffer based.
for (PutOperation.PutChunk putChunk : op.putChunks) {
Assert.assertNull("Mock channel write should not have caused an exception", channelException.get());
if (putChunk.isFree()) {
continue;
}
if (chunkIndex == numChunks - 1) {
// last chunk may not be Ready as it is dependent on the completion callback to be called.
Assert.assertTrue("Chunk should be Building or Ready.", putChunk.getState() == PutOperation.ChunkState.Ready || putChunk.getState() == PutOperation.ChunkState.Building);
if (putChunk.getState() == PutOperation.ChunkState.Ready) {
Assert.assertEquals("Chunk size should be the last chunk size", lastChunkSize, putChunk.buf.readableBytes());
Assert.assertTrue("Chunk Filling should be complete at this time", op.isChunkFillingDone());
fillingComplete = true;
}
} else {
// if not last chunk, then the chunk should be full and Ready.
Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
Assert.assertEquals("Chunk size should be maxChunkSize", chunkSize, putChunk.buf.readableBytes());
chunkIndex++;
putChunk.clear();
}
}
} while (!fillingComplete);
}
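The chunk-count arithmetic asserted above can be checked in isolation. A minimal sketch, assuming RouterUtils.getNumChunksForBlobAndChunkSize amounts to ceiling division (which is what the test's expectedNumChunks encodes for a non-aligned blob size):
// Standalone check: ceiling division gives blobSize / chunkSize full chunks plus one partial chunk.
int chunkSize = 4 * 1024 * 1024;
long blobSize = ((long) Integer.MAX_VALUE / chunkSize + 1) * chunkSize + 7; // not a multiple of chunkSize
int numChunks = (int) ((blobSize + chunkSize - 1) / chunkSize); // ceil(blobSize / chunkSize)
int lastChunkSize = (int) (blobSize % chunkSize); // 7 here; 0 would mean the last chunk is full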
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class MockReadableStreamChannel method verifyCompositeBlob.
/**
* Verify a composite blob's content and userMetadata.
* @param properties {@link BlobProperties} of the blob
* @param originalPutContent original put content
* @param originalUserMetadata original user-metadata
* @param dataBlobIds {@link List} of {@link StoreKey}s of the composite blob in context
* @param request {@link com.github.ambry.protocol.PutRequest} to fetch info from
* @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
* @throws Exception
*/
private void verifyCompositeBlob(BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata,
    List<StoreKey> dataBlobIds, PutRequest request, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
StoreKey lastKey = dataBlobIds.get(dataBlobIds.size() - 1);
byte[] content = new byte[(int) request.getBlobProperties().getBlobSize()];
AtomicInteger offset = new AtomicInteger(0);
for (StoreKey key : dataBlobIds) {
PutRequest dataBlobPutRequest = deserializePutRequest(serializedRequests.get(key.getID()));
AtomicInteger dataBlobLength = new AtomicInteger((int) dataBlobPutRequest.getBlobSize());
InputStream dataBlobStream = dataBlobPutRequest.getBlobStream();
if (!properties.isEncrypted()) {
Utils.readBytesFromStream(dataBlobStream, content, offset.get(), dataBlobLength.get());
Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, dataBlobPutRequest.getUsermetadata().array());
} else {
byte[] dataBlobContent = Utils.readBytesFromStream(dataBlobStream, dataBlobLength.get());
// Call run() directly instead of spinning up a thread (i.e. calling start()) because exceptions or
// assertion failures in a non-main thread would not fail the test.
new DecryptJob(dataBlobPutRequest.getBlobId(), dataBlobPutRequest.getBlobEncryptionKey().duplicate(),
    Unpooled.wrappedBuffer(dataBlobContent), dataBlobPutRequest.getUsermetadata().duplicate(), cryptoService, kms,
    null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
Assert.assertNull("Exception should not be thrown", exception);
assertEquals("BlobId mismatch", dataBlobPutRequest.getBlobId(), result.getBlobId());
Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
dataBlobLength.set(decryptedBlobContent.readableBytes());
decryptedBlobContent.readBytes(content, offset.get(), dataBlobLength.get());
decryptedBlobContent.release();
}).run();
}
if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2 && key != lastKey) {
assertEquals("all chunks except last should be fully filled", chunkSize, dataBlobLength.get());
} else if (key == lastKey) {
assertEquals("Last chunk should be of non-zero length and equal to the length of the remaining bytes", (originalPutContent.length - 1) % chunkSize + 1, dataBlobLength.get());
}
offset.addAndGet(dataBlobLength.get());
assertEquals("dataBlobStream should have no more data", -1, dataBlobStream.read());
notificationSystem.verifyNotification(key.getID(), NotificationBlobType.DataChunk, dataBlobPutRequest.getBlobProperties());
}
Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
}
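The (length - 1) % chunkSize + 1 expression used for the last chunk is worth noting: unlike a plain modulo, it yields chunkSize rather than 0 when the content length is an exact multiple of chunkSize. A small illustrative check:
int chunkSize = 4;
Assert.assertEquals(4, (8 - 1) % chunkSize + 1); // exact multiple: the last chunk is full
Assert.assertEquals(1, (9 - 1) % chunkSize + 1); // one byte past a boundary: the last chunk holds 1 byte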
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class MockReadableStreamChannel method testChunkFillerSleepWithBuildingChunk.
/**
* Test that when the channel is closed while the chunk filler thread is not responding, PutManager releases the data chunks.
* @throws Exception
*/
@Test
public void testChunkFillerSleepWithBuildingChunk() throws Exception {
VerifiableProperties vProps = getRouterConfigInVerifiableProperties();
MockNetworkClient networkClient =
    new MockNetworkClientFactory(vProps, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
        CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime).getMockNetworkClient();
PutManager manager =
    new PutManager(mockClusterMap, new ResponseHandler(mockClusterMap), notificationSystem, new RouterConfig(vProps),
        new NonBlockingRouterMetrics(mockClusterMap, null), new RouterCallback(networkClient, null), "0", kms,
        cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
BlobProperties blobProperties =
    new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
        Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
byte[] userMetadata = new byte[10];
byte[] content = new byte[chunkSize / 4];
random.nextBytes(content);
// Make sure we are not sending the entire chunk.
MockReadableStreamChannel channel = new MockReadableStreamChannel(chunkSize / 2, false);
FutureResult<String> future = new FutureResult<>();
manager.submitPutBlobOperation(blobProperties, userMetadata, channel, PutBlobOptions.DEFAULT, future, null, null);
channel.write(ByteBuffer.wrap(content));
// Wait until the op has a building chunk and the chunk filler thread is sleeping.
PutOperation op = manager.getPutOperations().iterator().next();
Assert.assertFalse(op.isOperationComplete());
PutOperation.PutChunk putChunk = op.putChunks.iterator().next();
Assert.assertTrue(putChunk.isBuilding());
manager.forceChunkFillerThreadToSleep();
Thread chunkFillerThread = TestUtils.getThreadByThisName("ChunkFillerThread");
Assert.assertTrue("ChunkFillerThread should have gone to WAITING state as there are no active operations", waitForThreadState(chunkFillerThread, Thread.State.WAITING));
channel.beBad();
channel.write(ByteBuffer.wrap(content));
Assert.assertTrue(op.isOperationComplete());
Assert.assertTrue(putChunk.isBuilding());
manager.poll(new ArrayList<>(), new HashSet<>());
Assert.assertTrue(putChunk.isDataReleased());
Assert.assertEquals(0, manager.getPutOperations().size());
// Make sure this static field's value stays the same.
NonBlockingRouter.currentOperationsCount.incrementAndGet();
manager.close();
}
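waitForThreadState is a helper that this snippet does not show. A minimal poll-based sketch of what such a helper might look like (the timeout and poll interval are assumptions):
// Hypothetical helper: polls until the thread reaches the target state or a deadline passes.
private static boolean waitForThreadState(Thread thread, Thread.State targetState) throws InterruptedException {
  long deadlineMs = System.currentTimeMillis() + 1000; // assumed one-second budget
  while (thread.getState() != targetState) {
    if (System.currentTimeMillis() > deadlineMs) {
      return false;
    }
    Thread.sleep(10); // assumed poll interval
  }
  return true;
}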
use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.
the class UndeleteManagerTest method setup.
@Before
public void setup() throws Exception {
blobIds.clear();
for (int i = 0; i < BLOBS_COUNT; i++) {
ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(PUT_CONTENT));
BlobProperties putBlobProperties =
    new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
        Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
String blobId = router.putBlob(putBlobProperties, new byte[0], putChannel, new PutBlobOptionsBuilder().build())
    .get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
blobIds.add(blobId);
// Make sure all the mock servers have this put
BlobId id = new BlobId(blobId, clusterMap);
for (MockServer server : serverLayout.getMockServers()) {
if (!server.getBlobs().containsKey(blobId)) {
server.send(new PutRequest(NonBlockingRouter.correlationIdGenerator.incrementAndGet(),
    routerConfig.routerHostname, id, putBlobProperties, ByteBuffer.wrap(new byte[0]),
    Unpooled.wrappedBuffer(PUT_CONTENT), PUT_CONTENT.length, BlobType.DataBlob, null)).release();
}
}
}
undeleteManager = new UndeleteManager(clusterMap, new ResponseHandler(clusterMap), new LoggingNotificationSystem(), accountService, routerConfig, metrics, time);
networkClient = networkClientFactory.getNetworkClient();
}
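With the blobs staged on all mock servers, tests built on this setup exercise the undeleteManager. A hedged sketch of one such call (the exact submitUndeleteOperation signature and the polling loop are assumptions):
// Hypothetical follow-on usage; the method signature is an assumption.
FutureResult<Void> future = new FutureResult<>();
undeleteManager.submitUndeleteOperation(blobIds.get(0), "serviceId", future, null);
// The test would then poll undeleteManager for requests and send them via networkClient.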