use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class OperationTrackerTest method operationClassTest.
/**
* Test that the operation tracker can correctly populate parameters (i.e. successTarget) based on the input {@link RouterOperation}.
*/
@Test
public void operationClassTest() {
Properties props = new Properties();
props.setProperty("router.hostname", "localhost");
props.setProperty("router.datacenter.name", "dc-0");
props.setProperty("router.get.success.target", "1");
props.setProperty("router.put.success.target", "2");
props.setProperty("router.delete.success.target", "2");
props.setProperty("router.ttl.update.success.target", "2");
RouterConfig routerConfig = new RouterConfig(new VerifiableProperties(props));
initialize();
NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
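// Expected success target for each operation type, matching the router.*.success.target properties set above.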
Map<RouterOperation, Integer> operationAndSuccessTarget = new HashMap<>();
operationAndSuccessTarget.put(RouterOperation.GetBlobOperation, 1);
operationAndSuccessTarget.put(RouterOperation.GetBlobInfoOperation, 1);
operationAndSuccessTarget.put(RouterOperation.PutOperation, 2);
operationAndSuccessTarget.put(RouterOperation.DeleteOperation, 2);
operationAndSuccessTarget.put(RouterOperation.TtlUpdateOperation, 2);
for (Map.Entry<RouterOperation, Integer> entry : operationAndSuccessTarget.entrySet()) {
SimpleOperationTracker operationTracker = null;
switch(operationTrackerType) {
case SIMPLE_OP_TRACKER:
operationTracker = new SimpleOperationTracker(routerConfig, entry.getKey(), mockPartition, originatingDcName, true, routerMetrics);
break;
case ADAPTIVE_OP_TRACKER:
try {
operationTracker = new AdaptiveOperationTracker(routerConfig, routerMetrics, entry.getKey(), mockPartition, originatingDcName, time);
} catch (IllegalArgumentException e) {
assertTrue("Get operation shouldn't throw any exception in adaptive tracker", entry.getKey() != RouterOperation.GetBlobOperation && entry.getKey() != RouterOperation.GetBlobInfoOperation);
}
break;
}
// ensure the success target matches the number specified for each type of operation
if (operationTracker != null) {
assertEquals("The suggest target doesn't match", (long) entry.getValue(), operationTracker.getSuccessTarget(ReplicaType.DISK_BACKED));
}
}
}
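As a hedged illustration (not part of the test above), the sketch below shows how a tracker configured this way is typically driven until its success target is met. It assumes the OperationTracker API used elsewhere in the router — getReplicaIterator(), onResponse(), isDone(), hasSucceeded() and the TrackedRequestFinalState enum — none of which appears in this snippet, so treat the exact method names as assumptions.
SimpleOperationTracker tracker = new SimpleOperationTracker(routerConfig, RouterOperation.PutOperation, mockPartition, originatingDcName, true, routerMetrics);
Iterator<ReplicaId> replicaIterator = tracker.getReplicaIterator();
while (!tracker.isDone() && replicaIterator.hasNext()) {
  ReplicaId replica = replicaIterator.next();
  // remove() marks the replica as in-flight; for this sketch every request is assumed to succeed immediately.
  replicaIterator.remove();
  tracker.onResponse(replica, TrackedRequestFinalState.SUCCESS);
}
assertTrue("Tracker should report success once the configured success target (2 for puts) is met", tracker.hasSucceeded());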
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ChunkFillTest method testChunkNumAndSizeCalculations.
/**
* Test the calculation of number of chunks and the size of each chunk, using a very large blob size. No content
* comparison is done. This test does not consume more memory than chunkSize.
*/
@Test
public void testChunkNumAndSizeCalculations() throws Exception {
chunkSize = 4 * 1024 * 1024;
Random random = new Random();
// a large blob greater than Integer.MAX_VALUE and not at a chunk size boundary.
final long blobSize = ((long) Integer.MAX_VALUE / chunkSize + 1) * chunkSize + random.nextInt(chunkSize - 1) + 1;
VerifiableProperties vProps = getNonBlockingRouterProperties();
MockClusterMap mockClusterMap = new MockClusterMap();
RouterConfig routerConfig = new RouterConfig(vProps);
NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
short accountId = Utils.getRandomShort(random);
short containerId = Utils.getRandomShort(random);
BlobProperties putBlobProperties = new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId, containerId, false, null, null, null);
byte[] putUserMetadata = new byte[10];
random.nextBytes(putUserMetadata);
final MockReadableStreamChannel putChannel = new MockReadableStreamChannel(blobSize, false);
FutureResult<String> futureResult = new FutureResult<String>();
MockTime time = new MockTime();
MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult, null, new RouterCallback(networkClientFactory.getNetworkClient(), new ArrayList<>()), null, null, null, null, new MockTime(), putBlobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
op.startOperation();
numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
// blobSize is not a multiple of chunkSize
int expectedNumChunks = (int) (blobSize / chunkSize + 1);
Assert.assertEquals("numChunks should be as expected", expectedNumChunks, numChunks);
int lastChunkSize = (int) (blobSize % chunkSize);
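// Worked example of the arithmetic above (the remainder r is random, so exact values vary): with chunkSize = 4 MiB,
// (long) Integer.MAX_VALUE / chunkSize = 511, so the base size is 512 * 4194304 = 2147483648 bytes; adding a
// remainder r in [1, chunkSize - 1] gives 512 full chunks plus one partial chunk of r bytes, i.e. 513 chunks,
// and lastChunkSize == r.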
final AtomicReference<Exception> channelException = new AtomicReference<Exception>(null);
int chunkIndex = 0;
// The write to the MockReadableStreamChannel blocks until the data is read as part of the chunk filling,
// so create a thread that fills the MockReadableStreamChannel.
Utils.newThread(new Runnable() {
@Override
public void run() {
try {
byte[] writeBuf = new byte[chunkSize];
long written = 0;
while (written < blobSize) {
int toWrite = (int) Math.min(chunkSize, blobSize - written);
putChannel.write(ByteBuffer.wrap(writeBuf, 0, toWrite));
written += toWrite;
}
} catch (Exception e) {
channelException.set(e);
}
}
}, false).start();
// Do the chunk filling.
boolean fillingComplete = false;
do {
op.fillChunks();
// Inspect the chunks filled so far (the channel is ByteBuffer based, so filling proceeds as the writer thread supplies data).
for (PutOperation.PutChunk putChunk : op.putChunks) {
Assert.assertNull("Mock channel write should not have caused an exception", channelException.get());
if (putChunk.isFree()) {
continue;
}
if (chunkIndex == numChunks - 1) {
// The last chunk may not be Ready yet, since that depends on the completion callback being invoked.
Assert.assertTrue("Chunk should be Building or Ready.", putChunk.getState() == PutOperation.ChunkState.Ready || putChunk.getState() == PutOperation.ChunkState.Building);
if (putChunk.getState() == PutOperation.ChunkState.Ready) {
Assert.assertEquals("Chunk size should be the last chunk size", lastChunkSize, putChunk.buf.readableBytes());
Assert.assertTrue("Chunk Filling should be complete at this time", op.isChunkFillingDone());
fillingComplete = true;
}
} else {
// if not last chunk, then the chunk should be full and Ready.
Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
Assert.assertEquals("Chunk size should be maxChunkSize", chunkSize, putChunk.buf.readableBytes());
chunkIndex++;
putChunk.clear();
}
}
} while (!fillingComplete);
}
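The expected chunk count comes from RouterUtils.getNumChunksForBlobAndChunkSize; for a non-zero blob size this is just a ceiling division. A minimal stand-alone sketch of that calculation (a hypothetical helper, not the actual RouterUtils code, which may treat edge cases differently):
// Hypothetical ceiling-division helper mirroring what the test expects for blobSize > 0.
static int numChunksFor(long blobSize, int chunkSize) {
  return (int) ((blobSize + chunkSize - 1) / chunkSize);
}
// e.g. numChunksFor(blobSize, 4 * 1024 * 1024) == 513 for the blobSize constructed above.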
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class OperationTrackerTest method downReplicasOrderingTest.
/**
* Test to ensure that replicas that are down are also returned by the operation tracker, but they are
* ordered after the healthy replicas.
*/
@Test
public void downReplicasOrderingTest() {
List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
List<String> mountPaths = Collections.singletonList("mockMountPath");
datanodes = new ArrayList<>();
datanodes.add(new MockDataNodeId(portList, mountPaths, "dc-0"));
datanodes.add(new MockDataNodeId(portList, mountPaths, "dc-1"));
mockPartition = new MockPartitionId();
mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), datanodes.get(0).getDatacenterName());
int replicaCount = 6;
populateReplicaList(replicaCount, ReplicaState.STANDBY);
// Test scenarios with various number of replicas down
for (int i = 0; i < replicaCount; i++) {
testReplicaDown(replicaCount, i);
}
}
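A minimal sketch of the ordering property being exercised (a hypothetical assertion, not the actual testReplicaDown implementation, which is defined elsewhere in the test class): given the replicas in the order the tracker returned them and the set of replicas marked down, no healthy replica may appear after a down one.
private static void assertDownReplicasOrderedLast(List<ReplicaId> orderedReplicas, Set<ReplicaId> downReplicas) {
  boolean seenDownReplica = false;
  for (ReplicaId replica : orderedReplicas) {
    if (downReplicas.contains(replica)) {
      seenDownReplica = true;
    } else {
      assertFalse("Healthy replica was returned after a down replica", seenDownReplica);
    }
  }
}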
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class MockReadableStreamChannel method verifyBlob.
/**
* Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
* data as the original object that was put.
* @param requestAndResult the {@link RequestAndResult} to use for verification.
* @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
*/
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
String blobId = requestAndResult.result.result();
ByteBuffer serializedRequest = serializedRequests.get(blobId);
PutRequest request = deserializePutRequest(serializedRequest);
NotificationBlobType notificationBlobType;
BlobId origBlobId = new BlobId(blobId, mockClusterMap);
boolean stitchOperation = requestAndResult.chunksToStitch != null;
if (stitchOperation) {
assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob, request.getBlobType());
}
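// Composite (metadata) blob: the stored request carries a serialized list of data chunk ids rather than the raw blob content.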
if (request.getBlobType() == BlobType.MetadataBlob) {
notificationBlobType = NotificationBlobType.Composite;
assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
long expectedMaxChunkSize;
long expectedTotalSize;
int expectedNumChunks;
if (stitchOperation) {
expectedMaxChunkSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
expectedNumChunks = requestAndResult.chunksToStitch.size();
} else {
expectedMaxChunkSize = chunkSize;
expectedTotalSize = requestAndResult.putContent.length;
expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
}
if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
}
assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
// Verify all dataBlobIds are DataChunk
for (StoreKey key : dataBlobIds) {
BlobId origDataBlobId = (BlobId) key;
assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
}
// verify user-metadata
if (requestAndResult.putBlobProperties.isEncrypted()) {
ByteBuffer userMetadata = request.getUsermetadata();
// Call run() directly instead of spinning up a thread (via start()) because exceptions or
// assertion failures in a non-main thread would not fail the test.
new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
assertNull("Exception should not be thrown", exception);
assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
}).run();
} else {
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
}
if (!stitchOperation) {
verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent, requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
}
} else {
notificationBlobType = requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
// TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk
// Once the logic is fixed we should assert Simple.
BlobDataType dataType = origBlobId.getBlobDataType();
assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
if (!requestAndResult.putBlobProperties.isEncrypted()) {
assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata, request.getUsermetadata().array());
} else {
ByteBuffer userMetadata = request.getUsermetadata();
// Call run() directly instead of spinning up a thread (via start()) because exceptions or
// assertion failures in a non-main thread would not fail the test.
new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content), userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
assertNull("Exception should not be thrown", exception);
assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
decryptedBlobContent.readBytes(blobContent);
assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
decryptedBlobContent.release();
}).run();
}
}
notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class Http2NetworkClientTest method putGetTest.
@Test
public void putGetTest() throws Exception {
MockClusterMap clusterMap = http2Cluster.getClusterMap();
DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()), new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
// Put a blob
int blobSize = 1024 * 1024;
byte[] usermetadata = new byte[1000];
byte[] data = new byte[blobSize];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
TestUtils.RANDOM.nextBytes(usermetadata);
TestUtils.RANDOM.nextBytes(data);
List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
long startTime = SystemTime.getInstance().milliseconds();
while (responseInfos.size() == 0) {
responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
fail("Network Client no reponse and timeout.");
}
Thread.sleep(30);
}
assertEquals("Should be only one response", 1, responseInfos.size());
DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
PutResponse putResponse = PutResponse.readFrom(dis);
assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
// Get the blob
// get blob properties
ArrayList<BlobId> ids = new ArrayList<BlobId>();
MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
startTime = SystemTime.getInstance().milliseconds();
while (responseInfos.size() == 0) {
responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
fail("Network Client no response and timeout.");
}
Thread.sleep(30);
}
assertEquals("Should be only one response", 1, responseInfos.size());
dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
GetResponse resp = GetResponse.readFrom(dis, clusterMap);
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
// verify BlobProperties
BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
assertEquals(blobSize, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
// verify UserMetadata
byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
assertArrayEquals(usermetadata, userMetadataOutput);
// verify content
byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch.", data, actualBlobData);
}
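The put and get paths above repeat the same send-and-wait loop; one possible refactoring (a sketch built only from calls already shown in this test, not part of the original code) factors it into a helper:
private static ResponseInfo awaitSingleResponse(Http2NetworkClient networkClient, RequestInfo request, long timeoutMs) throws InterruptedException {
  List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  long startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.isEmpty()) {
    responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= timeoutMs) {
      fail("Network client timed out with no response.");
    }
    Thread.sleep(30);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  return responseInfos.get(0);
}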