Use of com.github.ambry.utils.MockTime in project ambry by linkedin.
The class ReplicationTest, method addAndRemoveReplicaTest.
/**
* Tests dynamically adding and removing a replica in {@link ReplicationManager}.
* @throws Exception
*/
@Test
public void addAndRemoveReplicaTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
StoreConfig storeConfig = new StoreConfig(verifiableProperties);
DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, dataNodeId, null, null, new MockTime(), null, new InMemAccountService(false, false));
storageManager.start();
MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, dataNodeId, storeKeyConverterFactory, null);
ReplicaId replicaToTest = clusterMap.getReplicaIds(dataNodeId).get(0);
// Attempting to add a replica that already exists should fail
assertFalse("Adding an existing replica should fail", replicationManager.addReplica(replicaToTest));
// Create a brand new replica that sits on one of the disks of the datanode and add it to the replication manager
PartitionId newPartition = clusterMap.createNewPartition(clusterMap.getDataNodes());
for (ReplicaId replicaId : newPartition.getReplicaIds()) {
if (replicaId.getDataNodeId() == dataNodeId) {
replicaToTest = replicaId;
break;
}
}
// Before adding replica, partitionToPartitionInfo and mountPathToPartitionInfos should not contain new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Add new replica to replication manager
assertTrue("Adding new replica to replication manager should succeed", replicationManager.addReplica(replicaToTest));
// After adding replica, partitionToPartitionInfo and mountPathToPartitionInfos should contain new partition
assertTrue("partitionToPartitionInfo should contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
Optional<PartitionInfo> newPartitionInfo = replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath()).stream().filter(partitionInfo -> partitionInfo.getPartitionId() == newPartition).findAny();
assertTrue("mountPathToPartitionInfos should contain new partition info", newPartitionInfo.isPresent());
// Verify that all remoteReplicaInfos of the newly added replica have an assigned thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNotNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
}
// Remove replica
assertTrue("Remove replica from replication manager should succeed", replicationManager.removeReplica(replicaToTest));
// Verify replica is removed, so partitionToPartitionInfo and mountPathToPartitionInfos should not contain new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Verify that none of the remoteReplicaInfos has an assigned thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
}
// Removing the same replica again (it no longer exists) should be a no-op
ReplicationManager mockManager = Mockito.spy(replicationManager);
assertFalse("Remove non-existent replica should return false", replicationManager.removeReplica(replicaToTest));
verify(mockManager, never()).removeRemoteReplicaInfoFromReplicaThread(anyList());
storageManager.shutdown();
}
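The detail relevant to this page is the new MockTime() handed to the StorageManager above: the store layer reads its clock through the injected Time instance, so the test controls time instead of depending on the wall clock. A minimal, self-contained sketch of that pattern follows; FakeClock is a hypothetical stand-in, not ambry's actual MockTime API.
// Hypothetical controllable clock illustrating why a MockTime is injected; not ambry's MockTime.
class FakeClock {
private long nowMs = 0;
// Code under test reads time from here instead of System.currentTimeMillis().
long milliseconds() {
return nowMs;
}
// "Sleeping" only advances the fake clock, so time-dependent logic can be exercised instantly.
void sleep(long durationMs) {
nowMs += durationMs;
}
}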
Use of com.github.ambry.utils.MockTime in project ambry by linkedin.
The class ChunkFillTest, method fillChunksAndAssertSuccess.
/**
* Create a {@link PutOperation}, pass in a channel with the blobSize set by the caller, and test the chunk
* filling flow for puts.
* Note that this test is for the chunk filling flow, not for the ChunkFiller thread (which never gets exercised,
* as we do not even instantiate the {@link PutManager})
*/
private void fillChunksAndAssertSuccess() throws Exception {
VerifiableProperties vProps = getNonBlockingRouterProperties();
MockClusterMap mockClusterMap = new MockClusterMap();
RouterConfig routerConfig = new RouterConfig(vProps);
routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
ResponseHandler responseHandler = new ResponseHandler(mockClusterMap);
short accountId = Utils.getRandomShort(random);
short containerId = Utils.getRandomShort(random);
BlobProperties putBlobProperties = new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId, containerId, testEncryption, null, null, null);
Random random = new Random();
byte[] putUserMetadata = new byte[10];
random.nextBytes(putUserMetadata);
putContent = new byte[blobSize];
random.nextBytes(putContent);
final ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
FutureResult<String> futureResult = new FutureResult<String>();
MockTime time = new MockTime();
MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
if (testEncryption) {
kms = new MockKeyManagementService(new KMSConfig(vProps), TestUtils.getRandomKey(SingleKeyManagementServiceTest.DEFAULT_KEY_SIZE_CHARS));
cryptoService = new MockCryptoService(new CryptoServiceConfig(vProps));
cryptoJobHandler = new CryptoJobHandler(CryptoJobHandlerTest.DEFAULT_THREAD_COUNT);
}
MockRouterCallback routerCallback = new MockRouterCallback(networkClientFactory.getNetworkClient(), Collections.EMPTY_LIST);
PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult, null, routerCallback, null, kms, cryptoService, cryptoJobHandler, time, putBlobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
op.startOperation();
numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
compositeBuffers = new ByteBuf[numChunks];
compositeEncryptionKeys = new ByteBuffer[numChunks];
compositeBlobIds = new BlobId[numChunks];
final AtomicReference<Exception> operationException = new AtomicReference<Exception>(null);
int chunksLeftToBeFilled = numChunks;
do {
if (testEncryption) {
int chunksPerBatch = Math.min(routerConfig.routerMaxInMemPutChunks, chunksLeftToBeFilled);
CountDownLatch onPollLatch = new CountDownLatch(chunksPerBatch);
routerCallback.setOnPollLatch(onPollLatch);
op.fillChunks();
Assert.assertTrue("Latch should have been zeroed out", onPollLatch.await(1000, TimeUnit.MILLISECONDS));
chunksLeftToBeFilled -= chunksPerBatch;
} else {
op.fillChunks();
}
// Since the channel is ByteBuffer based, every in-flight chunk should already be fully filled (Ready); record its data and clear it so it can be reused for the next batch.
for (PutOperation.PutChunk putChunk : op.putChunks) {
if (putChunk.isFree()) {
continue;
}
Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
ByteBuf buf = putChunk.buf.retainedDuplicate();
totalSizeWritten += buf.readableBytes();
compositeBuffers[putChunk.getChunkIndex()] = buf;
if (testEncryption) {
compositeEncryptionKeys[putChunk.getChunkIndex()] = putChunk.encryptedPerBlobKey.duplicate();
compositeBlobIds[putChunk.getChunkIndex()] = putChunk.chunkBlobId;
}
putChunk.clear();
}
} while (!op.isChunkFillingDone());
if (!testEncryption) {
Assert.assertEquals("total size written out should match the blob size", blobSize, totalSizeWritten);
}
// for encrypted path, size will be implicitly tested via assertDataIdentity
Exception exception = operationException.get();
if (exception != null) {
throw exception;
}
assertDataIdentity(mockClusterMap);
}
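The composite arrays above are sized from numChunks, which the test gets from RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize). Assuming that helper is essentially a ceiling division (an assumption; the real implementation may handle edge cases differently), the arithmetic it encodes looks like this:
// Hypothetical sketch of the chunk-count arithmetic; not ambry's RouterUtils implementation.
static int numChunksFor(long blobSize, int chunkSize) {
// Ceiling division; a zero-byte blob still yields one (empty) chunk in this sketch.
return blobSize == 0 ? 1 : (int) ((blobSize + chunkSize - 1) / chunkSize);
}
For example, numChunksFor(10000, 4096) returns 3, and the loop above then expects exactly that many Ready chunks to appear across the fillChunks() passes.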
Use of com.github.ambry.utils.MockTime in project ambry by linkedin.
The class NonBlockingRouterTest, method testCompositeBlobDataChunksDeleteMaxDeleteOperation.
protected void testCompositeBlobDataChunksDeleteMaxDeleteOperation(int maxDeleteOperation) throws Exception {
try {
// Ensure there are 4 chunks.
maxPutChunkSize = PUT_CONTENT_SIZE / 4;
Properties props = getNonBlockingRouterProperties("DC1");
if (maxDeleteOperation != 0) {
props.setProperty(RouterConfig.ROUTER_BACKGROUND_DELETER_MAX_CONCURRENT_OPERATIONS, Integer.toString(maxDeleteOperation));
}
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
RouterConfig routerConfig = new RouterConfig(verifiableProperties);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// Track deletions of the metadata blob + data chunks via the latch and map below.
final AtomicReference<CountDownLatch> deletesDoneLatch = new AtomicReference<>();
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.get().countDown();
}
};
NonBlockingRouterMetrics localMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
router = new NonBlockingRouter(routerConfig, localMetrics, new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
setOperationParams();
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
String deleteServiceId = "delete-service";
Set<String> blobsToBeDeleted = getBlobsInServers(mockServerLayout);
int getRequestCount = mockServerLayout.getCount(RequestOrResponseType.GetRequest);
// The third iteration is to test the case where the blob has expired.
for (int i = 0; i < 3; i++) {
if (i == 2) {
// Create a clean cluster and put another blob that immediately expires.
setOperationParams();
putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
Set<String> allBlobsInServer = getBlobsInServers(mockServerLayout);
allBlobsInServer.removeAll(blobsToBeDeleted);
blobsToBeDeleted = allBlobsInServer;
}
blobsThatAreDeleted.clear();
deletesDoneLatch.set(new CountDownLatch(5));
router.deleteBlob(blobId, deleteServiceId).get();
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
Assert.assertTrue("All blobs in server are deleted", blobsThatAreDeleted.keySet().containsAll(blobsToBeDeleted));
Assert.assertTrue("Only blobs in server are deleted", blobsToBeDeleted.containsAll(blobsThatAreDeleted.keySet()));
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
String expectedServiceId = blobIdAndServiceId.getKey().equals(blobId) ? deleteServiceId : BackgroundDeleteRequest.SERVICE_ID_PREFIX + deleteServiceId;
Assert.assertEquals("Unexpected service ID for deleted blob", expectedServiceId, blobIdAndServiceId.getValue());
}
// For 1 chunk deletion attempt, 1 background operation for Get is initiated which results in 2 Get Requests at
// the servers.
getRequestCount += 2;
Assert.assertEquals("Only one attempt of chunk deletion should have been done", getRequestCount, mockServerLayout.getCount(RequestOrResponseType.GetRequest));
}
deletesDoneLatch.set(new CountDownLatch(5));
router.deleteBlob(blobId, null).get();
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
Assert.assertEquals("Get should NOT have been skipped", 0, localMetrics.skippedGetBlobCount.getCount());
} finally {
if (router != null) {
router.close();
assertClosed();
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
}
}
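The fixed numbers in the loop above follow from the blob layout: maxPutChunkSize = PUT_CONTENT_SIZE / 4 yields 4 data chunks, so each delete fans out to 5 delete notifications (the metadata blob plus the data chunks, hence the CountDownLatch count of 5) and triggers one background GET operation that the mock servers observe as 2 GetRequests. A hedged restatement of that accounting, using hypothetical helper names that are not part of the test:
// Hypothetical helpers restating the accounting behind the assertions above.
static int expectedDeleteNotifications(int dataChunkCount) {
return 1 + dataChunkCount; // the metadata blob plus every data chunk
}
static int expectedGetRequestDelta() {
return 2; // one background GET operation per composite delete => two GetRequests at the servers
}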
Use of com.github.ambry.utils.MockTime in project ambry by linkedin.
The class NonBlockingRouterTest, method testUnsuccessfulPutDataChunkDelete.
/**
* Test that if a composite blob put fails, the successfully put data chunks are deleted.
*/
@Test
public void testUnsuccessfulPutDataChunkDelete() throws Exception {
try {
// Ensure there are 4 chunks.
maxPutChunkSize = PUT_CONTENT_SIZE / 4;
Properties props = getNonBlockingRouterProperties("DC1");
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
RouterConfig routerConfig = new RouterConfig(verifiableProperties);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// Since this test wants to ensure that successfully put data chunks are deleted when the overall put operation
// fails, it uses a notification system to track the deletions.
final CountDownLatch deletesDoneLatch = new CountDownLatch(2);
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.countDown();
}
};
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap, routerConfig), new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
setOperationParams();
List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
List<ServerErrorCode> serverErrorList = new ArrayList<>();
// There are 4 chunks for this blob.
// All put operations make one request to each local server as there are 3 servers overall in the local DC.
// Set the state of the mock servers so that they return success for the first 2 requests in order to succeed
// the first two chunks.
serverErrorList.add(ServerErrorCode.No_Error);
serverErrorList.add(ServerErrorCode.No_Error);
// fail requests for third and fourth data chunks including the slipped put attempts:
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
// all subsequent requests (no more puts, but there will be deletes) will succeed.
for (DataNodeId dataNodeId : dataNodeIds) {
MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
server.setServerErrors(serverErrorList);
}
// Submit the put operation and wait for it to fail.
try {
router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
} catch (ExecutionException e) {
Assert.assertEquals(RouterErrorCode.AmbryUnavailable, ((RouterException) e.getCause()).getErrorCode());
}
// Now, wait until the deletes of the successfully put blobs are complete.
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS, deletesDoneLatch.await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
Assert.assertEquals("Unexpected service ID for deleted blob", BackgroundDeleteRequest.SERVICE_ID_PREFIX + putBlobProperties.getServiceId(), blobIdAndServiceId.getValue());
}
} finally {
if (router != null) {
router.close();
assertClosed();
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
}
}
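The per-server error schedule above reads as: the first two requests succeed (data chunks one and two), the next four fail (chunks three and four plus their slipped put attempts), and later requests, including the background deletes, succeed once the list is exhausted. An equivalent way to build that schedule, shown only as a sketch with a hypothetical helper name:
// Hypothetical helper equivalent to the explicit serverErrorList construction above.
static List<ServerErrorCode> putFailureSchedule() {
List<ServerErrorCode> schedule = new ArrayList<>(Collections.nCopies(2, ServerErrorCode.No_Error));
schedule.addAll(Collections.nCopies(4, ServerErrorCode.Unknown_Error));
return schedule;
}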
Use of com.github.ambry.utils.MockTime in project ambry by linkedin.
The class DeleteManagerTest, method init.
/**
* Initializes ClusterMap, Router, mock servers, and a {@code BlobId} to be deleted.
*/
@Before
public void init() throws Exception {
VerifiableProperties vProps = new VerifiableProperties(getNonBlockingRouterProperties());
mockTime = new MockTime();
mockSelectorState = new AtomicReference<MockSelectorState>(MockSelectorState.Good);
clusterMap = new MockClusterMap();
serverLayout = new MockServerLayout(clusterMap);
RouterConfig routerConfig = new RouterConfig(vProps);
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(clusterMap, routerConfig), new MockNetworkClientFactory(vProps, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, serverLayout, mockTime), new LoggingNotificationSystem(), clusterMap, null, null, null, new InMemAccountService(false, true), mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
List<PartitionId> mockPartitions = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
partition = mockPartitions.get(ThreadLocalRandom.current().nextInt(mockPartitions.size()));
blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), partition, false, BlobId.BlobDataType.DATACHUNK);
blobIdString = blobId.getID();
}
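Because init() threads the same mockTime through both the router and the MockNetworkClientFactory, tests built on this fixture typically simulate slow or unresponsive servers by advancing the mock clock rather than waiting in real time. A hedged usage sketch follows; it is a hypothetical test method, and it assumes MockTime.sleep(ms) advances the mock clock and returns immediately rather than blocking.
@Test
public void deleteTimeoutSketch() throws Exception {
// Start a delete against the blob id created in init(); assume the mock servers never respond.
Future<Void> deleteFuture = router.deleteBlob(blobIdString, "delete-service");
// Jump the shared mock clock forward so outstanding requests exceed the router's request timeout on the next poll.
mockTime.sleep(10 * 1000);
// A real test would then assert on deleteFuture's outcome, e.g. a RouterException with an OperationTimedOut error code.
}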