Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.
The class NonBlockingRouterTest, method testResponseHandling.
/**
* Response handling related tests for all operation managers.
*/
@Test
public void testResponseHandling() throws Exception {
try {
Properties props = getNonBlockingRouterProperties("DC1");
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
setOperationParams();
final List<ReplicaId> failedReplicaIds = new ArrayList<>();
final AtomicInteger successfulResponseCount = new AtomicInteger(0);
final AtomicBoolean invalidResponse = new AtomicBoolean(false);
ResponseHandler mockResponseHandler = new ResponseHandler(mockClusterMap) {
@Override
public void onEvent(ReplicaId replicaId, Object e) {
if (e instanceof ServerErrorCode) {
if (e == ServerErrorCode.No_Error) {
successfulResponseCount.incrementAndGet();
} else {
invalidResponse.set(true);
}
} else {
failedReplicaIds.add(replicaId);
}
}
};
// Instantiate a router just to put a blob successfully.
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
setRouter(props, mockServerLayout, new LoggingNotificationSystem());
setOperationParams();
// More extensive tests for puts are present elsewhere - these statements are here just to exercise the flow within the
// NonBlockingRouter class, and to ensure that operations submitted to a router eventually complete.
String blobIdStr = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
BlobId blobId = RouterUtils.getBlobIdFromString(blobIdStr, mockClusterMap);
router.close();
for (MockServer mockServer : mockServerLayout.getMockServers()) {
mockServer.setServerErrorForAllRequests(ServerErrorCode.No_Error);
}
SocketNetworkClient networkClient = new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime).getNetworkClient();
cryptoJobHandler = new CryptoJobHandler(CryptoJobHandlerTest.DEFAULT_THREAD_COUNT);
KeyManagementService localKMS = new MockKeyManagementService(new KMSConfig(verifiableProperties), singleKeyForKMS);
putManager = new PutManager(mockClusterMap, mockResponseHandler, new LoggingNotificationSystem(),
    new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap, null),
    new RouterCallback(networkClient, new ArrayList<>()), "0", localKMS, cryptoService, cryptoJobHandler,
    accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
OperationHelper opHelper = new OperationHelper(OperationType.PUT);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, PUT_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, null, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, null);
opHelper = new OperationHelper(OperationType.GET);
getManager = new GetManager(mockClusterMap, mockResponseHandler, new RouterConfig(verifiableProperties),
    new NonBlockingRouterMetrics(mockClusterMap, null),
    new RouterCallback(networkClient, new ArrayList<BackgroundDeleteRequest>()), localKMS, cryptoService,
    cryptoJobHandler, mockTime);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, GET_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, blobId);
opHelper = new OperationHelper(OperationType.DELETE);
deleteManager = new DeleteManager(mockClusterMap, mockResponseHandler, accountService,
    new LoggingNotificationSystem(), new RouterConfig(verifiableProperties),
    new NonBlockingRouterMetrics(mockClusterMap, null),
    new RouterCallback(null, new ArrayList<BackgroundDeleteRequest>()), mockTime);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, DELETE_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, blobId);
} finally {
if (putManager != null) {
putManager.close();
}
if (getManager != null) {
getManager.close();
}
if (deleteManager != null) {
deleteManager.close();
}
}
}
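The contract exercised above is ResponseHandler.onEvent(ReplicaId, Object): a ServerErrorCode argument is treated as a server-level response (No_Error counting as success), while any other object, typically an exception, is treated as a replica failure. The sketch below illustrates how a caller might report both kinds of outcome; the handleResponse helper and its parameters are hypothetical and not part of the Ambry API.
// Hypothetical helper illustrating the onEvent contract exercised by the test above:
// a ServerErrorCode argument reports a server-level outcome, anything else (e.g. an
// exception) reports a replica failure.
void handleResponse(ResponseHandler responseHandler, ReplicaId replicaId, ServerErrorCode serverError,
    Exception networkException) {
  if (networkException != null) {
    // No usable response from the replica: report the failure itself.
    responseHandler.onEvent(replicaId, networkException);
  } else {
    // A response was received: report its error code (No_Error counts as success).
    responseHandler.onEvent(replicaId, serverError);
  }
}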
Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.
The class MockRouterCallback, method testInstantiation.
/**
* Test {@link GetBlobInfoOperation} instantiation and validate the get methods.
*/
@Test
public void testInstantiation() {
BlobId blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE,
    ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(random), Utils.getRandomShort(random),
    mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false,
    BlobId.BlobDataType.DATACHUNK);
Callback<GetBlobResultInternal> getOperationCallback = (result, exception) -> {
// no op.
};
// test a good case
GetBlobInfoOperation op = new GetBlobInfoOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler,
    blobId, options, getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false,
    quotaChargeCallback);
Assert.assertEquals("Callback must match", getOperationCallback, op.getCallback());
Assert.assertEquals("Blob ids must match", blobId.getID(), op.getBlobIdStr());
// test the case where the tracker type is bad
Properties properties = getNonBlockingRouterProperties(true);
properties.setProperty("router.get.operation.tracker.type", "NonExistentTracker");
RouterConfig badConfig = new RouterConfig(new VerifiableProperties(properties));
try {
new GetBlobInfoOperation(badConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options,
    getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
Assert.fail("Instantiation of GetBlobInfoOperation with an invalid tracker type must fail");
} catch (IllegalArgumentException e) {
// expected. Nothing to do.
}
}
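If the test suite is on JUnit 4.13 or later, the try/catch-and-fail pattern above can be expressed more compactly with Assert.assertThrows; this is only an optional alternative, not the project's existing convention.
// Equivalent negative test using Assert.assertThrows (requires JUnit 4.13+).
Assert.assertThrows(IllegalArgumentException.class,
    () -> new GetBlobInfoOperation(badConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options,
        getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback));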
Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.
The class ReplicationManager, method assignReplicasToThreadPool.
/**
* Partitions the list of data nodes among the given set of replica threads for the given DC.
*/
private void assignReplicasToThreadPool() {
for (Map.Entry<String, DataNodeRemoteReplicaInfos> mapEntry : dataNodeRemoteReplicaInfosPerDC.entrySet()) {
String datacenter = mapEntry.getKey();
DataNodeRemoteReplicaInfos dataNodeRemoteReplicaInfos = mapEntry.getValue();
Set<DataNodeId> dataNodesToReplicate = dataNodeRemoteReplicaInfos.getDataNodeIds();
int dataNodesCount = dataNodesToReplicate.size();
int replicaThreadCount = numberOfReplicaThreads.get(datacenter);
if (replicaThreadCount <= 0) {
logger.warn("Number of replica threads is less than or equal to 0, not starting any replica threads for {}", datacenter);
continue;
} else if (dataNodesCount == 0) {
logger.warn("Number of nodes to replicate from is 0, not starting any replica threads for {} ", datacenter);
continue;
}
// Divide the nodes between the replica threads if the number of replica threads is less than or equal to the
// number of nodes. Otherwise, assign one thread to one node.
logger.info("Number of replica threads to replicate from {}: {}", datacenter, replicaThreadCount);
logger.info("Number of dataNodes to replicate from: {}", dataNodesCount);
if (dataNodesCount < replicaThreadCount) {
logger.warn("Number of replica threads: {} is more than the number of nodes to replicate from: {}", replicaThreadCount, dataNodesCount);
replicaThreadCount = dataNodesCount;
}
ResponseHandler responseHandler = new ResponseHandler(clusterMap);
int numberOfNodesPerThread = dataNodesCount / replicaThreadCount;
int remainingNodes = dataNodesCount % replicaThreadCount;
Iterator<DataNodeId> dataNodeIdIterator = dataNodesToReplicate.iterator();
for (int i = 0; i < replicaThreadCount; i++) {
// create the list of nodes for the replica thread
Map<DataNodeId, List<RemoteReplicaInfo>> replicasForThread = new HashMap<DataNodeId, List<RemoteReplicaInfo>>();
int nodesAssignedToThread = 0;
while (nodesAssignedToThread < numberOfNodesPerThread) {
DataNodeId dataNodeToReplicate = dataNodeIdIterator.next();
replicasForThread.put(dataNodeToReplicate, dataNodeRemoteReplicaInfos.getRemoteReplicaListForDataNode(dataNodeToReplicate));
dataNodeIdIterator.remove();
nodesAssignedToThread++;
}
if (remainingNodes > 0) {
DataNodeId dataNodeToReplicate = dataNodeIdIterator.next();
replicasForThread.put(dataNodeToReplicate, dataNodeRemoteReplicaInfos.getRemoteReplicaListForDataNode(dataNodeToReplicate));
dataNodeIdIterator.remove();
remainingNodes--;
}
boolean replicatingOverSsl = sslEnabledDatacenters.contains(datacenter);
String threadIdentity = "Replica Thread-" + (dataNodeId.getDatacenterName().equals(datacenter) ? "Intra-" : "Inter-") + i + datacenter;
ReplicaThread replicaThread = new ReplicaThread(threadIdentity, replicasForThread, factory, clusterMap,
    correlationIdGenerator, dataNodeId, connectionPool, replicationConfig, replicationMetrics, notification,
    storeKeyFactory, replicationConfig.replicationValidateMessageStream, metricRegistry, replicatingOverSsl,
    datacenter, responseHandler);
if (replicaThreadPools.containsKey(datacenter)) {
replicaThreadPools.get(datacenter).add(replicaThread);
} else {
replicaThreadPools.put(datacenter, new ArrayList<>(Arrays.asList(replicaThread)));
}
}
}
}
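The node-to-thread division above is a plain quotient/remainder split: every thread gets dataNodesCount / replicaThreadCount nodes, and the first dataNodesCount % replicaThreadCount threads pick up one extra node. A small standalone sketch of just that arithmetic (hypothetical method, assuming java.util imports, detached from ReplicationManager) makes the distribution easy to verify:
// Standalone illustration of the quotient/remainder split used above.
// For 7 nodes and 3 threads it returns [3, 2, 2].
static List<Integer> nodesPerThread(int dataNodesCount, int replicaThreadCount) {
  int base = dataNodesCount / replicaThreadCount;
  int remaining = dataNodesCount % replicaThreadCount;
  List<Integer> assignment = new ArrayList<>();
  for (int i = 0; i < replicaThreadCount; i++) {
    // The first 'remaining' threads each take one extra node.
    assignment.add(base + (i < remaining ? 1 : 0));
  }
  return assignment;
}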
Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.
The class CloudBlobStoreTest, method testPutWithTtl.
/**
* Test PUT(with TTL) and TtlUpdate record replication.
* Replication may happen after PUT and after TtlUpdate, or after TtlUpdate only.
* The PUT may have already expired, its expiration time may be below the upload threshold, or it may be at or above the upload threshold.
* @throws Exception
*/
@Test
public void testPutWithTtl() throws Exception {
// Set up remote host
MockClusterMap clusterMap = new MockClusterMap();
MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
PartitionId partitionId = partitionIds.get(0);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<DataNodeId, MockHost> hosts = new HashMap<>();
hosts.put(remoteHost.dataNodeId, remoteHost);
MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
// Generate BlobIds for following PUT.
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
List<BlobId> blobIdList = new ArrayList<>();
for (int i = 0; i < 6; i++) {
blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
}
// Set up VCR
Properties props = new Properties();
setBasicProperties(props);
props.setProperty("clustermap.port", "12300");
props.setProperty("vcr.ssl.port", "12345");
ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
cloudBlobStore.start();
// Create ReplicaThread and add RemoteReplicaInfo to it.
ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
ReplicaThread replicaThread = new ReplicaThread("threadtest",
    new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode,
    connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer,
    clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap),
    new MockTime(), null, null, null);
for (ReplicaId replica : partitionId.getReplicaIds()) {
if (replica.getDataNodeId() == remoteHost.dataNodeId) {
RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
break;
}
}
long referenceTime = System.currentTimeMillis();
// Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
BlobId id = blobIdList.get(0);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
replicaThread.replicate();
assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 2: Put already expired. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(1);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(2);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
replicaThread.replicate();
if (isVcr) {
assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
} else {
assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
}
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(3);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud after Put and update ttl after TtlUpdate.
id = blobIdList.get(4);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
replicaThread.replicate();
assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud after TtlUpdate.
id = blobIdList.get(5);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Verify expiration time of all blobs.
Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
for (BlobId blobId : blobIdList) {
assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
}
}
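All six cases above hinge on one comparison: whether the blob's remaining TTL at replication time is at least cloudConfig.vcrMinTtlDays (case 3 also shows that a non-VCR store uploads regardless). The helper below is a rough, hypothetical approximation of that decision for illustration only; it is not the actual CloudBlobStore implementation.
// Hypothetical approximation of the upload decision exercised above: a VCR defers
// uploading a TTL'd put until its TTL reaches the configured minimum (or becomes
// infinite via a TtlUpdate). Not the actual CloudBlobStore logic.
static boolean shouldUploadNow(long expirationTimeMs, long operationTimeMs, int vcrMinTtlDays) {
  if (expirationTimeMs == Utils.Infinite_Time) {
    // Permanent blobs are always uploaded.
    return true;
  }
  long remainingTtlMs = expirationTimeMs - operationTimeMs;
  return remainingTtlMs >= TimeUnit.DAYS.toMillis(vcrMinTtlDays);
}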
Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.
The class TestUtils, method mockServerEventsAndVerify.
/**
* This helper sets up initial states for the datanode, disk and replica, then triggers the specified server event and
* verifies that the datanode, disk and replica end up in the expected states after the event.
* @param clusterManager the {@link ClusterMap} to use.
* @param clusterMapConfig the {@link ClusterMapConfig} to use.
* @param initialStates the initial states for datanode, disk and replica (default order).
* @param serverErrorCode the {@link ServerErrorCode} received for mocking event.
* @param expectedStates the expected states for datanode, disk and replica (default order).
*/
static void mockServerEventsAndVerify(ClusterMap clusterManager, ClusterMapConfig clusterMapConfig, ResourceState[] initialStates, ServerErrorCode serverErrorCode, ResourceState[] expectedStates) {
ResponseHandler handler = new ResponseHandler(clusterManager);
// choose a disk backed replica
ReplicaId replica = clusterManager.getWritablePartitionIds(null).get(0).getReplicaIds().stream().filter(replicaId -> replicaId.getReplicaType() != ReplicaType.CLOUD_BACKED).findFirst().get();
DataNodeId dataNode = replica.getDataNodeId();
assertTrue(clusterManager.getReplicaIds(dataNode).contains(replica));
DiskId disk = replica.getDiskId();
// Verify that everything is up in the beginning.
assertFalse(replica.isDown());
assertEquals(HardwareState.AVAILABLE, dataNode.getState());
assertEquals(HardwareState.AVAILABLE, disk.getState());
// Mock initial states for node, disk and replica
if (initialStates[0] == ResourceState.Node_Down) {
for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutDatanodeErrorThreshold; i++) {
clusterManager.onReplicaEvent(replica, ReplicaEventType.Node_Timeout);
}
}
if (initialStates[1] == ResourceState.Disk_Down) {
for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutDiskErrorThreshold; i++) {
clusterManager.onReplicaEvent(replica, ReplicaEventType.Disk_Error);
}
}
if (initialStates[2] == ResourceState.Replica_Down) {
for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutReplicaErrorThreshold; i++) {
clusterManager.onReplicaEvent(replica, ReplicaEventType.Replica_Unavailable);
}
}
// Make sure node, disk and replica match specified initial states
if (dataNode.getState() == HardwareState.AVAILABLE && disk.getState() == HardwareState.AVAILABLE) {
// Since replica.isDown() will check the state of disk, if we try to mock disk is down and replica is up, we should
// skip this check for initial state. Only when node and disk are up, we check the initial state of replica.
assertEquals(initialStates[2], replica.isDown() ? ResourceState.Replica_Down : ResourceState.Replica_Up);
}
if (dataNode.getState() == HardwareState.AVAILABLE) {
assertEquals(initialStates[1], disk.getState() == HardwareState.UNAVAILABLE ? ResourceState.Disk_Down : ResourceState.Disk_Up);
}
assertEquals(initialStates[0], dataNode.getState() == HardwareState.UNAVAILABLE ? ResourceState.Node_Down : ResourceState.Node_Up);
// Trigger server event
handler.onEvent(replica, serverErrorCode);
// Verify node, disk and replica match expected states after server event
assertEquals(expectedStates[2], replica.isDown() ? ResourceState.Replica_Down : ResourceState.Replica_Up);
assertEquals(expectedStates[1], disk.getState() == HardwareState.UNAVAILABLE ? ResourceState.Disk_Down : ResourceState.Disk_Up);
assertEquals(expectedStates[0], dataNode.getState() == HardwareState.UNAVAILABLE ? ResourceState.Node_Down : ResourceState.Node_Up);
}
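The initial-state mocking above works because the cluster map only marks a resource down after a configured number of consecutive error events (clusterMapFixedTimeoutDatanodeErrorThreshold and its disk/replica counterparts). The class below is a generic sketch of that kind of threshold-based failure counter, independent of Ambry's actual cluster map implementation.
// Generic threshold-based failure counter, analogous in spirit to the error thresholds
// used above; not Ambry's actual cluster map implementation.
static final class FailureCounter {
  private final int threshold;
  private int consecutiveErrors = 0;

  FailureCounter(int threshold) {
    this.threshold = threshold;
  }

  synchronized void onError() {
    consecutiveErrors++;
  }

  synchronized void onSuccess() {
    // A healthy response resets the count.
    consecutiveErrors = 0;
  }

  synchronized boolean isDown() {
    return consecutiveErrors >= threshold;
  }
}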