Use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
The class LeaderBasedReplicationTest, method replicaThreadLeaderBasedReplicationForPUTMessagesTest.
/**
* Test leader based replication to ensure token is advanced correctly for standby replicas when missing PUT messages
* are fetched via intra-dc replication.
* @throws Exception
*/
@Test
public void replicaThreadLeaderBasedReplicationForPUTMessagesTest() throws Exception {
Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
StorageManager storageManager = managers.getFirst();
MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
/*
Scenario:
We have 3 nodes with replicas belonging to the same partitions:
a) local node
b) remote node in the local DC
c) remote node in the remote DC
Each node has a few of its partitions as leaders and the others as standby; they are randomly assigned during creation
of replicas for mock partitions.
There are 4 PUT messages in each of the partitions on the remote nodes of the local DC and remote DC that need to be
replicated to the local node.
Steps:
1. Replicate (send metadata exchange and get messages) with the remote node in the remote DC (cross-colo replication).
Expectations:
a) Metadata exchange should be sent for all replicas, while GET requests for messages are only sent for leader replicas.
b) All PUT messages should be replicated locally and the remote token should move forward for leader partitions.
c) For non-leader replicas, the metadata response should be stored locally.
2. Replicate (send metadata exchange and get messages) with the remote node in the local DC (intra-colo replication).
Expectations:
a) Metadata exchange and GET requests for messages are sent for all replicas.
b) PUT messages should be replicated locally for all replicas.
c) Missing messages in the stored metadata responses of non-leader replicas for remoteNodeInRemoteDC should become
empty and the remote token should be advanced.
*/
int batchSize = 4;
// set mock local stores on all remoteReplicaInfos which will be used during replication.
for (PartitionId partitionId : replicationManager.partitionToPartitionInfo.keySet()) {
localHost.addStore(partitionId, null);
Store localStore = localHost.getStore(partitionId);
localStore.start();
List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(partitionId).getRemoteReplicaInfos();
remoteReplicaInfos.forEach(remoteReplicaInfo -> remoteReplicaInfo.setLocalStore(localStore));
}
// get remote replicas and replica thread for remote host on local datacenter
ReplicaThread intraColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInLocalDC);
List<RemoteReplicaInfo> remoteReplicaInfosForLocalDC = intraColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInLocalDC);
// get remote replicas and replica thread for remote host on remote datacenter
ReplicaThread crossColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInRemoteDC);
List<RemoteReplicaInfo> remoteReplicaInfosForRemoteDC = crossColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInRemoteDC);
// mock helix transition state from standby to leader for local leader partitions
List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(replicationManager.dataNodeId);
for (ReplicaId replicaId : replicaIds) {
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
if (mockReplicaId.getReplicaState() == ReplicaState.LEADER) {
MockPartitionId mockPartitionId = (MockPartitionId) replicaId.getPartitionId();
mockHelixParticipant.onPartitionBecomeLeaderFromStandby(mockPartitionId.toPathString());
}
}
// Add PUT messages to all partitions on remoteHostInLocalDC and remoteHostInRemoteDC
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add batchSize messages to remoteHostInLocalDC and remoteHostInRemoteDC, from which the local host will replicate.
addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHostInLocalDC, remoteHostInRemoteDC), batchSize);
}
// Choose partitions that are leaders on both local and remote nodes
Set<ReplicaId> leaderReplicasOnLocalAndRemoteNodes = getRemoteLeaderReplicasWithLeaderPartitionsOnLocalNode(clusterMap, replicationManager.dataNodeId, remoteNodeInRemoteDC);
// Replicate with remoteHostInRemoteDC in the remote data center.
List<ReplicaThread.ExchangeMetadataResponse> responseListForRemoteNodeInRemoteDC = crossColoReplicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHostInRemoteDC, batchSize), remoteReplicaInfosForRemoteDC);
// Metadata requests should be sent to both leader and standby replicas.
assertEquals("Response should contain a response for each replica", remoteReplicaInfosForRemoteDC.size(), responseListForRemoteNodeInRemoteDC.size());
// verify that the number of missing messages equals min{batchSize, number of PUT messages} placed on the remote hosts
for (ReplicaThread.ExchangeMetadataResponse exchangeMetadataResponse : responseListForRemoteNodeInRemoteDC) {
assertEquals("mismatch in number of missing messages", batchSize, exchangeMetadataResponse.missingStoreMessages.size());
}
// Filter leader replicas to fetch missing keys
List<RemoteReplicaInfo> leaderReplicas = new ArrayList<>();
List<ReplicaThread.ExchangeMetadataResponse> exchangeMetadataResponseListForLeaderReplicas = new ArrayList<>();
crossColoReplicaThread.getLeaderReplicaList(remoteReplicaInfosForRemoteDC, responseListForRemoteNodeInRemoteDC, leaderReplicas, exchangeMetadataResponseListForLeaderReplicas);
// verify that only leader replicas on remoteNodeInRemoteDC are chosen for fetching missing messages.
Set<ReplicaId> remoteReplicasToFetchInReplicaThread = leaderReplicas.stream().map(RemoteReplicaInfo::getReplicaId).collect(Collectors.toSet());
assertThat("mismatch in leader remote replicas to fetch missing keys", leaderReplicasOnLocalAndRemoteNodes, is(remoteReplicasToFetchInReplicaThread));
// fetch missing keys for leader replicas from remoteHostInRemoteDC
if (leaderReplicas.size() > 0) {
crossColoReplicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHostInRemoteDC, batchSize), leaderReplicas, exchangeMetadataResponseListForLeaderReplicas, false);
}
// Remote tokens for leader replicas should have moved forward, while standby replicas keep their stored metadata response since their missing messages are not fetched yet.
for (int i = 0; i < remoteReplicaInfosForRemoteDC.size(); i++) {
if (leaderReplicasOnLocalAndRemoteNodes.contains(remoteReplicaInfosForRemoteDC.get(i).getReplicaId())) {
assertEquals("remote token mismatch for leader replicas", remoteReplicaInfosForRemoteDC.get(i).getToken(), responseListForRemoteNodeInRemoteDC.get(i).remoteToken);
} else {
assertEquals("missing keys in metadata response should be stored for standby replicas", remoteReplicaInfosForRemoteDC.get(i).getExchangeMetadataResponse().missingStoreMessages.size(), responseListForRemoteNodeInRemoteDC.get(i).missingStoreMessages.size());
assertThat("remote token should not move forward for standby replicas until missing keys are fetched", remoteReplicaInfosForRemoteDC.get(i).getToken(), not(responseListForRemoteNodeInRemoteDC.get(i).remoteToken));
}
}
// Replicate with remoteHostInLocalDC in the local data center
List<ReplicaThread.ExchangeMetadataResponse> responseForRemoteNodeInLocalDC = intraColoReplicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHostInLocalDC, batchSize), remoteReplicaInfosForLocalDC);
assertEquals("Response should contain a response for each replica", remoteReplicaInfosForLocalDC.size(), responseForRemoteNodeInLocalDC.size());
// fetch missing keys from remoteHostInLocalDC
intraColoReplicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHostInLocalDC, batchSize), remoteReplicaInfosForLocalDC, responseForRemoteNodeInLocalDC, false);
for (int i = 0; i < responseForRemoteNodeInLocalDC.size(); i++) {
assertEquals("mismatch in remote token set for intra colo replicas", remoteReplicaInfosForLocalDC.get(i).getToken(), (responseForRemoteNodeInLocalDC.get(i).remoteToken));
}
// process missing keys for cross colo standby replicas from previous metadata exchange
for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
crossColoReplicaThread.processMissingKeysFromPreviousMetadataResponse(remoteReplicaInfo);
}
// Remote tokens for cross-colo standby replicas should now advance, since the missing messages for standbys were received via intra-dc replication.
for (int i = 0; i < responseListForRemoteNodeInRemoteDC.size(); i++) {
assertEquals("mismatch in remote token set for cross colo replicas", remoteReplicaInfosForRemoteDC.get(i).getToken(), (responseListForRemoteNodeInRemoteDC.get(i).remoteToken));
}
// verify replication metrics tracking the number of cross-colo GET requests and the cross-colo bytes fetch rate for
// standby replicas; both should be 0 since all missing blobs were obtained via the local leader through intra-dc replication.
String remoteDataCenter = remoteReplicaInfosForRemoteDC.get(0).getReplicaId().getDataNodeId().getDatacenterName();
assertEquals("mismatch in number of cross colo get requests tracked for standby replicas", crossColoReplicaThread.getReplicationMetrics().interColoReplicationGetRequestCountForStandbyReplicas.get(remoteDataCenter).getCount(), 0);
assertEquals("mismatch in bytes fetch rate for cross colo get requests tracked for standby replicas", crossColoReplicaThread.getReplicationMetrics().interColoReplicationFetchBytesRateForStandbyReplicas.get(remoteDataCenter).getCount(), 0);
storageManager.shutdown();
}
Use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
The class LeaderBasedReplicationTest, method replicaThreadLeaderBasedReplicationForTTLUpdatesDeleteAndUndeleteMessagesTest.
/**
* Test leader based replication to ensure the token is advanced correctly and that blob properties TTL
* update, delete and undelete are applied correctly when missing messages in standby replicas
* are fetched via intra-dc replication.
* @throws Exception
*/
@Test
public void replicaThreadLeaderBasedReplicationForTTLUpdatesDeleteAndUndeleteMessagesTest() throws Exception {
Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
StorageManager storageManager = managers.getFirst();
MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
int batchSize = 10;
// set mock local stores on all remoteReplicaInfos which will be used during replication.
for (PartitionId partitionId : replicationManager.partitionToPartitionInfo.keySet()) {
localHost.addStore(partitionId, null);
Store localStore = localHost.getStore(partitionId);
localStore.start();
List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(partitionId).getRemoteReplicaInfos();
remoteReplicaInfos.forEach(remoteReplicaInfo -> remoteReplicaInfo.setLocalStore(localStore));
}
// get remote replicas and replica thread for remote host on local datacenter
ReplicaThread intraColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInLocalDC);
List<RemoteReplicaInfo> remoteReplicaInfosForLocalDC = intraColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInLocalDC);
// get remote replicas and replica thread for remote host on remote datacenter
ReplicaThread crossColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInRemoteDC);
List<RemoteReplicaInfo> remoteReplicaInfosForRemoteDC = crossColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInRemoteDC);
// mock helix transition state from standby to leader for local leader partitions
List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(replicationManager.dataNodeId);
for (ReplicaId replicaId : replicaIds) {
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
if (mockReplicaId.getReplicaState() == ReplicaState.LEADER) {
MockPartitionId mockPartitionId = (MockPartitionId) replicaId.getPartitionId();
mockHelixParticipant.onPartitionBecomeLeaderFromStandby(mockPartitionId.toPathString());
}
}
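// Seed every writable partition on both remote hosts with PUT, TTL update, delete and undelete records; the two maps below track the blobs expected to be deleted (ignored) or TTL-updated when the final message records are compared.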
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
Map<PartitionId, List<StoreKey>> idsToBeTtlUpdatedByPartition = new HashMap<>();
for (PartitionId id : partitionIds) {
List<StoreKey> toBeIgnored = new ArrayList<>();
List<StoreKey> toBeUndeleted = new ArrayList<>();
// Adding 4 PUT messages b0, b1, b2, b3 to remoteNodeInLocalDC and remoteNodeInRemoteDC
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(id, Arrays.asList(remoteHostInLocalDC, remoteHostInRemoteDC), 4);
// ttl update to be added to b1, b2, b3
List<StoreKey> toBeTtlUpdated = new ArrayList<>(ids);
toBeTtlUpdated.remove(ids.get(0));
// delete to be added to b2, b3
toBeIgnored.add(ids.get(2));
toBeIgnored.add(ids.get(3));
// un-delete to be added to b3
toBeUndeleted.add(ids.get(3));
toBeIgnored.remove(ids.get(3));
// Add TTLUpdate records for blobs b1,b2,b3 in remoteNodeInLocalDC and remoteNodeInRemoteDC
for (int j = 0; j < toBeTtlUpdated.size(); j++) {
addTtlUpdateMessagesToReplicasOfPartition(id, toBeTtlUpdated.get(j), Collections.singletonList(remoteHostInLocalDC), UPDATED_EXPIRY_TIME_MS);
addTtlUpdateMessagesToReplicasOfPartition(id, toBeTtlUpdated.get(toBeTtlUpdated.size() - 1 - j), Collections.singletonList(remoteHostInRemoteDC), UPDATED_EXPIRY_TIME_MS);
}
// Add delete records for blobs b2,b3 in remoteNodeInLocalDC and remoteNodeInRemoteDC
for (int j = 0; j < toBeIgnored.size(); j++) {
addDeleteMessagesToReplicasOfPartition(id, toBeIgnored.get(j), Collections.singletonList(remoteHostInLocalDC), (short) 0, EXPIRY_TIME_MS);
addDeleteMessagesToReplicasOfPartition(id, toBeIgnored.get(toBeIgnored.size() - 1 - j), Collections.singletonList(remoteHostInRemoteDC), (short) 0, EXPIRY_TIME_MS);
}
// Add un-delete records for blob b3 with life_version as 1 in remoteNodeInLocalDC and remoteNodeInRemoteDC
for (StoreKey storeKey : toBeUndeleted) {
addUndeleteMessagesToReplicasOfPartition(id, storeKey, Arrays.asList(remoteHostInLocalDC, remoteHostInRemoteDC), (short) 1);
}
// will be used later while comparing the final message records in local and remote nodes
idsToBeIgnoredByPartition.put(id, toBeIgnored);
idsToBeTtlUpdatedByPartition.put(id, toBeTtlUpdated);
}
// Inter-dc replication
// Send metadata request to remoteNodeInRemoteDC to fetch missing keys information.
List<ReplicaThread.ExchangeMetadataResponse> responseForRemoteNodeInRemoteDC = crossColoReplicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHostInRemoteDC, batchSize), remoteReplicaInfosForRemoteDC);
assertEquals("Response should contain a response for each replica", remoteReplicaInfosForRemoteDC.size(), responseForRemoteNodeInRemoteDC.size());
// Filter leader replicas to fetch missing keys
List<RemoteReplicaInfo> leaderReplicas = new ArrayList<>();
List<ReplicaThread.ExchangeMetadataResponse> exchangeMetadataResponseListForLeaderReplicas = new ArrayList<>();
crossColoReplicaThread.getLeaderReplicaList(remoteReplicaInfosForRemoteDC, responseForRemoteNodeInRemoteDC, leaderReplicas, exchangeMetadataResponseListForLeaderReplicas);
// verify that only leader partitions in local and remote nodes are chosen for fetching missing messages.
Set<ReplicaId> remoteLeaderReplicasWithLeaderPartitionsOnLocalNode = getRemoteLeaderReplicasWithLeaderPartitionsOnLocalNode(clusterMap, replicationManager.dataNodeId, remoteNodeInRemoteDC);
Set<ReplicaId> leaderReplicaSetInReplicaThread = leaderReplicas.stream().map(RemoteReplicaInfo::getReplicaId).collect(Collectors.toSet());
assertThat("mismatch in leader remote replicas to fetch missing keys", remoteLeaderReplicasWithLeaderPartitionsOnLocalNode, is(leaderReplicaSetInReplicaThread));
// fetch missing keys for leader replicas from remoteNodeInRemoteDC
if (leaderReplicas.size() > 0) {
crossColoReplicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHostInRemoteDC, batchSize), leaderReplicas, exchangeMetadataResponseListForLeaderReplicas, false);
}
// For standby replicas, the token index will remain 0 and the metadata information will be stored.
for (int i = 0; i < remoteReplicaInfosForRemoteDC.size(); i++) {
if (remoteLeaderReplicasWithLeaderPartitionsOnLocalNode.contains(remoteReplicaInfosForRemoteDC.get(i).getReplicaId())) {
assertEquals("remote Token should be updated for leader replica", remoteReplicaInfosForRemoteDC.get(i).getToken(), (responseForRemoteNodeInRemoteDC.get(i).remoteToken));
} else {
assertThat("remote Token should not be updated for standby replica", remoteReplicaInfosForRemoteDC.get(i).getToken(), not(responseForRemoteNodeInRemoteDC.get(i).remoteToken));
assertEquals("missing messages in metadata exchange should be stored for standby replica", remoteReplicaInfosForRemoteDC.get(i).getExchangeMetadataResponse().missingStoreMessages.size(), responseForRemoteNodeInRemoteDC.get(i).missingStoreMessages.size());
}
}
// Intra-dc replication
List<ReplicaThread.ExchangeMetadataResponse> responseForRemoteNodeInLocalDC = intraColoReplicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHostInLocalDC, batchSize), remoteReplicaInfosForLocalDC);
intraColoReplicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHostInLocalDC, batchSize), remoteReplicaInfosForLocalDC, responseForRemoteNodeInLocalDC, false);
// Verify that the remote token for all intra-colo replicas has been moved
for (int i = 0; i < responseForRemoteNodeInLocalDC.size(); i++) {
assertEquals(remoteReplicaInfosForLocalDC.get(i).getToken(), responseForRemoteNodeInLocalDC.get(i).remoteToken);
}
// process missing keys for cross-colo standby replicas now that their messages have arrived via intra-dc replication
for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
crossColoReplicaThread.processMissingKeysFromPreviousMetadataResponse(remoteReplicaInfo);
}
// verify that remote tokens for cross-colo replicas have advanced now that the missing messages have arrived via intra-dc replication
for (int i = 0; i < responseForRemoteNodeInRemoteDC.size(); i++) {
assertEquals(remoteReplicaInfosForRemoteDC.get(i).getToken(), responseForRemoteNodeInRemoteDC.get(i).remoteToken);
}
// verify that the messages and buffers are in sync between the local host and remoteHostInLocalDC
checkBlobMessagesAreEqualInLocalAndRemoteHosts(localHost, remoteHostInLocalDC, idsToBeIgnoredByPartition, idsToBeTtlUpdatedByPartition);
// verify that the messages and buffers are in sync between the local host and remoteHostInRemoteDC
checkBlobMessagesAreEqualInLocalAndRemoteHosts(localHost, remoteHostInRemoteDC, idsToBeIgnoredByPartition, idsToBeTtlUpdatedByPartition);
storageManager.shutdown();
}
Use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
The class ReplicationTestHelper, method getRemoteLeaderReplicasWithLeaderPartitionsOnLocalNode.
/**
* Get remote leader replicas on the remote node whose partitions also have leaders on the local node.
* @return set of leader replicas
*/
protected Set<ReplicaId> getRemoteLeaderReplicasWithLeaderPartitionsOnLocalNode(ClusterMap clusterMap, DataNodeId localNode, DataNodeId remoteNode) {
Set<ReplicaId> remoteLeaderReplicas = new HashSet<>();
List<? extends ReplicaId> localReplicas = clusterMap.getReplicaIds(localNode);
List<? extends ReplicaId> remoteReplicas = clusterMap.getReplicaIds(remoteNode);
for (int i = 0; i < localReplicas.size(); i++) {
MockReplicaId localReplica = (MockReplicaId) localReplicas.get(i);
MockReplicaId remoteReplica = (MockReplicaId) remoteReplicas.get(i);
if (localReplica.getReplicaState() == ReplicaState.LEADER && remoteReplica.getReplicaState() == ReplicaState.LEADER) {
remoteLeaderReplicas.add(remoteReplicas.get(i));
}
}
return remoteLeaderReplicas;
}
Use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
The class OperationTrackerTest, method populateReplicaList.
/**
* Populate replicas for a partition.
* @param replicaCount The number of replicas to populate.
* @param replicaState The {@link ReplicaState} associated with these replicas.
* @param datanodes the datanodes to populate with replicas
*/
private void populateReplicaList(int replicaCount, ReplicaState replicaState, List<MockDataNodeId> datanodes) {
for (int i = 0; i < replicaCount; i++) {
ReplicaId replicaId = new MockReplicaId(PORT, mockPartition, datanodes.get(i % datanodes.size()), 0);
mockPartition.replicaIds.add(replicaId);
mockPartition.replicaAndState.put(replicaId, replicaState);
}
}
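For context, a hedged usage sketch of how an OperationTrackerTest case might spread replica states across the mock datanodes; the counts and states here are illustrative, not taken from any specific test:
// Illustrative usage: one leader, two standbys and three inactive replicas,
// assigned round-robin over the mock datanodes backing mockPartition.
populateReplicaList(1, ReplicaState.LEADER, datanodes);
populateReplicaList(2, ReplicaState.STANDBY, datanodes);
populateReplicaList(3, ReplicaState.INACTIVE, datanodes);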
Use of com.github.ambry.clustermap.MockReplicaId in project ambry by linkedin.
The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest.
static void endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest(String sourceDatacenter, String sslEnabledDatacenters, PortType portType, MockCluster cluster, MockNotificationSystem notificationSystem, Properties routerProps) throws Exception {
Properties props = new Properties();
props.setProperty("router.hostname", "localhost");
props.setProperty("router.datacenter.name", sourceDatacenter);
props.setProperty("router.put.request.parallelism", "1");
props.setProperty("router.put.success.target", "1");
props.setProperty("clustermap.cluster.name", "test");
props.setProperty("clustermap.datacenter.name", sourceDatacenter);
props.setProperty("clustermap.host.name", "localhost");
props.setProperty("kms.default.container.key", TestUtils.getRandomKey(32));
props.putAll(routerProps);
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
AccountService accountService = new InMemAccountService(false, true);
Router router = new NonBlockingRouterFactory(verifiableProperties, cluster.getClusterMap(), notificationSystem, getSSLFactoryIfRequired(verifiableProperties), accountService).getRouter();
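// Issue numberOfRequestsToSend puts through the router; each callback either queues the blob's payload for later verification or records the exception, then counts down callbackLatch.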
int numberOfRequestsToSend = 15;
int numberOfVerifierThreads = 3;
final LinkedBlockingQueue<Payload> payloadQueue = new LinkedBlockingQueue<Payload>();
final AtomicReference<Exception> exceptionRef = new AtomicReference<>(null);
final CountDownLatch callbackLatch = new CountDownLatch(numberOfRequestsToSend);
List<Future<String>> putFutures = new ArrayList<>(numberOfRequestsToSend);
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
for (int i = 0; i < numberOfRequestsToSend; i++) {
int size = new Random().nextInt(5000);
final BlobProperties properties = new BlobProperties(size, "service1", "owner id check", "image/jpeg", false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
final byte[] metadata = new byte[new Random().nextInt(1000)];
final byte[] blob = new byte[size];
TestUtils.RANDOM.nextBytes(metadata);
TestUtils.RANDOM.nextBytes(blob);
Future<String> future = router.putBlob(properties, metadata, new ByteBufferReadableStreamChannel(ByteBuffer.wrap(blob)), new PutBlobOptionsBuilder().build(), new Callback<String>() {
@Override
public void onCompletion(String result, Exception exception) {
if (exception == null) {
payloadQueue.add(new Payload(properties, metadata, blob, result));
} else {
exceptionRef.set(exception);
}
callbackLatch.countDown();
}
}, QUOTA_CHARGE_EVENT_LISTENER);
putFutures.add(future);
}
for (Future<String> future : putFutures) {
future.get(20, TimeUnit.SECONDS);
}
assertTrue("Did not receive all callbacks in time", callbackLatch.await(1, TimeUnit.SECONDS));
if (exceptionRef.get() != null) {
throw exceptionRef.get();
}
// peek at the first payload and keep its blob ID for later requests against its partition
Payload payload1 = payloadQueue.peek();
MockClusterMap clusterMap = cluster.getClusterMap();
BlobId blobId1 = new BlobId(payload1.blobId, clusterMap);
assertEquals("Did not put expected number of blobs", numberOfRequestsToSend, payloadQueue.size());
Properties sslProps = new Properties();
sslProps.putAll(routerProps);
sslProps.setProperty("clustermap.ssl.enabled.datacenters", sslEnabledDatacenters);
sslProps.setProperty("clustermap.cluster.name", "test");
sslProps.setProperty("clustermap.datacenter.name", sourceDatacenter);
sslProps.setProperty("clustermap.host.name", "localhost");
sslProps.setProperty("connectionpool.read.timeout.ms", "15000");
VerifiableProperties vProps = new VerifiableProperties(sslProps);
ConnectionPool connectionPool = new BlockingChannelConnectionPool(new ConnectionPoolConfig(vProps), new SSLConfig(vProps), new ClusterMapConfig(vProps), new MetricRegistry());
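// Start verifier threads that drain payloadQueue and validate the stored blobs across the cluster; each thread counts down verifierLatch when finished.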
CountDownLatch verifierLatch = new CountDownLatch(numberOfVerifierThreads);
AtomicInteger totalRequests = new AtomicInteger(numberOfRequestsToSend);
AtomicInteger verifiedRequests = new AtomicInteger(0);
AtomicBoolean cancelTest = new AtomicBoolean(false);
for (int i = 0; i < numberOfVerifierThreads; i++) {
Thread thread = new Thread(new Verifier(payloadQueue, verifierLatch, totalRequests, verifiedRequests, cluster.getClusterMap(), cancelTest, portType, connectionPool, notificationSystem, cluster.time));
thread.start();
}
assertTrue("Did not verify in 2 minutes", verifierLatch.await(2, TimeUnit.MINUTES));
assertEquals(totalRequests.get(), verifiedRequests.get());
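// Check out a direct connection to the first data node so that admin and store-level requests can be sent to it.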
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
MockDataNodeId dataNodeId = clusterMap.getDataNodes().get(0);
Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
ConnectedChannel channel = connectionPool.checkOutConnection("localhost", port, 10000);
PartitionId partitionId = blobId1.getPartition();
// stop the store via AdminRequest
System.out.println("Begin to stop a BlobStore");
AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientid2");
BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StopStore, adminRequest);
DataInputStream stream = channel.sendAndReceive(controlRequest).getInputStream();
AdminResponse adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Stop store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
// put a blob on a stopped store, which should fail
byte[] userMetadata = new byte[1000];
byte[] data = new byte[3187];
BlobProperties properties = new BlobProperties(3187, "serviceid1", accountId, containerId, false, cluster.time.milliseconds());
BlobId blobId2 = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partitionId, false, BlobId.BlobDataType.DATACHUNK);
PutRequest putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
DataInputStream putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, response2.getError());
// get a blob properties on a stopped store, which should fail
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(blobId1);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
assertEquals("Get blob properties on stopped store should fail", ServerErrorCode.Replica_Unavailable, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// delete a blob on a stopped store, which should fail
DeleteRequest deleteRequest = new DeleteRequest(1, "clientId1", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, deleteResponse.getError());
// start the store via AdminRequest
System.out.println("Begin to restart the BlobStore");
adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientId");
controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StartStore, adminRequest);
stream = channel.sendAndReceive(controlRequest).getInputStream();
adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Start store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
List<? extends ReplicaId> replicaIds = partitionId.getReplicaIds();
for (ReplicaId replicaId : replicaIds) {
// forcibly mark replicas and disks as up.
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
mockReplicaId.markReplicaDownStatus(false);
((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, false);
}
// put a blob on a restarted store, which should succeed
putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on restarted store should succeed", ServerErrorCode.No_Error, response2.getError());
// verify the put blob has been replicated successfully.
notificationSystem.awaitBlobCreations(blobId2.getID());
// get a blob on a restarted store, which should succeed
ids = new ArrayList<BlobId>();
ids.add(blobId2);
partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest2 = new GetRequest(1, "clientId2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest2).getInputStream();
GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
InputStream responseStream = resp2.getInputStream();
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch.", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
// delete a blob on a restarted store, which should succeed
deleteRequest = new DeleteRequest(1, "clientId2", blobId2, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on restarted store should succeed", ServerErrorCode.No_Error, deleteResponse.getError());
router.close();
connectionPool.shutdown();
}
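The two BlobStoreControl round-trips above (StopStore, then StartStore) share the same shape. A minimal sketch of a shared helper, using only calls already present in this method; the helper name sendBlobStoreControl is hypothetical:
// Hypothetical helper: wrap a BlobStoreControl action in an AdminRequest, send it over
// the channel, read the AdminResponse and release the Netty buffer underneath the stream.
static ServerErrorCode sendBlobStoreControl(ConnectedChannel channel, PartitionId partitionId, BlobStoreControlAction action) throws Exception {
AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientId");
BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, action, adminRequest);
DataInputStream stream = channel.sendAndReceive(controlRequest).getInputStream();
AdminResponse adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
return adminResponse.getError();
}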