use of com.github.ambry.cloud.CloudDestinationFactory in project ambry by linkedin.
the class ServerTestUtil method endToEndCloudBackupTest.
/**
* Tests that blobs put to a dataNode can be backed up by {@link com.github.ambry.cloud.VcrReplicationManager}.
* @param cluster the {@link MockCluster} of dataNodes.
* @param zkConnectString the ZK endpoint used to set up the VCR cluster.
* @param vcrClusterName the name of the VCR cluster.
* @param dataNode the datanode where blobs are originally put.
* @param clientSSLConfig the {@link SSLConfig}.
* @param clientSSLSocketFactory the {@link SSLSocketFactory}.
* @param notificationSystem the {@link MockNotificationSystem} used to track blob events in the {@link MockCluster}.
* @param vcrSSLProps SSL related properties for VCR. Can be {@code null}.
* @param doTtlUpdate send a ttlUpdate request for each blob if {@code true}.
*/
static void endToEndCloudBackupTest(MockCluster cluster, String zkConnectString, String vcrClusterName, DataNodeId dataNode, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory, MockNotificationSystem notificationSystem, Properties vcrSSLProps, boolean doTtlUpdate) throws Exception {
int blobBackupCount = 10;
int blobSize = 100;
int userMetaDataSize = 100;
ClusterAgentsFactory clusterAgentsFactory = cluster.getClusterAgentsFactory();
// Send blobs to DataNode
byte[] userMetadata = new byte[userMetaDataSize];
byte[] data = new byte[blobSize];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
long ttl = doTtlUpdate ? TimeUnit.DAYS.toMillis(1) : Utils.Infinite_Time;
BlobProperties properties = new BlobProperties(blobSize, "serviceid1", null, null, false, ttl, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
Port port;
if (clientSSLConfig == null) {
  port = new Port(dataNode.getPort(), PortType.PLAINTEXT);
} else {
  port = new Port(dataNode.getSSLPort(), PortType.SSL);
}
ConnectedChannel channel = getBlockingChannelBasedOnPortType(port, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
CountDownLatch latch = new CountDownLatch(1);
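// DirectSender puts blobBackupCount blobs through the channel on its own thread and counts the latch down once all puts complete.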
DirectSender runnable = new DirectSender(cluster, channel, blobBackupCount, data, userMetadata, properties, null, latch);
Thread threadToRun = new Thread(runnable);
threadToRun.start();
assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
// TODO: remove this temp fix after fixing race condition in MockCluster/MockNotificationSystem
Thread.sleep(3000);
List<BlobId> blobIds = runnable.getBlobIds();
for (BlobId blobId : blobIds) {
  notificationSystem.awaitBlobCreations(blobId.getID());
  if (doTtlUpdate) {
    updateBlobTtl(channel, blobId, cluster.time.milliseconds());
  }
}
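// Register the VCR cluster info in ZooKeeper and start a Helix controller for it.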
HelixControllerManager helixControllerManager = VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, cluster.getClusterMap());
// Start the VCR and CloudBackupManager
Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410, 12510, vcrSSLProps);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, clusterAgentsFactory.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), clusterAgentsFactory, notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// Wait for the backup to complete.
assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
Map<String, CloudBlobMetadata> cloudBlobMetadataMap = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIds);
for (BlobId blobId : blobIds) {
  CloudBlobMetadata cloudBlobMetadata = cloudBlobMetadataMap.get(blobId.toString());
  assertNotNull("cloudBlobMetadata should not be null", cloudBlobMetadata);
  assertEquals("AccountId mismatch", accountId, cloudBlobMetadata.getAccountId());
  assertEquals("ContainerId mismatch", containerId, cloudBlobMetadata.getContainerId());
  assertEquals("Expiration time mismatch", Utils.Infinite_Time, cloudBlobMetadata.getExpirationTime());
  // TODO: verify other metadata and blob data
}
vcrServer.shutdown();
helixControllerManager.syncStop();
}
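Note how the test constructs the LatchBasedInMemoryCloudDestination first, so it can later await its upload latch, and only then wraps it in a CloudDestinationFactory for the VcrServer. A minimal sketch of that wrapper pattern follows, assuming getCloudDestination() is the only abstract method of the interface (as in the usages shown here); the class name is illustrative, not part of Ambry.
class SharedInstanceCloudDestinationFactory implements CloudDestinationFactory {
  private final CloudDestination destination;
  SharedInstanceCloudDestinationFactory(CloudDestination destination) {
    this.destination = destination;
  }
  @Override
  public CloudDestination getCloudDestination() {
    // Always hand back the pre-built destination so the test and the VcrServer observe the same uploads.
    return destination;
  }
}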
use of com.github.ambry.cloud.CloudDestinationFactory in project ambry by linkedin.
the class VcrAutomationTest method testSimpleVcrAutomation.
/**
* Test basic partition add and remove cases.
*/
@Test
public void testSimpleVcrAutomation() throws Exception {
List<ZkInfo> zkInfoList = new ArrayList<>();
String mainClusterStateModelDef = ClusterMapConfig.AMBRY_STATE_MODEL_DEF;
DataNodeConfigSourceType dataNodeConfigSourceType = DataNodeConfigSourceType.INSTANCE_CONFIG;
int zkPort = 2100;
int numberOfDataNode = 3;
int partitionCount = 9;
int newPartitionCount = 13;
String zkHostName = "localhost";
String zkConnectString = zkHostName + ":" + zkPort;
String clusterPrefix = "";
String clusterName = "MainCluster";
String vcrClusterName = "VcrCluster";
String dcName = "DC0";
TestHardwareLayout testHardwareLayout;
TestPartitionLayout testPartitionLayout;
String hardwareLayoutPath;
String partitionLayoutPath;
String zkLayoutPath;
zkInfoList.add(new ZkInfo(TestUtils.getTempDir("tempZk"), "DC0", (byte) 0, zkPort, true));
String tempDirPath = getTempDir(clusterName + "-");
hardwareLayoutPath = tempDirPath + "/hardwareLayoutTest.json";
partitionLayoutPath = tempDirPath + "/partitionLayoutTest.json";
zkLayoutPath = tempDirPath + "/zkLayoutPath.json";
testHardwareLayout = new TestHardwareLayout(clusterName, 1, 10737418240L, numberOfDataNode, 1, 18088, 20, false);
testPartitionLayout = constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount, null);
JSONObject zkJson = constructZkLayoutJSON(zkInfoList);
Utils.writeJsonObjectToFile(zkJson, zkLayoutPath);
Utils.writeJsonObjectToFile(testHardwareLayout.getHardwareLayout().toJSONObject(), hardwareLayoutPath);
Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
Properties props = VcrTestUtil.createVcrProperties("DC0", vcrClusterName, zkConnectString, 12300, 12400, 12510, null);
props.setProperty("clustermap.host.name", "localhost");
props.setProperty("clustermap.port", "1100");
props.setProperty("clustermap.cluster.name", clusterName);
props.setProperty("clustermap.datacenter.name", dcName);
props.setProperty("clustermap.dcs.zk.connect.strings", zkJson.toString(2));
props.setProperty("clustermap.state.model.definition", mainClusterStateModelDef);
props.setProperty("clustermap.data.node.config.source.type", dataNodeConfigSourceType.name());
props.setProperty("clustermap.cluster.change.handler.type", "DynamicClusterChangeHandler");
props.setProperty("vcr.helix.updater.partition.id", "1");
props.setProperty("vcr.helix.update.delay.time.in.seconds", "1");
HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix, dcName, 10, false, false, new HelixAdminFactory(), false, mainClusterStateModelDef, HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
HelixControllerManager helixControllerManager = new HelixControllerManager(zkConnectString, clusterPrefix + clusterName);
helixControllerManager.syncStart();
// Main cluster helix setup done.
HelixVcrUtil.VcrHelixConfig vcrHelixConfig;
String vcConfigData = CloudConfig.DEFAULT_VCR_HELIX_UPDATE_CONFIG;
try (InputStream input = new ByteArrayInputStream(vcConfigData.getBytes())) {
  vcrHelixConfig = new ObjectMapper().readValue(input, HelixVcrUtil.VcrHelixConfig.class);
} catch (IOException ex) {
  throw new IllegalStateException("Could not load config from config data: " + vcConfigData, ex);
}
HelixVcrUtil.createCluster(zkConnectString, vcrClusterName, vcrHelixConfig);
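// Mirror the main cluster's resources and partitions into the VCR cluster (src = main cluster, dest = VCR cluster).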
HelixVcrUtil.updateResourceAndPartition(zkConnectString, clusterPrefix + clusterName, zkConnectString, vcrClusterName, vcrHelixConfig, false);
Assert.assertTrue("Dest and Src should be same", isSrcDestSync(zkConnectString, clusterPrefix + clusterName, zkConnectString, vcrClusterName));
HelixControllerManager vcrHelixControllerManager = new HelixControllerManager(zkConnectString, vcrClusterName);
vcrHelixControllerManager.syncStart();
StrictMatchExternalViewVerifier helixBalanceVerifier = new StrictMatchExternalViewVerifier(zkConnectString, vcrClusterName, Collections.singleton(VcrTestUtil.helixResource), null);
// VCR cluster helix setup done.
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
HelixClusterAgentsFactory helixClusterAgentsFactory = new HelixClusterAgentsFactory(clusterMapConfig, null, null);
VcrServer vcrServer = new VcrServer(verifiableProperties, helixClusterAgentsFactory, null, new CloudDestinationFactory() {
  @Override
  public CloudDestination getCloudDestination() throws IllegalStateException {
    return mock(CloudDestination.class);
  }
}, null);
vcrServer.startup();
makeSureHelixBalance(vcrServer, helixBalanceVerifier);
Assert.assertTrue("Partition assignment is not correct.", TestUtils.checkAndSleep(partitionCount, () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
// VCR server startup done.
// Partition add case:
testPartitionLayout = constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount + newPartitionCount, null);
Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix, dcName, 10, false, false, new HelixAdminFactory(), false, mainClusterStateModelDef, HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
makeSureHelixBalance(vcrServer, helixBalanceVerifier);
Assert.assertTrue("Partition assignment is not correct.", TestUtils.checkAndSleep(partitionCount + newPartitionCount, () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
// Partition remove case:
testPartitionLayout = constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount, null);
Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix, dcName, 10, false, true, new HelixAdminFactory(), false, mainClusterStateModelDef, HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
makeSureHelixBalance(vcrServer, helixBalanceVerifier);
Assert.assertTrue("Partition assignment is not correct.", TestUtils.checkAndSleep(partitionCount, () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
helixControllerManager.syncStop();
vcrHelixControllerManager.syncStop();
zkInfoList.get(0).shutdown();
}
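testSimpleVcrAutomation repeatedly uses TestUtils.checkAndSleep(expected, supplier, timeoutMs) to wait for the assigned-partition count to converge after each layout change. The polling idiom behind such a helper can be sketched as below; this is an inference from how it is called here, not Ambry's actual implementation.
static <T> boolean pollUntilEquals(T expected, java.util.function.Supplier<T> actual, long timeoutMs)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    if (expected.equals(actual.get())) {
      return true;
    }
    // Back off briefly before re-evaluating the supplier.
    Thread.sleep(100);
  }
  return expected.equals(actual.get());
}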
use of com.github.ambry.cloud.CloudDestinationFactory in project ambry by linkedin.
the class VcrBackupTest method basicTest.
/**
* Basic test to make sure the VCR can back up with HelixVcrCluster.
*/
@Test
public void basicTest() throws Exception {
List<BlobId> blobIds = sendBlobToDataNode(dataNode, 10);
// Start the VCR and CloudBackupManager
Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, clusterMapPort, 12410, 12510, serverSSLProps, vcrHelixStateModelFactoryClass, true);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, mockCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), mockCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// Wait for the backup to complete.
assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
// Verify a blob by making an HTTP2 request.
MockClusterMap clusterMap = mockCluster.getClusterMap();
SSLConfig clientSSLConfig = new SSLConfig(new VerifiableProperties(clientSSLProps));
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(clusterMap.getDataNodes().get(0).getHttp2Port(), PortType.HTTP2), "localhost", null, clientSSLConfig);
BlobId blobToVerify = blobIds.get(0);
ArrayList<BlobId> idList = new ArrayList<>(Arrays.asList(blobToVerify));
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobToVerify.getPartition(), idList);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
try {
  BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
  // Do a simple check
  assertEquals(blobSize, propertyOutput.getBlobSize());
  releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
  fail();
}
vcrServer.shutdown();
assertTrue("VCR server shutdown timeout.", vcrServer.awaitShutdown(5000));
}
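basicTest blocks on awaitUpload(...) until every blob sent to the data node has reached the in-memory destination. The latch mechanics behind that call can be sketched roughly as follows; this is a simplified assumption about the test utility, not its real code.
class UploadLatchSketch {
  private final java.util.Set<String> expectedIds = java.util.concurrent.ConcurrentHashMap.newKeySet();
  private final java.util.concurrent.CountDownLatch latch;
  UploadLatchSketch(java.util.Collection<String> expectedBlobIds) {
    expectedIds.addAll(expectedBlobIds);
    latch = new java.util.concurrent.CountDownLatch(expectedBlobIds.size());
  }
  void onUpload(String blobId) {
    // Count down only for blobs the test declared as expected up front.
    if (expectedIds.remove(blobId)) {
      latch.countDown();
    }
  }
  boolean awaitUpload(long timeout, java.util.concurrent.TimeUnit unit) throws InterruptedException {
    return latch.await(timeout, unit);
  }
}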
use of com.github.ambry.cloud.CloudDestinationFactory in project ambry by linkedin.
the class VcrBackupTest method multipleVcrTest.
/**
* A multiple-VCR test to verify Helix assignment and backup.
*/
@Test
public void multipleVcrTest() throws Exception {
StrictMatchExternalViewVerifier helixBalanceVerifier = new StrictMatchExternalViewVerifier(zkConnectString, vcrClusterName, Collections.singleton(VcrTestUtil.helixResource), null);
int initialNumOfVcrs = 5;
// Create a shared in-memory destination.
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(new ArrayList<>(), mockCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
// 1st phase: Start VCRs to do backup.
List<VcrServer> vcrServers = new ArrayList<>();
List<MockNotificationSystem> vcrNotificationSystems = new ArrayList<>();
for (int port = 12310; port < 12310 + initialNumOfVcrs; port++) {
  Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, port, port + 100, port + 200, null, vcrHelixStateModelFactoryClass, true);
  MockNotificationSystem vcrNotificationSystem = new MockNotificationSystem(mockCluster.getClusterMap());
  VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), mockCluster.getClusterAgentsFactory(), vcrNotificationSystem, cloudDestinationFactory);
  vcrServer.startup();
  vcrServers.add(vcrServer);
  vcrNotificationSystems.add(vcrNotificationSystem);
}
makeSureHelixBalance(vcrServers.get(vcrServers.size() - 1), helixBalanceVerifier);
int numOfBlobs = 100;
sendBlobToDataNode(dataNode, numOfBlobs);
// Make sure blobs are backed up.
TestUtils.checkAndSleep(numOfBlobs, () -> vcrNotificationSystems.stream().mapToInt(i -> i.getBlobIds().size()).sum(), 5000);
// Verify each VCR is only replicating partitions assigned to it.
for (int i = 0; i < initialNumOfVcrs; i++) {
  Set<PartitionId> partitionIdSet = vcrNotificationSystems.get(i).getBlobIds().stream().map(blobIdStr -> {
    try {
      return new BlobId(blobIdStr, mockCluster.getClusterMap()).getPartition();
    } catch (IOException e) {
      e.printStackTrace();
      return null;
    }
  }).collect(Collectors.toSet());
  assertTrue("Each VCR should have some assignment.", vcrServers.get(i).getVcrClusterParticipant().getAssignedPartitionIds().size() > 0);
  assertTrue("Each VCR should only back up its assigned partitions.", new HashSet<>(vcrServers.get(i).getVcrClusterParticipant().getAssignedPartitionIds()).containsAll(partitionIdSet));
}
logger.info("Phase 1 done.");
// 2nd phase: Add a new VCR to cluster.
Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310 + initialNumOfVcrs, 12310 + initialNumOfVcrs + 100, 12310 + initialNumOfVcrs + 200, null, vcrHelixStateModelFactoryClass, true);
MockNotificationSystem vcrNotificationSystem = new MockNotificationSystem(mockCluster.getClusterMap());
VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), mockCluster.getClusterAgentsFactory(), vcrNotificationSystem, cloudDestinationFactory);
vcrServer.startup();
vcrServers.add(vcrServer);
vcrNotificationSystems.add(vcrNotificationSystem);
makeSureHelixBalance(vcrServers.get(vcrServers.size() - 1), helixBalanceVerifier);
int secondNumOfBlobs = 100;
sendBlobToDataNode(dataNode, secondNumOfBlobs);
Assert.assertTrue("All blobs should be back up.", TestUtils.checkAndSleep(numOfBlobs + secondNumOfBlobs, () -> vcrNotificationSystems.stream().mapToInt(i -> i.getBlobIds().size()).sum(), 5000));
logger.info("Phase 2 done.");
// 3rd phase: Remove last VCR from cluster.
vcrServers.get(vcrServers.size() - 1).shutdown();
assertTrue("VCR server shutdown timeout.", vcrServers.get(vcrServers.size() - 1).awaitShutdown(5000));
// Error metrics should be zero.
Assert.assertEquals("Error count should be zero", 0, vcrServers.get(vcrServers.size() - 1).getVcrReplicationManager().getVcrMetrics().addPartitionErrorCount.getCount());
Assert.assertEquals("Error count should be zero", 0, vcrServers.get(vcrServers.size() - 1).getVcrReplicationManager().getVcrMetrics().removePartitionErrorCount.getCount());
int temp = vcrNotificationSystems.get(vcrNotificationSystems.size() - 1).getBlobIds().size();
assertTrue("Helix balance timeout.", helixBalanceVerifier.verify(5000));
int thirdNumOfBlobs = 100;
sendBlobToDataNode(dataNode, thirdNumOfBlobs);
Assert.assertTrue("All blobs should be back up.", TestUtils.checkAndSleep(numOfBlobs + secondNumOfBlobs + thirdNumOfBlobs, () -> vcrNotificationSystems.stream().mapToInt(i -> i.getBlobIds().size()).sum(), 5000));
Assert.assertEquals("The removed vcr shouldn't have any change", temp, vcrNotificationSystems.get(vcrNotificationSystems.size() - 1).getBlobIds().size());
logger.info("Phase 3 done.");
// Shutdown all others.
for (int i = 0; i < initialNumOfVcrs; i++) {
  // Error metrics should be zero.
  Assert.assertEquals("Error count should be zero", 0, vcrServers.get(i).getVcrReplicationManager().getVcrMetrics().addPartitionErrorCount.getCount());
  Assert.assertEquals("Error count should be zero", 0, vcrServers.get(i).getVcrReplicationManager().getVcrMetrics().removePartitionErrorCount.getCount());
  vcrServers.get(i).shutdown();
  assertTrue("VCR server shutdown timeout.", vcrServers.get(i).awaitShutdown(5000));
}
logger.info("Test done.");
}
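The per-VCR verification loop in multipleVcrTest is essentially a subset check: every partition a VCR actually backed up must be among the partitions Helix assigned to it. A compact, illustrative re-expression using the same BlobId(String, ClusterMap) constructor and getPartition() call as above:
static boolean backedUpOnlyAssignedPartitions(java.util.Collection<String> backedUpBlobIds,
    java.util.Collection<? extends PartitionId> assignedPartitions, ClusterMap clusterMap) throws IOException {
  java.util.Set<PartitionId> backedUpPartitions = new java.util.HashSet<>();
  for (String blobIdStr : backedUpBlobIds) {
    // Map each backed-up blob id back to the partition it belongs to.
    backedUpPartitions.add(new BlobId(blobIdStr, clusterMap).getPartition());
  }
  return new java.util.HashSet<PartitionId>(assignedPartitions).containsAll(backedUpPartitions);
}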
use of com.github.ambry.cloud.CloudDestinationFactory in project ambry by linkedin.
the class CloudAndStoreReplicationTest method setup.
/**
* Create a cluster with one VCR node and two ambry server data nodes.
* @throws Exception if cluster setup fails.
*/
@Before
public void setup() throws Exception {
String vcrMountPath = ClusterMapSnapshotConstants.CLOUD_REPLICA_MOUNT + "/1";
recoveryProperties = new Properties();
recoveryProperties.setProperty("replication.metadata.request.version", "2");
recoveryProperties.setProperty("replication.enabled.with.vcr.cluster", "true");
recoveryProperties.setProperty("clustermap.vcr.datacenter.name", cloudDc);
if (!vcrRecoveryPartitionConfig.isEmpty()) {
  recoveryProperties.setProperty("vcr.recovery.partitions", vcrRecoveryPartitionConfig);
}
TestSSLUtils.addHttp2Properties(recoveryProperties, SSLFactory.Mode.SERVER, true);
// create vcr node
List<Port> vcrPortList = Arrays.asList(new Port(12310, PortType.PLAINTEXT), new Port(12410, PortType.SSL));
MockDataNodeId vcrNode = new MockDataNodeId("localhost", vcrPortList, Collections.singletonList(vcrMountPath), cloudDc);
// create ambry server recovery cluster
MockClusterMap serverClusterMap = new MockClusterMap(false, true, 2, 1, 1, true, false, null);
recoveryCluster = new MockCluster(serverClusterMap, Collections.singletonList(vcrNode), recoveryProperties);
partitionId = recoveryCluster.getClusterMap().getWritablePartitionIds(null).get(0);
allRecoveryNodes = serverClusterMap.getDataNodes();
// Record the ambry server node that will get partition leadership notifications.
partitionLeaderRecoveryNode = allRecoveryNodes.get(0);
MockClusterAgentsFactory leaderMockClusterAgentsFactory = new MockClusterAgentsFactory(serverClusterMap, serverClusterMap.getAllPartitionIds(null).stream().map(PartitionId::toPathString).collect(Collectors.toList()));
// Start Helix Controller and ZK Server.
if (!zkInfo.isZkServerStarted()) {
  zkInfo.startZkServer();
}
helixControllerManager = VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, recoveryCluster.getClusterMap());
Properties vcrProperties = VcrTestUtil.createVcrProperties(vcrNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410, 12510, null);
vcrProperties.putAll(recoveryProperties);
MockNotificationSystem notificationSystem = new MockNotificationSystem(recoveryCluster.getClusterMap());
// Create blobs and data for upload to the VCR.
int blobCount = 10;
cloudBlobIds = ServerTestUtil.createBlobIds(blobCount, recoveryCluster.getClusterMap(), accountId, containerId, partitionId);
serverBlobIds = ServerTestUtil.createBlobIds(blobCount, recoveryCluster.getClusterMap(), accountId, containerId, partitionId);
// Create the cloud destination and start the VCR server.
latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(cloudBlobIds, recoveryCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(vcrProperties), recoveryCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// Initialize and start the ambry servers.
for (MockDataNodeId serverNode : allRecoveryNodes) {
  AmbryServer server = recoveryCluster.initializeServer(serverNode, recoveryProperties, false, notificationSystem, SystemTime.getInstance(), serverNode.equals(partitionLeaderRecoveryNode) ? leaderMockClusterAgentsFactory : null);
  recoveryCluster.addServer(server);
}
recoveryCluster.startServers();
}
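The recovery-related configuration in this setup boils down to a handful of property keys. The keys below are exactly the ones set above; the helper method that bundles them is only an illustrative convenience, not part of Ambry.
static Properties vcrRecoveryProperties(String cloudDcName, String recoveryPartitions) {
  Properties props = new Properties();
  props.setProperty("replication.metadata.request.version", "2");
  props.setProperty("replication.enabled.with.vcr.cluster", "true");
  props.setProperty("clustermap.vcr.datacenter.name", cloudDcName);
  if (recoveryPartitions != null && !recoveryPartitions.isEmpty()) {
    // Restrict VCR recovery to an explicit partition list when one is configured.
    props.setProperty("vcr.recovery.partitions", recoveryPartitions);
  }
  return props;
}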