Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method startClusterManager.
/**
 * Start the Helix Cluster Manager to be used for validation.
 * @throws IOException if there was an error instantiating the cluster manager.
 */
private void startClusterManager() throws IOException {
  ClusterMapConfig clusterMapConfig =
      getClusterMapConfig(clusterName, adminForDc.keySet().iterator().next(), zkLayoutPath);
  HelixClusterAgentsFactory helixClusterAgentsFactory = new HelixClusterAgentsFactory(clusterMapConfig, null, null);
  validatingHelixClusterManager = helixClusterAgentsFactory.getClusterMap();
}
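The getClusterMapConfig helper referenced here lives elsewhere in HelixBootstrapUpgradeUtil and is not shown on this page. For readers without the full source, a minimal sketch of assembling a ClusterMapConfig from raw properties might look like the following; the property names are taken from the test snippets further down this page, while the hostname and port values are placeholders, and the real helper also wires in the zkLayoutPath argument.

// Illustrative sketch only, not the actual Ambry helper: builds a
// ClusterMapConfig from the minimum required properties. Property names are
// taken from the test snippets below; "localhost" and "12300" are placeholders.
private static ClusterMapConfig sketchClusterMapConfig(String clusterName, String dcName) {
  Properties props = new Properties();
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.port", "12300");
  props.setProperty("clustermap.cluster.name", clusterName);
  props.setProperty("clustermap.datacenter.name", dcName);
  return new ClusterMapConfig(new VerifiableProperties(props));
}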
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method migrateToPropertyStore.
/**
 * Convert instance configs to the new DataNodeConfig format and persist them in the property store.
 */
private void migrateToPropertyStore() throws InterruptedException {
  CountDownLatch migrationComplete = new CountDownLatch(adminForDc.size());
  // different DCs can be migrated in parallel
  adminForDc.forEach((dcName, helixAdmin) -> Utils.newThread(() -> {
    try {
      logger.info("Starting property store migration in {}", dcName);
      ClusterMapConfig config = getClusterMapConfig(clusterName, dcName, null);
      InstanceConfigToDataNodeConfigAdapter.Converter instanceConfigConverter =
          new InstanceConfigToDataNodeConfigAdapter.Converter(config);
      String zkConnectStr = dataCenterToZkAddress.get(dcName).getZkConnectStrs().get(0);
      try (DataNodeConfigSource source = new PropertyStoreToDataNodeConfigAdapter(zkConnectStr, config)) {
        List<String> instanceNames = helixAdmin.getInstancesInCluster(clusterName);
        logger.info("Found {} instances in cluster", instanceNames.size());
        instanceNames.forEach(instanceName -> {
          logger.info("Copying config for node {}", instanceName);
          InstanceConfig instanceConfig = helixAdmin.getInstanceConfig(clusterName, instanceName);
          DataNodeConfig dataNodeConfig = instanceConfigConverter.convert(instanceConfig);
          logger.debug("Writing {} to property store in {}", dataNodeConfig, dcName);
          if (!source.set(dataNodeConfig)) {
            logger.error("Failed to persist config for node {} in the property store.",
                dataNodeConfig.getInstanceName());
          }
        });
      }
      logger.info("Successfully migrated to property store in {}", dcName);
    } catch (Throwable t) {
      logger.error("Error while migrating to property store in {}", dcName, t);
    } finally {
      migrationComplete.countDown();
    }
  }, false).start());
  migrationComplete.await();
}
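The method above is the standard CountDownLatch fan-out/await idiom: one thread per datacenter, each counting down in a finally block so that a failed migration cannot hang the caller. A self-contained illustration of the same idiom using only the JDK (no Ambry types):

import java.util.List;
import java.util.concurrent.CountDownLatch;

public final class FanOutAwait {
  // Runs each task on its own thread and blocks until all have finished,
  // mirroring the per-DC migration pattern above. countDown() sits in a
  // finally block so an exception in a worker still releases the caller.
  public static void runAll(List<Runnable> tasks) throws InterruptedException {
    CountDownLatch done = new CountDownLatch(tasks.size());
    for (Runnable task : tasks) {
      new Thread(() -> {
        try {
          task.run();
        } finally {
          done.countDown();
        }
      }).start();
    }
    done.await();
  }
}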
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class CloudBlobStoreTest, method testPutWithTtl.
/**
 * Test PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after both the PUT and the TtlUpdate, or after the TtlUpdate only.
 * The PUT may already be expired, its expiration time may be less than the upload threshold,
 * or its expiration time may be greater than or equal to the upload threshold.
 * @throws Exception on any error during the test.
 */
@Test
public void testPutWithTtl() throws Exception {
  // Set up remote host
  MockClusterMap clusterMap = new MockClusterMap();
  MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  PartitionId partitionId = partitionIds.get(0);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
  // Generate BlobIds for the following PUTs.
  short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
  List<BlobId> blobIdList = new ArrayList<>();
  for (int i = 0; i < 6; i++) {
    blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId,
        containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
  }
  // Set up VCR
  Properties props = new Properties();
  setBasicProperties(props);
  props.setProperty("clustermap.port", "12300");
  props.setProperty("vcr.ssl.port", "12345");
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
  CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
  LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination =
      new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
  CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
  CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId,
      latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
  cloudBlobStore.start();
  // Create a ReplicaThread and add the RemoteReplicaInfo to it.
  ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
  ReplicaThread replicaThread =
      new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
          new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null,
          storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(),
          new ResponseHandler(clusterMap), new MockTime(), null, null, null);
  for (ReplicaId replica : partitionId.getReplicaIds()) {
    if (replica.getDataNodeId() == remoteHost.dataNodeId) {
      RemoteReplicaInfo remoteReplicaInfo =
          new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE,
              SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
      replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
      break;
    }
  }
  long referenceTime = System.currentTimeMillis();
  // Case 1: the PUT is already expired. Replication happens after the PUT and after the TtlUpdate.
  // Upload to the cloud only after replicating the TtlUpdate.
  BlobId id = blobIdList.get(0);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime - 2000, referenceTime - 1000);
  replicaThread.replicate();
  assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 2: the PUT is already expired. Replication happens after the TtlUpdate only.
  // Upload to the cloud only after replicating the TtlUpdate.
  id = blobIdList.get(1);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime - 2000, referenceTime - 1000);
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 3: the PUT TTL is less than cloudConfig.vcrMinTtlDays. Replication happens after the PUT and after the
  // TtlUpdate. Upload to the cloud only after replicating the TtlUpdate.
  id = blobIdList.get(2);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
  replicaThread.replicate();
  if (isVcr) {
    assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  } else {
    assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  }
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 4: the PUT TTL is less than cloudConfig.vcrMinTtlDays. Replication happens after the TtlUpdate only.
  // Upload to the cloud only after replicating the TtlUpdate.
  id = blobIdList.get(3);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 5: the PUT TTL is greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after the PUT
  // and after the TtlUpdate. Upload to the cloud after the PUT; update the TTL after the TtlUpdate.
  id = blobIdList.get(4);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 6: the PUT TTL is greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after the
  // TtlUpdate only. Upload to the cloud after the TtlUpdate.
  id = blobIdList.get(5);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost),
      referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
      Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Verify the expiration time of all blobs.
  Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
  for (BlobId blobId : blobIdList) {
    assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time,
        map.get(blobId.toString()).getExpirationTime());
  }
}
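All six cases pivot on whether the blob's remaining TTL at PUT time reaches cloudConfig.vcrMinTtlDays. The following is a hypothetical distillation of that gate, inferred from the test's comments and assertions; the actual CloudBlobStore logic may differ in detail, and the INFINITE_TIME constant stands in for Ambry's Utils.Infinite_Time sentinel.

import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the upload gate exercised by cases 3-6: a PUT is
// uploaded to the cloud immediately only if the blob is permanent or its
// remaining TTL is at least vcrMinTtlDays; otherwise the VCR waits for a
// TtlUpdate before uploading.
static final long INFINITE_TIME = -1; // assumed to match Utils.Infinite_Time
static boolean uploadOnPut(long expirationTimeMs, long nowMs, int vcrMinTtlDays) {
  if (expirationTimeMs == INFINITE_TIME) {
    return true; // permanent blobs always qualify for immediate upload
  }
  return expirationTimeMs - nowMs >= TimeUnit.DAYS.toMillis(vcrMinTtlDays);
}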
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class HelixVcrClusterParticipantTest, method createHelixVcrClusterParticipant.
/**
 * Helper function to create a {@link HelixVcrClusterParticipant}.
 * @param clusterMapPort the cluster map port of the instance.
 * @param vcrSslPort the SSL port of this VCR.
 */
private VcrClusterParticipant createHelixVcrClusterParticipant(int clusterMapPort, int vcrSslPort) throws Exception {
  Properties props = new Properties();
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.resolve.hostnames", "false");
  props.setProperty("clustermap.cluster.name", "clusterName");
  props.setProperty("clustermap.datacenter.name", "DC1");
  props.setProperty("clustermap.ssl.enabled.datacenters", "DC1,DC2");
  props.setProperty("clustermap.port", Integer.toString(clusterMapPort));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  props = new Properties();
  props.setProperty("vcr.ssl.port", Integer.toString(vcrSslPort));
  props.setProperty(CloudConfig.VCR_CLUSTER_ZK_CONNECT_STRING, ZK_SERVER_HOSTNAME + ":" + ZK_SERVER_PORT);
  props.setProperty(CloudConfig.VCR_CLUSTER_NAME, VCR_CLUSTER_NAME);
  VerifiableProperties verifiableProperties = new VerifiableProperties(props);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  return new HelixVcrClusterAgentsFactory(cloudConfig, clusterMapConfig, mockClusterMap,
      Mockito.mock(AccountService.class), new StoreConfig(verifiableProperties), null,
      new MetricRegistry()).getVcrClusterParticipant();
}
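A plausible call site for this helper, sketched here for illustration: each participant needs its own clustermap and SSL port pair, while both share the same VCR cluster name and ZooKeeper connect string baked into the CloudConfig above. The port values are arbitrary placeholders, not values from the test.

// Hypothetical usage: two participants on distinct port pairs so both can
// register with the same VCR Helix cluster. Further interaction with the
// participants (joining the cluster, shutdown) is omitted here.
VcrClusterParticipant participant1 = createHelixVcrClusterParticipant(8123, 8124);
VcrClusterParticipant participant2 = createHelixVcrClusterParticipant(8125, 8126);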
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class CloudTokenPersistorTest, method basicTest.
@Test
public void basicTest() throws Exception {
  Properties props =
      VcrTestUtil.createVcrProperties("DC1", "vcrClusterName", "zkConnectString", 12310, 12410, 12510, null);
  props.setProperty("replication.cloud.token.factory", replicationCloudTokenFactory);
  CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  ClusterMap clusterMap = new MockClusterMap();
  DataNodeId dataNodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
  Map<String, Set<PartitionInfo>> mountPathToPartitionInfoList = new HashMap<>();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
  PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
  ReplicaId cloudReplicaId = new CloudReplica(partitionId, dataNodeId);
  List<? extends ReplicaId> peerReplicas = cloudReplicaId.getPeerReplicaIds();
  List<RemoteReplicaInfo> remoteReplicas = new ArrayList<>();
  List<RemoteReplicaInfo.ReplicaTokenInfo> replicaTokenInfos = new ArrayList<>();
  for (ReplicaId remoteReplica : peerReplicas) {
    RemoteReplicaInfo remoteReplicaInfo =
        new RemoteReplicaInfo(remoteReplica, cloudReplicaId, null, factory.getNewFindToken(), 10,
            SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
    remoteReplicas.add(remoteReplicaInfo);
    replicaTokenInfos.add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
  }
  PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, partitionId, null, cloudReplicaId);
  mountPathToPartitionInfoList.computeIfAbsent(cloudReplicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet())
      .add(partitionInfo);
  LatchBasedInMemoryCloudDestination cloudDestination = new LatchBasedInMemoryCloudDestination(
      Collections.emptyList(), AzureCloudDestinationFactory.getReplicationFeedType(new VerifiableProperties(props)),
      clusterMap);
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
  CloudTokenPersistor cloudTokenPersistor = new CloudTokenPersistor("replicaTokens", mountPathToPartitionInfoList,
      new ReplicationMetrics(new MetricRegistry(), Collections.emptyList()), clusterMap,
      new FindTokenHelper(blobIdFactory, replicationConfig), cloudDestination);
  cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
  List<RemoteReplicaInfo.ReplicaTokenInfo> retrievedReplicaTokenInfos =
      cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
  Assert.assertEquals("Number of tokens doesn't match.", replicaTokenInfos.size(), retrievedReplicaTokenInfos.size());
  for (int i = 0; i < replicaTokenInfos.size(); i++) {
    Assert.assertArrayEquals("Token is not correct.", replicaTokenInfos.get(i).getReplicaToken().toBytes(),
        retrievedReplicaTokenInfos.get(i).getReplicaToken().toBytes());
  }
}
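The mountPathToPartitionInfoList population above uses the standard computeIfAbsent multimap idiom: the thread-safe value set for a key is created exactly once, then reused for later additions. A standalone illustration with plain strings (names are illustrative only):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public final class MultimapIdiom {
  public static void main(String[] args) {
    // The key's value set is created on first access and reused afterwards,
    // mirroring how basicTest groups PartitionInfo objects by mount path.
    Map<String, Set<String>> byMountPath = new ConcurrentHashMap<>();
    byMountPath.computeIfAbsent("/mnt/0", k -> ConcurrentHashMap.newKeySet()).add("partition-1");
    byMountPath.computeIfAbsent("/mnt/0", k -> ConcurrentHashMap.newKeySet()).add("partition-2");
    System.out.println(byMountPath); // {/mnt/0=[partition-1, partition-2]}
  }
}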