
Example 1 with CloudReplica

use of com.github.ambry.clustermap.CloudReplica in project ambry by linkedin.

In the class VcrReplicationManager, the method addReplica:

/**
 * Add a replica of the given {@link PartitionId} and its {@link RemoteReplicaInfo}s to the backup list.
 * @param partitionId the {@link PartitionId} of the replica to add.
 * @throws ReplicationException if replica initialization fails.
 */
void addReplica(PartitionId partitionId) throws ReplicationException {
    if (partitionToPartitionInfo.containsKey(partitionId)) {
        throw new ReplicationException("Partition " + partitionId + " already exists on " + dataNodeId);
    }
    ReplicaId cloudReplica = new CloudReplica(partitionId, vcrClusterParticipant.getCurrentDataNodeId());
    if (!storeManager.addBlobStore(cloudReplica)) {
        logger.error("Can't start cloudstore for replica {}", cloudReplica);
        throw new ReplicationException("Can't start cloudstore for replica " + cloudReplica);
    }
    List<? extends ReplicaId> peerReplicas = cloudReplica.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicaInfos = new ArrayList<>();
    Store store = storeManager.getStore(partitionId);
    if (peerReplicas != null) {
        for (ReplicaId peerReplica : peerReplicas) {
            if (!shouldReplicateFromDc(peerReplica.getDataNodeId().getDatacenterName())) {
                continue;
            }
            // We need to ensure that a replica token gets persisted only after the corresponding data in the
            // store gets flushed to cloud. We use the store flush interval multiplied by a constant factor
            // to determine the token flush interval
            FindTokenFactory findTokenFactory = tokenHelper.getFindTokenFactoryFromReplicaType(peerReplica.getReplicaType());
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(peerReplica, cloudReplica, store, findTokenFactory.getNewFindToken(), storeConfig.storeDataFlushIntervalSeconds * SystemTime.MsPerSec * Replication_Delay_Multiplier, SystemTime.getInstance(), peerReplica.getDataNodeId().getPortToConnectTo());
            replicationMetrics.addMetricsForRemoteReplicaInfo(remoteReplicaInfo, trackPerDatacenterLagInMetric);
            remoteReplicaInfos.add(remoteReplicaInfo);
        }
        rwLock.writeLock().lock();
        try {
            updatePartitionInfoMaps(remoteReplicaInfos, cloudReplica);
            partitionStoreMap.put(partitionId.toPathString(), store);
            // Reload the replication token if it exists.
            int tokenReloadFailCount = reloadReplicationTokenIfExists(cloudReplica, remoteReplicaInfos);
            vcrMetrics.tokenReloadWarnCount.inc(tokenReloadFailCount);
            // Add remoteReplicaInfos to {@link ReplicaThread}.
            addRemoteReplicaInfoToReplicaThread(remoteReplicaInfos, true);
            if (replicationConfig.replicationTrackPerPartitionLagFromRemote) {
                replicationMetrics.addLagMetricForPartition(partitionId, true);
            }
        } finally {
            rwLock.writeLock().unlock();
        }
    } else {
        try {
            storeManager.shutdownBlobStore(partitionId);
            storeManager.removeBlobStore(partitionId);
        } finally {
            throw new ReplicationException("Failed to add Partition " + partitionId + " on " + dataNodeId + " , because no peer replicas found.");
        }
    }
}
Also used : CloudReplica(com.github.ambry.clustermap.CloudReplica) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) ReplicationException(com.github.ambry.replication.ReplicationException) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ReplicaId(com.github.ambry.clustermap.ReplicaId)
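
For orientation, here is a minimal sketch of the relationship the method above relies on (the variables partitionId and vcrDataNodeId are assumed to be in scope; this is an illustration, not part of VcrReplicationManager): the CloudReplica created for a partition reports the partition's server-side replicas as its peers, and exactly those peers become the replication sources.

// Illustration only: enumerate the server-side peers a CloudReplica of a partition would replicate from.
ReplicaId cloudReplica = new CloudReplica(partitionId, vcrDataNodeId);
for (ReplicaId peer : cloudReplica.getPeerReplicaIds()) {
    // Each peer is a replica of the same partition hosted on an Ambry server node.
    System.out.println("Replicating " + partitionId.toPathString() + " from " + peer.getDataNodeId());
}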

Example 2 with CloudReplica

use of com.github.ambry.clustermap.CloudReplica in project ambry by linkedin.

In the class CloudBlobStoreTest, the method testPutWithTtl:

/**
 * Test PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after the PUT and after the TtlUpdate, or after the TtlUpdate only.
 * The PUT may already be expired, its expiration time may be below the upload threshold, or its expiration time may be at or above the upload threshold.
 * @throws Exception
 */
@Test
public void testPutWithTtl() throws Exception {
    // Set up remote host
    MockClusterMap clusterMap = new MockClusterMap();
    MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    PartitionId partitionId = partitionIds.get(0);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
    // Generate BlobIds for following PUT.
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
    List<BlobId> blobIdList = new ArrayList<>();
    for (int i = 0; i < 6; i++) {
        blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
    }
    // Set up VCR
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty("clustermap.port", "12300");
    props.setProperty("vcr.ssl.port", "12345");
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
    LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
    CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
    CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
    cloudBlobStore.start();
    // Create ReplicaThread and add RemoteReplicaInfo to it.
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
    ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
    for (ReplicaId replica : partitionId.getReplicaIds()) {
        if (replica.getDataNodeId() == remoteHost.dataNodeId) {
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
            replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
            break;
        }
    }
    long referenceTime = System.currentTimeMillis();
    // Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    BlobId id = blobIdList.get(0);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    replicaThread.replicate();
    assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 2: Put already expired. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(1);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(2);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    replicaThread.replicate();
    if (isVcr) {
        assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    } else {
        assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    }
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(3);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud after Put and update ttl after TtlUpdate.
    id = blobIdList.get(4);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    replicaThread.replicate();
    assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud after TtlUpdate.
    id = blobIdList.get(5);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Verify expiration time of all blobs.
    Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
    for (BlobId blobId : blobIdList) {
        assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
    }
}
Also used : ReplicaThread(com.github.ambry.replication.ReplicaThread) Transformer(com.github.ambry.store.Transformer) BlobIdTransformer(com.github.ambry.replication.BlobIdTransformer) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) ResponseHandler(com.github.ambry.commons.ResponseHandler) HashMap(java.util.HashMap) MockConnectionPool(com.github.ambry.replication.MockConnectionPool) ReplicationMetrics(com.github.ambry.replication.ReplicationMetrics) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) CloudConfig(com.github.ambry.config.CloudConfig) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) MockTime(com.github.ambry.utils.MockTime) CloudReplica(com.github.ambry.clustermap.CloudReplica) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) MockFindTokenHelper(com.github.ambry.replication.MockFindTokenHelper) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockHost(com.github.ambry.replication.MockHost) MockFindToken(com.github.ambry.replication.MockFindToken) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) ReplicationTest(com.github.ambry.replication.ReplicationTest) Test(org.junit.Test)
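
The threshold the six cases above exercise can be summarized with a small sketch. This is an illustration of the behavior the test observes, with an assumed helper name, not the actual CloudBlobStore code: on the VCR, a PUT whose remaining TTL is shorter than cloudConfig.vcrMinTtlDays is not uploaded until a TtlUpdate makes the blob permanent.

// Illustration only (assumed helper): would a PUT with this expiration time be uploaded to the cloud right away?
static boolean wouldUploadImmediately(long expirationTimeMs, long nowMs, CloudConfig cloudConfig) {
    if (expirationTimeMs == Utils.Infinite_Time) {
        // Permanent blobs are always uploaded.
        return true;
    }
    long minTtlMs = TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays);
    // Cases 1-4 above fall below this threshold and are only uploaded after the TtlUpdate;
    // cases 5 and 6 meet it and are uploaded on the PUT itself when running as the VCR.
    return expirationTimeMs - nowMs >= minTtlMs;
}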

Example 3 with CloudReplica

use of com.github.ambry.clustermap.CloudReplica in project ambry by linkedin.

In the class CloudTokenPersistorTest, the method basicTest:

@Test
public void basicTest() throws Exception {
    Properties props = VcrTestUtil.createVcrProperties("DC1", "vcrClusterName", "zkConnectString", 12310, 12410, 12510, null);
    props.setProperty("replication.cloud.token.factory", replicationCloudTokenFactory);
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    ClusterMap clusterMap = new MockClusterMap();
    DataNodeId dataNodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
    Map<String, Set<PartitionInfo>> mountPathToPartitionInfoList = new HashMap<>();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
    PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
    ReplicaId cloudReplicaId = new CloudReplica(partitionId, dataNodeId);
    List<? extends ReplicaId> peerReplicas = cloudReplicaId.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicas = new ArrayList<RemoteReplicaInfo>();
    List<RemoteReplicaInfo.ReplicaTokenInfo> replicaTokenInfos = new ArrayList<>();
    for (ReplicaId remoteReplica : peerReplicas) {
        RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, cloudReplicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
        remoteReplicas.add(remoteReplicaInfo);
        replicaTokenInfos.add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
    }
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, partitionId, null, cloudReplicaId);
    mountPathToPartitionInfoList.computeIfAbsent(cloudReplicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
    LatchBasedInMemoryCloudDestination cloudDestination = new LatchBasedInMemoryCloudDestination(Collections.emptyList(), AzureCloudDestinationFactory.getReplicationFeedType(new VerifiableProperties(props)), clusterMap);
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    CloudTokenPersistor cloudTokenPersistor = new CloudTokenPersistor("replicaTokens", mountPathToPartitionInfoList, new ReplicationMetrics(new MetricRegistry(), Collections.emptyList()), clusterMap, new FindTokenHelper(blobIdFactory, replicationConfig), cloudDestination);
    cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
    List<RemoteReplicaInfo.ReplicaTokenInfo> retrievedReplicaTokenInfos = cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
    Assert.assertEquals("Number of tokens doesn't match.", replicaTokenInfos.size(), retrievedReplicaTokenInfos.size());
    for (int i = 0; i < replicaTokenInfos.size(); i++) {
        Assert.assertArrayEquals("Token is not correct.", replicaTokenInfos.get(i).getReplicaToken().toBytes(), retrievedReplicaTokenInfos.get(i).getReplicaToken().toBytes());
    }
}
Also used : Arrays(java.util.Arrays) AzureCloudDestinationFactory(com.github.ambry.cloud.azure.AzureCloudDestinationFactory) CloudReplica(com.github.ambry.clustermap.CloudReplica) DataNodeId(com.github.ambry.clustermap.DataNodeId) ReplicationMetrics(com.github.ambry.replication.ReplicationMetrics) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) CloudConfig(com.github.ambry.config.CloudConfig) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) PartitionInfo(com.github.ambry.replication.PartitionInfo) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) List(java.util.List) ReplicaId(com.github.ambry.clustermap.ReplicaId) StoreFindTokenFactory(com.github.ambry.store.StoreFindTokenFactory) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) PartitionId(com.github.ambry.clustermap.PartitionId)
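
Both this test (at the computeIfAbsent call above) and Example 4 index PartitionInfo objects by mount path using the same concurrent "multimap" idiom. A generic sketch in plain Java, with no Ambry types, of how that idiom works:

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class MountPathIndex {
    // Each mount path maps to a thread-safe set; the set is created lazily on first use.
    private final Map<String, Set<String>> index = new ConcurrentHashMap<>();

    void add(String mountPath, String partitionName) {
        // ConcurrentHashMap.newKeySet() returns a concurrent Set view backed by a ConcurrentHashMap.
        index.computeIfAbsent(mountPath, key -> ConcurrentHashMap.newKeySet()).add(partitionName);
    }
}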

Example 4 with CloudReplica

use of com.github.ambry.clustermap.CloudReplica in project ambry by linkedin.

In the class CloudToStoreReplicationManager, the method addCloudReplica:

/**
 * Add a replica of the given partition and its {@link RemoteReplicaInfo}s to the backup list.
 * @param partitionName name of the partition of the replica to add.
 * @throws ReplicationException if replica initialization fails.
 */
private void addCloudReplica(String partitionName) throws ReplicationException {
    // Adding a cloud replica occurs when a replica becomes leader from standby. Hence, if this is a newly added
    // replica, it should already be present in the storage manager.
    ReplicaId localReplica = storeManager.getReplica(partitionName);
    if (localReplica == null) {
        logger.warn("Got partition leader notification for partition {} that is not present on the node", partitionName);
        return;
    }
    PartitionId partitionId = localReplica.getPartitionId();
    Store store = storeManager.getStore(partitionId);
    if (store == null) {
        logger.warn("Unable to add cloud replica for partition {} as store for the partition is not present or started.", partitionName);
        return;
    }
    DataNodeId cloudDataNode = getCloudDataNode();
    CloudReplica peerCloudReplica = new CloudReplica(partitionId, cloudDataNode);
    FindTokenFactory findTokenFactory = tokenHelper.getFindTokenFactoryFromReplicaType(peerCloudReplica.getReplicaType());
    RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(peerCloudReplica, localReplica, store, findTokenFactory.getNewFindToken(), storeConfig.storeDataFlushIntervalSeconds * SystemTime.MsPerSec * Replication_Delay_Multiplier, SystemTime.getInstance(), peerCloudReplica.getDataNodeId().getPortToConnectTo());
    replicationMetrics.addMetricsForRemoteReplicaInfo(remoteReplicaInfo, trackPerDatacenterLagInMetric);
    // Note that for each replica on an Ambry server node, there is only one cloud replica that it will be replicating from.
    List<RemoteReplicaInfo> remoteReplicaInfos = Collections.singletonList(remoteReplicaInfo);
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicaInfos, partitionId, store, localReplica);
    partitionToPartitionInfo.put(partitionId, partitionInfo);
    mountPathToPartitionInfos.computeIfAbsent(localReplica.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
    logger.info("Cloud Partition {} added to {}. CloudNode {} port {}", partitionName, dataNodeId, cloudDataNode, cloudDataNode.getPortToConnectTo());
    // Reload the replication token if it exists.
    reloadReplicationTokenIfExists(localReplica, remoteReplicaInfos);
    // Add remoteReplicaInfos to {@link ReplicaThread}.
    addRemoteReplicaInfoToReplicaThread(remoteReplicaInfos, true);
    if (replicationConfig.replicationTrackPerPartitionLagFromRemote) {
        replicationMetrics.addLagMetricForPartition(partitionId, true);
    }
    replicationMetrics.addCatchUpPointMetricForPartition(partitionId);
}
Also used : CloudReplica(com.github.ambry.clustermap.CloudReplica) NotificationContext(org.apache.helix.NotificationContext) StoreManager(com.github.ambry.server.StoreManager) ClusterMapUtils(com.github.ambry.clustermap.ClusterMapUtils) DataNodeId(com.github.ambry.clustermap.DataNodeId) LiveInstance(org.apache.helix.model.LiveInstance) Random(java.util.Random) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) CloudConfig(com.github.ambry.config.CloudConfig) PortType(com.github.ambry.network.PortType) Map(java.util.Map) ClusterParticipant(com.github.ambry.clustermap.ClusterParticipant) SystemTime(com.github.ambry.utils.SystemTime) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) InstanceConfigChangeListener(org.apache.helix.api.listeners.InstanceConfigChangeListener) LinkedList(java.util.LinkedList) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ReplicationConfig(com.github.ambry.config.ReplicationConfig) NotificationSystem(com.github.ambry.notification.NotificationSystem) StateModelListenerType(com.github.ambry.clustermap.StateModelListenerType) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) ConnectionPool(com.github.ambry.network.ConnectionPool) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) VcrClusterSpectator(com.github.ambry.clustermap.VcrClusterSpectator) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) Utils(com.github.ambry.utils.Utils) LiveInstanceChangeListener(org.apache.helix.api.listeners.LiveInstanceChangeListener) Collectors(java.util.stream.Collectors) InstanceConfig(org.apache.helix.model.InstanceConfig) TimeUnit(java.util.concurrent.TimeUnit) Store(com.github.ambry.store.Store) List(java.util.List) PartitionStateChangeListener(com.github.ambry.clustermap.PartitionStateChangeListener) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Port(com.github.ambry.network.Port) Collections(java.util.Collections) PartitionId(com.github.ambry.clustermap.PartitionId)
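
Compared with Example 1, the RemoteReplicaInfo built here points in the opposite direction. A minimal side-by-side sketch (the method and parameter names are assumed, for illustration only): in the RemoteReplicaInfo constructor the first argument is the remote replica being replicated from, and the second is the local replica being replicated to.

// Illustration only: the same constructor used in Examples 1 and 4, with the source and destination roles swapped.
static void illustrateDirections(ReplicaId serverPeerReplica, ReplicaId cloudReplica, Store cloudBlobStore,
    ReplicaId serverLocalReplica, Store diskStore, FindTokenFactory findTokenFactory, long tokenPersistIntervalMs) {
    // Example 1 (VcrReplicationManager.addReplica): pull from a server peer into the cloud store.
    RemoteReplicaInfo toCloud = new RemoteReplicaInfo(serverPeerReplica, cloudReplica, cloudBlobStore,
        findTokenFactory.getNewFindToken(), tokenPersistIntervalMs, SystemTime.getInstance(),
        serverPeerReplica.getDataNodeId().getPortToConnectTo());
    // Example 4 (CloudToStoreReplicationManager.addCloudReplica): pull from the cloud replica into the local disk store.
    RemoteReplicaInfo fromCloud = new RemoteReplicaInfo(cloudReplica, serverLocalReplica, diskStore,
        findTokenFactory.getNewFindToken(), tokenPersistIntervalMs, SystemTime.getInstance(),
        cloudReplica.getDataNodeId().getPortToConnectTo());
}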

Aggregations

CloudReplica (com.github.ambry.clustermap.CloudReplica): 4 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 4 usages
ArrayList (java.util.ArrayList): 4 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 3 usages
CloudDataNode (com.github.ambry.clustermap.CloudDataNode): 3 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 3 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 3 usages
CloudConfig (com.github.ambry.config.CloudConfig): 3 usages
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 3 usages
ReplicationConfig (com.github.ambry.config.ReplicationConfig): 3 usages
RemoteReplicaInfo (com.github.ambry.replication.RemoteReplicaInfo): 3 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 2 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 2 usages
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 2 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 2 usages
Port (com.github.ambry.network.Port): 2 usages
ReplicationMetrics (com.github.ambry.replication.ReplicationMetrics): 2 usages
Store (com.github.ambry.store.Store): 2 usages
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 2 usages
SystemTime (com.github.ambry.utils.SystemTime): 2 usages