
Example 46 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class ReplicationTest method dcLevelReplicationLagMetricsTest.

/**
 * Test the metrics that track how far remote replicas lag behind local replicas in each dc.
 * @throws Exception
 */
@Test
public void dcLevelReplicationLagMetricsTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, null);
    MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
    Set<String> remoteDcNames = new HashSet<>(Arrays.asList("DC1", "DC2", "DC3"));
    String localDcName = clusterMap.getDataNodeIds().get(0).getDatacenterName();
    remoteDcNames.remove(localDcName);
    // before updating replication lag, make sure the avg lag in each dc is at its initial value of 18.0 bytes
    MetricRegistry metricRegistry = replicationManager.getMetricRegistry();
    String prefix = ReplicaThread.class.getName() + ".";
    String avgMetricSuffix = "-avgReplicaLagFromLocalInBytes";
    assertEquals("Average replication lag in local dc is not expected", 18.0, metricRegistry.getGauges().get(prefix + localDcName + avgMetricSuffix).getValue());
    for (String remoteDc : remoteDcNames) {
        assertEquals("Average replication lag in remote dc is not expected", 18.0, metricRegistry.getGauges().get(prefix + remoteDc + avgMetricSuffix).getValue());
    }
    // iterate over all partitions on current node and make sure all their peer replicas in local dc have fully caught up
    for (Map.Entry<PartitionId, PartitionInfo> entry : replicationManager.partitionToPartitionInfo.entrySet()) {
        PartitionId localPartition = entry.getKey();
        PartitionInfo partitionInfo = entry.getValue();
        List<RemoteReplicaInfo> remoteReplicaInfos = partitionInfo.getRemoteReplicaInfos().stream().filter(info -> info.getReplicaId().getDataNodeId().getDatacenterName().equals(localDcName)).collect(Collectors.toList());
        for (RemoteReplicaInfo remoteReplicaInfoInLocalDc : remoteReplicaInfos) {
            ReplicaId peerReplicaInLocalDc = remoteReplicaInfoInLocalDc.getReplicaId();
            replicationManager.updateTotalBytesReadByRemoteReplica(localPartition, peerReplicaInLocalDc.getDataNodeId().getHostname(), peerReplicaInLocalDc.getReplicaPath(), 18);
        }
    }
    // verify that after updating replication lag for all peer replicas in local dc, the avg lag in local dc has been updated
    assertEquals("Average replication lag in local dc is not expected", 0.0, metricRegistry.getGauges().get(prefix + localDcName + avgMetricSuffix).getValue());
    // for remote dc, the avg lag is still 18.0
    for (String remoteDc : remoteDcNames) {
        assertEquals("Average replication lag in remote dc is not expected", 18.0, metricRegistry.getGauges().get(prefix + remoteDc + avgMetricSuffix).getValue());
    }
}
Also used : CoreMatchers(org.hamcrest.CoreMatchers) Arrays(java.util.Arrays) StorageManager(com.github.ambry.store.StorageManager) StoreKeyConverter(com.github.ambry.store.StoreKeyConverter) DataNodeId(com.github.ambry.clustermap.DataNodeId) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) PortType(com.github.ambry.network.PortType) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Container(com.github.ambry.account.Container) DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) Predicate(java.util.function.Predicate) ValidatingTransformer(com.github.ambry.messageformat.ValidatingTransformer) Collection(java.util.Collection) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Collectors(java.util.stream.Collectors) ConnectedChannel(com.github.ambry.network.ConnectedChannel) CountDownLatch(java.util.concurrent.CountDownLatch) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse) PartitionStateChangeListener(com.github.ambry.clustermap.PartitionStateChangeListener) MockTime(com.github.ambry.utils.MockTime) Account(com.github.ambry.account.Account) Optional(java.util.Optional) TransitionErrorCode(com.github.ambry.clustermap.StateTransitionException.TransitionErrorCode) MockId(com.github.ambry.store.MockId) InMemAccountService(com.github.ambry.account.InMemAccountService) AmbryReplicaSyncUpManager(com.github.ambry.clustermap.AmbryReplicaSyncUpManager) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) ResponseHandler(com.github.ambry.commons.ResponseHandler) ClusterMapChangeListener(com.github.ambry.clustermap.ClusterMapChangeListener) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Transformer(com.github.ambry.store.Transformer) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) CommonTestUtils(com.github.ambry.commons.CommonTestUtils) ReplicaMetadataResponseInfo(com.github.ambry.protocol.ReplicaMetadataResponseInfo) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) Time(com.github.ambry.utils.Time) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockMessageWriteSet(com.github.ambry.store.MockMessageWriteSet) ReplicaState(com.github.ambry.clustermap.ReplicaState) StateModelListenerType(com.github.ambry.clustermap.StateModelListenerType) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Pair(com.github.ambry.utils.Pair) Iterator(java.util.Iterator) ReplicaType(com.github.ambry.clustermap.ReplicaType) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterMap(com.github.ambry.clustermap.ClusterMap) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) Store(com.github.ambry.store.Store) Mockito(org.mockito.Mockito) MessageInfo(com.github.ambry.store.MessageInfo) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Port(com.github.ambry.network.Port) Comparator(java.util.Comparator) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
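
The 18.0 to 0.0 transition asserted above follows from simple lag arithmetic. The sketch below illustrates that bookkeeping under the assumption that a peer's lag is the local store's end offset minus the bytes that peer has read; the variable names are hypothetical, not Ambry's internals.

// Hypothetical illustration of the lag arithmetic behind the assertions above.
long localStoreEndOffset = 18;  // bytes present in each local store in this test setup
long bytesReadByPeer = 0;       // before updateTotalBytesReadByRemoteReplica is called
long lag = localStoreEndOffset - bytesReadByPeer;   // 18 -> the avg gauge reports 18.0
bytesReadByPeer = 18;           // after the update loop for local-dc peers
lag = localStoreEndOffset - bytesReadByPeer;        // 0 -> the local-dc avg gauge drops to 0.0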

Example 47 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class LeaderBasedReplicationTest method replicaThreadLeaderBasedReplicationStandByCrossColoFetchTest.

/**
 * Test leader based replication to verify cross colo gets for standby replicas after they have timed out
 * waiting for missing keys.
 * @throws Exception
 */
@Test
public void replicaThreadLeaderBasedReplicationStandByCrossColoFetchTest() throws Exception {
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteNodeInLocalDC, remoteHostInLocalDC);
    hosts.put(remoteNodeInRemoteDC, remoteHostInRemoteDC);
    int batchSize = 5;
    int numOfMessagesOnRemoteNodeInLocalDC = 3;
    int numOfMessagesOnRemoteNodeInRemoteDC = 10;
    ConnectionPool mockConnectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
    Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant, mockConnectionPool);
    StorageManager storageManager = managers.getFirst();
    MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
    // set mock local stores on all remoteReplicaInfos; these will be used during replication.
    for (PartitionId partitionId : replicationManager.partitionToPartitionInfo.keySet()) {
        localHost.addStore(partitionId, null);
        Store localStore = localHost.getStore(partitionId);
        localStore.start();
        List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(partitionId).getRemoteReplicaInfos();
        remoteReplicaInfos.forEach(remoteReplicaInfo -> remoteReplicaInfo.setLocalStore(localStore));
    }
    // get remote replicas and replica thread for remote host on local datacenter
    ReplicaThread intraColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInLocalDC);
    List<RemoteReplicaInfo> remoteReplicaInfosForLocalDC = intraColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInLocalDC);
    // get remote replicas and replica thread for remote host on remote datacenter
    ReplicaThread crossColoReplicaThread = replicationManager.dataNodeIdToReplicaThread.get(remoteNodeInRemoteDC);
    List<RemoteReplicaInfo> remoteReplicaInfosForRemoteDC = crossColoReplicaThread.getRemoteReplicaInfos().get(remoteNodeInRemoteDC);
    // mock helix transition state from standby to leader for local leader partitions
    List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(replicationManager.dataNodeId);
    for (ReplicaId replicaId : replicaIds) {
        MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
        if (mockReplicaId.getReplicaState() == ReplicaState.LEADER) {
            MockPartitionId mockPartitionId = (MockPartitionId) replicaId.getPartitionId();
            mockHelixParticipant.onPartitionBecomeLeaderFromStandby(mockPartitionId.toPathString());
        }
    }
    // Add put messages to all partitions on remoteHostInLocalDC and remoteHostInRemoteDC
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    for (PartitionId partitionId : partitionIds) {
        // add 3 put messages to the remoteNodeInLocalDC and remoteNodeInRemoteDC from which local host will replicate.
        addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHostInLocalDC, remoteHostInRemoteDC), numOfMessagesOnRemoteNodeInLocalDC);
        // add numOfMessagesOnRemoteNodeInRemoteDC - numOfMessagesOnRemoteNodeInLocalDC (i.e. 7) put messages to the
        // remoteNodeInRemoteDC only. Since these messages are not present in remoteNodeInLocalDC, they don't reach the local
        // node via intra-dc replication. We should see remote standby replicas time out waiting for these messages and a
        // cross colo fetch happening.
        addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHostInRemoteDC), numOfMessagesOnRemoteNodeInRemoteDC - numOfMessagesOnRemoteNodeInLocalDC);
    }
    // Choose partitions that are leaders on both local and remote nodes
    Set<ReplicaId> leaderReplicasOnLocalAndRemoteNodes = getRemoteLeaderReplicasWithLeaderPartitionsOnLocalNode(clusterMap, replicationManager.dataNodeId, remoteNodeInRemoteDC);
    // replicate with remote node in remote DC
    crossColoReplicaThread.replicate();
    // leader replicas have replicated up to the batch limit; missing messages for standby replicas are not fetched yet.
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
        if (leaderReplicasOnLocalAndRemoteNodes.contains(remoteReplicaInfo.getReplicaId())) {
            assertEquals("remote token mismatch for leader replicas", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), batchSize - 1);
        } else {
            assertEquals("remote token should not move forward for standby replicas until missing keys are fetched", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), 0);
        }
    }
    // Replicate with remote node in local dc
    intraColoReplicaThread.replicate();
    // verify that remote token will be moved for all replicas as it is intra-dc replication
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForLocalDC) {
        assertEquals("mismatch in remote token set for intra colo replicas", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), numOfMessagesOnRemoteNodeInLocalDC - 1);
    }
    // process missing store messages from the previous metadata exchange for cross colo replicas; by now they should have arrived
    // via intra-dc replication
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
        crossColoReplicaThread.processMissingKeysFromPreviousMetadataResponse(remoteReplicaInfo);
    }
    // verify that the remote token will remain 0 for standby replicas as some messages in their missing set are not fetched yet.
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
        if (!leaderReplicasOnLocalAndRemoteNodes.contains(remoteReplicaInfo.getReplicaId())) {
            assertTrue("missing store messages should still exist for standby replicas", crossColoReplicaThread.containsMissingKeysFromPreviousMetadataExchange(remoteReplicaInfo));
            assertEquals("remote token should not move forward for standby replicas until missing keys are fetched", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), 0);
            assertEquals("incorrect number of missing store messages found for standby replicas", remoteReplicaInfo.getExchangeMetadataResponse().missingStoreMessages.size(), batchSize - numOfMessagesOnRemoteNodeInLocalDC);
        }
    }
    // Attempt replication with remoteNodeInRemoteDC, we should not see any replication attempt for standby replicas
    // and their remote token stays as 0.
    crossColoReplicaThread.replicate();
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
        if (!leaderReplicasOnLocalAndRemoteNodes.contains(remoteReplicaInfo.getReplicaId())) {
            assertEquals("remote token should not move forward for standby replicas until missing keys are fetched", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), 0);
            assertTrue("missing store messages should still exist for standby replicas", crossColoReplicaThread.containsMissingKeysFromPreviousMetadataExchange(remoteReplicaInfo));
        }
    }
    // Move time forward by replicationStandbyWaitTimeoutToTriggerCrossColoFetchSeconds+1 seconds and attempt replication.
    // We should see cross colo fetch for standby replicas now since missing keys haven't arrived for
    // replicationConfig.replicationStandbyWaitTimeoutToTriggerCrossColoFetchSeconds.
    time.sleep((replicationConfig.replicationStandbyWaitTimeoutToTriggerCrossColoFetchSeconds + 1) * 1000);
    // verify that we get the list of standby replicas that timed out on no progress
    Set<RemoteReplicaInfo> allStandbyReplicas = remoteReplicaInfosForRemoteDC.stream().filter(info -> !leaderReplicasOnLocalAndRemoteNodes.contains(info.getReplicaId())).collect(Collectors.toSet());
    assertEquals("mismatch in list of standby replicas timed out on no progress", new HashSet<>(crossColoReplicaThread.getRemoteStandbyReplicasTimedOutOnNoProgress(remoteReplicaInfosForRemoteDC)), allStandbyReplicas);
    crossColoReplicaThread.replicate();
    // token index for all standby replicas will move forward after fetching missing keys themselves
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfosForRemoteDC) {
        if (!leaderReplicasOnLocalAndRemoteNodes.contains(remoteReplicaInfo.getReplicaId())) {
            assertEquals("mismatch in remote token set for standby cross colo replicas", ((MockFindToken) remoteReplicaInfo.getToken()).getIndex(), batchSize - 1);
            assertFalse("missing store messages should be empty for standby replicas now", crossColoReplicaThread.containsMissingKeysFromPreviousMetadataExchange(remoteReplicaInfo));
        }
    }
    // verify replication metrics to track number of cross colo get requests for standby replicas. If all replicas are
    // leaders, we should have 0 cross colo get requests.
    String remoteDataCenter = remoteReplicaInfosForRemoteDC.get(0).getReplicaId().getDataNodeId().getDatacenterName();
    assertEquals("mismatch in number of cross colo get requests tracked for standby replicas", crossColoReplicaThread.getReplicationMetrics().interColoReplicationGetRequestCountForStandbyReplicas.get(remoteDataCenter).getCount(), leaderReplicasOnLocalAndRemoteNodes.size() != remoteReplicaInfosForRemoteDC.size() ? 1 : 0);
    storageManager.shutdown();
}
Also used : ConnectionPool(com.github.ambry.network.ConnectionPool) CoreMatchers(org.hamcrest.CoreMatchers) Arrays(java.util.Arrays) StorageManager(com.github.ambry.store.StorageManager) ClusterMapChangeListener(com.github.ambry.clustermap.ClusterMapChangeListener) DataNodeId(com.github.ambry.clustermap.DataNodeId) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) Map(java.util.Map) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) ReplicaState(com.github.ambry.clustermap.ReplicaState) MetricRegistry(com.codahale.metrics.MetricRegistry) Pair(com.github.ambry.utils.Pair) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Set(java.util.Set) IOException(java.io.IOException) Test(org.junit.Test) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Collectors(java.util.stream.Collectors) Store(com.github.ambry.store.Store) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) ReplicaId(com.github.ambry.clustermap.ReplicaId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) PartitionId(com.github.ambry.clustermap.PartitionId)
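
A minimal sketch of the no-progress timeout that gates the cross colo fetch above, assuming the replica thread compares a standby replica's last-progress timestamp against replicationStandbyWaitTimeoutToTriggerCrossColoFetchSeconds; lastProgressTimeMs and the comparison are illustrative, not Ambry's internals.

// Illustrative only: how a standby replica could be judged "timed out on no progress"
// before being allowed a cross-colo GET for its missing keys.
long timeoutMs = replicationConfig.replicationStandbyWaitTimeoutToTriggerCrossColoFetchSeconds * 1000L;
long lastProgressTimeMs = 0L;  // hypothetical: when the standby last advanced via intra-dc replication
boolean timedOutOnNoProgress = time.milliseconds() - lastProgressTimeMs >= timeoutMs;
if (timedOutOnNoProgress) {
    // the replica would appear in getRemoteStandbyReplicasTimedOutOnNoProgress and fetch cross-colo
}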

Example 48 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class ReplicationTestHelper method createStorageManagerAndReplicationManager.

/**
 * Helper method to create storage manager and replication manager
 * @param clusterMap {@link ClusterMap} to use
 * @param clusterMapConfig {@link ClusterMapConfig} to use
 * @param clusterParticipant {@link com.github.ambry.clustermap.ClusterParticipant} for listener registration.
 * @param mockConnectionPool the {@link ConnectionPool} to use when constructing the replication manager.
 * @return a pair of storage manager and replication manager
 * @throws Exception
 */
protected Pair<StorageManager, ReplicationManager> createStorageManagerAndReplicationManager(ClusterMap clusterMap, ClusterMapConfig clusterMapConfig, MockHelixParticipant clusterParticipant, ConnectionPool mockConnectionPool) throws Exception {
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, dataNodeId, null, clusterParticipant == null ? null : Collections.singletonList(clusterParticipant), new MockTime(), null, new InMemAccountService(false, false));
    storageManager.start();
    MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, dataNodeId, storeKeyConverterFactory, clusterParticipant, mockConnectionPool, new MockFindTokenHelper(storeKeyFactory, replicationConfig), BlobIdTransformer.class.getName(), storeKeyFactory, time);
    return new Pair<>(storageManager, replicationManager);
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) MetricRegistry(com.codahale.metrics.MetricRegistry) StorageManager(com.github.ambry.store.StorageManager) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) InMemAccountService(com.github.ambry.account.InMemAccountService) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockTime(com.github.ambry.utils.MockTime) Pair(com.github.ambry.utils.Pair)
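
A minimal usage sketch, mirroring how the tests in Examples 46 and 47 consume this helper: unpack the returned Pair, drive the replication manager, and shut the storage manager down at the end.

// Sketch of a caller; the test body in the middle is elided.
Pair<StorageManager, ReplicationManager> managers =
    createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, null, null);
StorageManager storageManager = managers.getFirst();
MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
try {
    // ... exercise replicationManager ...
} finally {
    storageManager.shutdown();
}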

Example 49 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class NonBlockingRouterTestBase method ensureStitchInAllServers.

/**
 * Ensure that stitch requests for the given blob id reach all the mock servers in the {@link MockServerLayout}.
 * @param blobId The blob id for which the stitch request will be created.
 * @param serverLayout The mock server layout.
 * @param chunksToStitch The list of {@link ChunkInfo} to stitch.
 * @param singleBlobSize The size of each chunk.
 * @throws IOException
 */
protected void ensureStitchInAllServers(String blobId, MockServerLayout serverLayout, List<ChunkInfo> chunksToStitch, int singleBlobSize) throws IOException {
    TreeMap<Integer, Pair<StoreKey, Long>> indexToChunkIdsAndChunkSizes = new TreeMap<>();
    int i = 0;
    for (ChunkInfo chunkInfo : chunksToStitch) {
        indexToChunkIdsAndChunkSizes.put(i, new Pair<>(new BlobId(chunkInfo.getBlobId(), mockClusterMap), chunkInfo.getChunkSizeInBytes()));
        i++;
    }
    ByteBuffer serializedContent;
    int totalSize = singleBlobSize * chunksToStitch.size();
    if (routerConfig.routerMetadataContentVersion == MessageFormatRecord.Metadata_Content_Version_V2) {
        serializedContent = MetadataContentSerDe.serializeMetadataContentV2(singleBlobSize, totalSize, indexToChunkIdsAndChunkSizes.values().stream().map(Pair::getFirst).collect(Collectors.toList()));
    } else {
        List<Pair<StoreKey, Long>> orderedChunkIdList = new ArrayList<>(indexToChunkIdsAndChunkSizes.values());
        serializedContent = MetadataContentSerDe.serializeMetadataContentV3(totalSize, orderedChunkIdList);
    }
    BlobId id = new BlobId(blobId, mockClusterMap);
    for (MockServer server : serverLayout.getMockServers()) {
        if (!server.getBlobs().containsKey(blobId)) {
            PutRequest putRequest = new PutRequest(NonBlockingRouter.correlationIdGenerator.incrementAndGet(), routerConfig.routerHostname, id, putBlobProperties, ByteBuffer.wrap(putUserMetadata), Unpooled.wrappedBuffer(serializedContent), serializedContent.remaining(), BlobType.MetadataBlob, null);
            server.send(putRequest).release();
            putRequest.release();
        }
    }
}
Also used : ArrayList(java.util.ArrayList) PutRequest(com.github.ambry.protocol.PutRequest) TreeMap(java.util.TreeMap) ByteBuffer(java.nio.ByteBuffer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BlobId(com.github.ambry.commons.BlobId) Pair(com.github.ambry.utils.Pair)
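
To make the two serialization branches above concrete: the V2 path takes only the ordered chunk ids, while the V3 path takes the ordered (id, size) pairs. A small illustration with made-up ids and sizes (MockId is used here purely as a stand-in StoreKey):

// Made-up data illustrating the two inputs built from the TreeMap above.
TreeMap<Integer, Pair<StoreKey, Long>> indexed = new TreeMap<>();
indexed.put(0, new Pair<>(new MockId("chunk0"), 4096L));
indexed.put(1, new Pair<>(new MockId("chunk1"), 4096L));
// V2 input: chunk ids only, in index order.
List<StoreKey> v2Input = indexed.values().stream().map(Pair::getFirst).collect(Collectors.toList());
// V3 input: (id, size) pairs, in index order.
List<Pair<StoreKey, Long>> v3Input = new ArrayList<>(indexed.values());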

Example 50 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class IndexTest method findEntriesSinceToJournalBasedTest.

/**
 * Tests all cases of {@link PersistentIndex#findEntriesSince(FindToken, long)} that result in a journal-based
 * {@link StoreFindToken} being returned.
 * 1. Uninitialized -> Journal
 * 2. Index -> Journal
 * 3. Journal -> Journal
 * 4. No movement.
 * @throws StoreException
 */
private void findEntriesSinceToJournalBasedTest() throws StoreException {
    IndexSegment segmentOfToken = state.index.getIndexSegments().lastEntry().getValue();
    StoreFindToken absoluteEndToken = new StoreFindToken(state.logOrder.lastKey(), state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
    absoluteEndToken.setBytesRead(state.index.getLogUsedCapacity());
    // ------------------
    // 1. Uninitialized -> Journal
    doFindEntriesSinceTest(new StoreFindToken(), Long.MAX_VALUE, state.allKeys.keySet(), absoluteEndToken);
    // ------------------
    // 2. Index -> Journal
    Offset firstIndexSegmentStartOffset = state.referenceIndex.firstKey();
    StoreKey firstStoreKey = state.referenceIndex.get(firstIndexSegmentStartOffset).firstKey();
    StoreFindToken startToken = new StoreFindToken(firstStoreKey, firstIndexSegmentStartOffset, state.sessionId, state.incarnationId, null, null, UNINITIALIZED_RESET_KEY_VERSION);
    Set<MockId> expectedKeys = new HashSet<>(state.allKeys.keySet());
    if (!state.deletedKeys.contains(firstStoreKey)) {
        // if firstStoreKey has not been deleted, it will not show up in findEntries since its PUT record is ignored
        expectedKeys.remove(firstStoreKey);
    }
    doFindEntriesSinceTest(startToken, Long.MAX_VALUE, expectedKeys, absoluteEndToken);
    // ------------------
    // 3. Journal -> Journal
    // a. Token no longer in journal
    startToken = new StoreFindToken(state.logOrder.firstKey(), state.sessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
    doFindEntriesSinceTest(startToken, Long.MAX_VALUE, state.allKeys.keySet(), absoluteEndToken);
    // b. Token still in journal
    startToken = new StoreFindToken(state.index.journal.getFirstOffset(), state.sessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
    expectedKeys = new HashSet<>();
    for (Map.Entry<Offset, Pair<MockId, CuratedLogIndexState.LogEntry>> entry : state.logOrder.tailMap(startToken.getOffset(), false).entrySet()) {
        expectedKeys.add(entry.getValue().getFirst());
    }
    doFindEntriesSinceTest(startToken, Long.MAX_VALUE, expectedKeys, absoluteEndToken);
    // c. Token still in journal with inclusiveness set to true
    startToken = new StoreFindToken(state.index.journal.getFirstOffset(), state.sessionId, state.incarnationId, true, null, null, UNINITIALIZED_RESET_KEY_VERSION);
    expectedKeys.add(state.logOrder.tailMap(startToken.getOffset(), true).firstEntry().getValue().getFirst());
    doFindEntriesSinceTest(startToken, Long.MAX_VALUE, expectedKeys, absoluteEndToken);
    // ------------------
    // 4. Journal no change
    doFindEntriesSinceTest(absoluteEndToken, Long.MAX_VALUE, Collections.emptySet(), absoluteEndToken);
}
Also used : StoreFindToken(com.github.ambry.store.StoreFindToken) CuratedLogIndexState(com.github.ambry.store.CuratedLogIndexState) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) TreeMap(java.util.TreeMap) HashSet(java.util.HashSet) Pair(com.github.ambry.utils.Pair)
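
For context across all of these examples, com.github.ambry.utils.Pair is a plain immutable two-element holder. A minimal sketch of its shape, inferred from the constructor and getFirst/getSecond calls above (the real class may also define equals, hashCode, and toString):

// Minimal sketch of the Pair shape used throughout these examples.
public class Pair<A, B> {
    private final A first;
    private final B second;

    public Pair(A first, B second) {
        this.first = first;
        this.second = second;
    }

    public A getFirst() {
        return first;
    }

    public B getSecond() {
        return second;
    }
}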

Aggregations

Pair (com.github.ambry.utils.Pair)64 ArrayList (java.util.ArrayList)29 HashMap (java.util.HashMap)28 Map (java.util.Map)28 Test (org.junit.Test)20 IOException (java.io.IOException)15 MetricRegistry (com.codahale.metrics.MetricRegistry)14 List (java.util.List)14 ByteBuffer (java.nio.ByteBuffer)13 Collections (java.util.Collections)13 File (java.io.File)12 Assert (org.junit.Assert)12 VerifiableProperties (com.github.ambry.config.VerifiableProperties)11 Utils (com.github.ambry.utils.Utils)10 HashSet (java.util.HashSet)10 Properties (java.util.Properties)10 Container (com.github.ambry.account.Container)9 TestUtils (com.github.ambry.utils.TestUtils)9 Arrays (java.util.Arrays)9 Set (java.util.Set)9