
Example 11 with MockStoreKeyConverterFactory

Use of com.github.ambry.store.MockStoreKeyConverterFactory in project ambry by linkedin.

From the class ReplicationTestHelper, method getStoreKeyConverter.

/**
 * @return {@link StoreKeyConverter} used in replication.
 */
protected StoreKeyConverter getStoreKeyConverter() {
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    return storeKeyConverterFactory.getStoreKeyConverter();
}
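
For orientation, a minimal sketch of how the returned converter is expected to behave with an empty conversion map and setReturnInputIfAbsent(true). The convert call mirrors its usage in the examples below; the identity-mapping outcome is an assumption based on the flag's semantics, and someKey is a hypothetical StoreKey:

StoreKeyConverter converter = getStoreKeyConverter();
// Assumption: with an empty conversion map and returnInputIfAbsent == true,
// every key converts to itself (identity mapping).
Map<StoreKey, StoreKey> converted = converter.convert(Collections.singletonList(someKey));
assertEquals(someKey, converted.get(someKey));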

Example 12 with MockStoreKeyConverterFactory

Use of com.github.ambry.store.MockStoreKeyConverterFactory in project ambry by linkedin.

From the class CloudBlobStoreTest, method testPutWithTtl.

/**
 * Test PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after both the PUT and the TtlUpdate, or after the TtlUpdate only.
 * The PUT may already be expired, or its expiration time may be below or at/above the upload threshold.
 * @throws Exception
 */
@Test
public void testPutWithTtl() throws Exception {
    // Set up remote host
    MockClusterMap clusterMap = new MockClusterMap();
    MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    PartitionId partitionId = partitionIds.get(0);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
    // Generate BlobIds for following PUT.
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
    List<BlobId> blobIdList = new ArrayList<>();
    for (int i = 0; i < 6; i++) {
        blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
    }
    // Set up VCR
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty("clustermap.port", "12300");
    props.setProperty("vcr.ssl.port", "12345");
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
    LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
    CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
    CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
    cloudBlobStore.start();
    // Create ReplicaThread and add RemoteReplicaInfo to it.
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
    ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
    for (ReplicaId replica : partitionId.getReplicaIds()) {
        if (replica.getDataNodeId() == remoteHost.dataNodeId) {
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
            replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
            break;
        }
    }
    long referenceTime = System.currentTimeMillis();
    // Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    BlobId id = blobIdList.get(0);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    replicaThread.replicate();
    assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 2: Put already expired. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(1);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(2);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    replicaThread.replicate();
    if (isVcr) {
        assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    } else {
        assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    }
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(3);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud after Put and update ttl after TtlUpdate.
    id = blobIdList.get(4);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    replicaThread.replicate();
    assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud after TtlUpdate.
    id = blobIdList.get(5);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Verify expiration time of all blobs.
    Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
    for (BlobId blobId : blobIdList) {
        assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
    }
}
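
The boundary the six cases exercise can be restated compactly. The helper below is hypothetical (not Ambry API) and only captures the rule the assertions above rely on: on the VCR path, a PUT is uploaded immediately only if its TTL is at least vcrMinTtlDays; otherwise the upload waits for the TtlUpdate.

// Hypothetical helper restating the upload rule exercised by the test above.
static boolean uploadOnPut(long putTimeMs, long expirationTimeMs, int vcrMinTtlDays) {
    if (expirationTimeMs == Utils.Infinite_Time) {
        // A blob with no expiration is always eligible for upload on PUT.
        return true;
    }
    // Otherwise it is eligible only if the remaining TTL meets the vcrMinTtlDays threshold.
    return expirationTimeMs - putTimeMs >= TimeUnit.DAYS.toMillis(vcrMinTtlDays);
}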

Example 13 with MockStoreKeyConverterFactory

Use of com.github.ambry.store.MockStoreKeyConverterFactory in project ambry by linkedin.

From the class ReplicationTest, method replicaThreadTestConverter.

/**
 * Tests that replication works between a local and a remote server that have different
 * blob IDs for the same blobs (via {@link StoreKeyConverter}).
 * @throws Exception
 */
@Test
public void replicaThreadTestConverter() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    MockHost expectedLocalHost = new MockHost(localHost.dataNodeId, clusterMap);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    int batchSize = 4;
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
    Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    /*
        STORE KEY CONVERTER MAPPING
        Key     Value
        B0      B0'
        B1      B1'
        B2      null

        BEFORE
        Local   Remote
        B0'     B0
                B1
                B2

        AFTER
        Local   Remote
        B0'     B0
        B1'     B1
                B2
        B0 converts to B0' for the local host,
        B1 converts to B1' for the local host,
        B2 converts to null for the local host,
        so the local host already has B0/B0',
        B1 is transferred as B1',
        and B2 is invalid for the local host,
        so it does not count as missing.
        Missing Keys: 1
    */
    Map<PartitionId, List<BlobId>> partitionIdToDeleteBlobId = new HashMap<>();
    Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
    Map<PartitionId, BlobId> expectedPartitionIdToDeleteBlobId = new HashMap<>();
    for (int i = 0; i < partitionIds.size(); i++) {
        PartitionId partitionId = partitionIds.get(i);
        List<BlobId> deleteBlobIds = new ArrayList<>();
        partitionIdToDeleteBlobId.put(partitionId, deleteBlobIds);
        BlobId b0 = generateRandomBlobId(partitionId);
        deleteBlobIds.add(b0);
        BlobId b0p = generateRandomBlobId(partitionId);
        expectedPartitionIdToDeleteBlobId.put(partitionId, b0p);
        BlobId b1 = generateRandomBlobId(partitionId);
        BlobId b1p = generateRandomBlobId(partitionId);
        BlobId b2 = generateRandomBlobId(partitionId);
        deleteBlobIds.add(b2);
        conversionMap.put(b0, b0p);
        conversionMap.put(b1, b1p);
        conversionMap.put(b2, null);
        // Run the converter over the current conversion map's keys so that
        // BlobIdTransformer can create b1p in expectedLocalHost
        storeKeyConverter.setConversionMap(conversionMap);
        storeKeyConverter.convert(conversionMap.keySet());
        addPutMessagesToReplicasOfPartition(Arrays.asList(b0p), Arrays.asList(localHost));
        addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1, b2), Arrays.asList(remoteHost));
        addPutMessagesToReplicasOfPartition(Arrays.asList(b0p, b1), Arrays.asList(null, transformer), Arrays.asList(expectedLocalHost));
        // Check that expected local host contains the correct blob ids
        Set<BlobId> expectedLocalHostBlobIds = new HashSet<>();
        expectedLocalHostBlobIds.add(b0p);
        expectedLocalHostBlobIds.add(b1p);
        for (MessageInfo messageInfo : expectedLocalHost.infosByPartition.get(partitionId)) {
            assertTrue("Remove should never fail", expectedLocalHostBlobIds.remove(messageInfo.getStoreKey()));
        }
        assertTrue("expectedLocalHostBlobIds should now be empty", expectedLocalHostBlobIds.isEmpty());
    }
    storeKeyConverter.setConversionMap(conversionMap);
    int expectedIndex = assertMissingKeysAndFixMissingStoreKeys(0, 2, batchSize, 1, replicaThread, remoteHost, replicasToReplicate);
    // Check that there are no missing buffers between expectedLocalHost and LocalHost
    Map<PartitionId, List<ByteBuffer>> missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
    assertTrue(missingBuffers.isEmpty());
    missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
    assertTrue(missingBuffers.isEmpty());
    // delete blob
    for (int i = 0; i < partitionIds.size(); i++) {
        PartitionId partitionId = partitionIds.get(i);
        List<BlobId> deleteBlobIds = partitionIdToDeleteBlobId.get(partitionId);
        for (BlobId deleteBlobId : deleteBlobIds) {
            addDeleteMessagesToReplicasOfPartition(partitionId, deleteBlobId, Arrays.asList(remoteHost));
        }
        addDeleteMessagesToReplicasOfPartition(partitionId, expectedPartitionIdToDeleteBlobId.get(partitionId), Arrays.asList(expectedLocalHost));
    }
    expectedIndex = assertMissingKeysAndFixMissingStoreKeys(expectedIndex, 2, batchSize, 0, replicaThread, remoteHost, replicasToReplicate);
    // Check that there are no missing buffers between expectedLocalHost and LocalHost
    missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
    assertTrue(missingBuffers.isEmpty());
    missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
    assertTrue(missingBuffers.isEmpty());
    // Expect 5 missing buffers: 3 unconverted puts + 2 deletes of unconverted ids
    verifyNoMoreMissingKeysAndExpectedMissingBufferCount(remoteHost, localHost, replicaThread, replicasToReplicate, idsToBeIgnoredByPartition, storeKeyConverter, expectedIndex, expectedIndex, 5);
}
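
To make the mapping table in the block comment above concrete, here is a sketch of the three conversion outcomes, reusing the b0/b1/b2 names from the loop:

// Sketch of the three conversion outcomes from the mapping table above.
storeKeyConverter.setConversionMap(conversionMap); // {b0 -> b0p, b1 -> b1p, b2 -> null}
Map<StoreKey, StoreKey> converted = storeKeyConverter.convert(Arrays.asList(b0, b1, b2));
// b0 -> b0p: the local host already holds b0p, so b0 is not missing.
// b1 -> b1p: b1p is absent locally, so this is the single missing key.
// b2 -> null: invalid for the local store, so it is not counted as missing.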

Example 14 with MockStoreKeyConverterFactory

Use of com.github.ambry.store.MockStoreKeyConverterFactory in project ambry by linkedin.

From the class ReplicationTest, method replicaThreadLifeVersionLocalLessThanRemote_MissingPuts.

/**
 * Tests the case where the local store is missing put records with lifeVersion greater than 0.
 */
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_MissingPuts() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
    storeKeyConverter.setConversionMap(conversionMap);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
    Map<PartitionId, List<StoreKey>> idsToBeTtlUpdatedByPartition = new HashMap<>();
    short lifeVersion = 1;
    for (int i = 0; i < partitionIds.size(); i++) {
        List<StoreKey> toBeIgnored = new ArrayList<>();
        List<StoreKey> toBeTtlUpdated = new ArrayList<>();
        PartitionId partitionId = partitionIds.get(i);
        // Adding 1 put to remoteHost at lifeVersion 0
        List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1);
        // Adding 1 put to remoteHost at lifeVersion 1
        ids.addAll(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), lifeVersion, 1));
        // Adding one put to remoteHost at lifeVersion 1, which would be ttl updated later at lifeVersion 1
        StoreKey toTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), lifeVersion, 1).get(0);
        ids.add(toTtlUpdateId);
        addTtlUpdateMessagesToReplicasOfPartition(partitionId, toTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
        toBeTtlUpdated.add(toTtlUpdateId);
        // Adding one put to remoteHost at lifeVersion 0, which would be ttl updated later at lifeVersion 1
        toTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
        ids.add(toTtlUpdateId);
        addTtlUpdateMessagesToReplicasOfPartition(partitionId, toTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
        toBeTtlUpdated.add(toTtlUpdateId);
        // Adding one put to remoteHost, which would be deleted later
        StoreKey toDeleteId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), lifeVersion, 1).get(0);
        ids.add(toDeleteId);
        addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(remoteHost), lifeVersion, EXPIRY_TIME_MS);
        toBeIgnored.add(toDeleteId);
        // Adding one put to remoteHost, which would be ttl updated and deleted later
        StoreKey toDeleteAndTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), lifeVersion, 1).get(0);
        ids.add(toDeleteAndTtlUpdateId);
        addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteAndTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
        toBeTtlUpdated.add(toDeleteAndTtlUpdateId);
        addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteAndTtlUpdateId, Collections.singletonList(remoteHost), lifeVersion, UPDATED_EXPIRY_TIME_MS);
        toBeIgnored.add(toDeleteAndTtlUpdateId);
        // Adding one put to remoteHost at lifeVersion 0, delete it and then add undelete at lifeVersion 1
        StoreKey deleteAndUndeleteId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
        ids.add(deleteAndUndeleteId);
        addDeleteMessagesToReplicasOfPartition(partitionId, deleteAndUndeleteId, Collections.singletonList(remoteHost), (short) 0, EXPIRY_TIME_MS);
        addUndeleteMessagesToReplicasOfPartition(partitionId, deleteAndUndeleteId, Collections.singletonList(remoteHost), lifeVersion);
        idsToBeIgnoredByPartition.put(partitionId, toBeIgnored);
        idsToBeTtlUpdatedByPartition.put(partitionId, toBeTtlUpdated);
        // Adding one put to both remote and local host.
        ids.addAll(addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), lifeVersion, 1));
    }
    int batchSize = 100;
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
    List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    int missingKeyCount = 5;
    List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
    assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
    for (int i = 0; i < response.size(); i++) {
        assertEquals(missingKeyCount, response.get(i).missingStoreMessages.size());
        remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
    }
    replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
    for (int i = 0; i < response.size(); i++) {
        assertEquals("Token should have been set correctly in fixMissingStoreKeys()", response.get(i).remoteToken, remoteReplicaInfos.get(i).getToken());
    }
    // Don't compare buffers here; the put buffers might differ because the lifeVersion may have changed.
    for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
        assertEquals("MessageInfo number mismatch", 8, localInfoEntry.getValue().size());
    }
    checkBlobMessagesAreEqualInLocalAndRemoteHosts(localHost, remoteHost, idsToBeIgnoredByPartition, idsToBeTtlUpdatedByPartition);
}
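
The expected missingKeyCount of 5 follows from the per-partition setup; a quick accounting sketch (comments only, derived from the code above):

// Per partition:
//   8 ids added to remoteHost in total
// - 2 ids deleted on remoteHost (toDeleteId, toDeleteAndTtlUpdateId) -> ignored by replication
// - 1 id already present on localHost (the put added to both hosts)
// = 5 missing store messages expected from exchangeMetadata()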

Example 15 with MockStoreKeyConverterFactory

Use of com.github.ambry.store.MockStoreKeyConverterFactory in project ambry by linkedin.

From the class ReplicationTest, method replicationAllPauseTest.

/**
 * Tests that pausing all partitions causes the replica thread to pause, that it resumes when one
 * eligible partition is re-enabled, and that replication completes successfully.
 * @throws Exception
 */
@Test
public void replicationAllPauseTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
    for (PartitionId partitionId : partitionIds) {
        // add 10 messages to each partition and place them on the remote host only
        addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
    }
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
    mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
    int batchSize = 4;
    StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
    CountDownLatch readyToPause = new CountDownLatch(1);
    CountDownLatch readyToProceed = new CountDownLatch(1);
    AtomicReference<CountDownLatch> reachedLimitLatch = new AtomicReference<>(new CountDownLatch(1));
    AtomicReference<Exception> exception = new AtomicReference<>();
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, (store, messageInfos) -> {
        try {
            readyToPause.countDown();
            readyToProceed.await();
            if (store.messageInfos.size() == remoteHost.infosByPartition.get(store.id).size()) {
                reachedLimitLatch.get().countDown();
            }
        } catch (Exception e) {
            exception.set(e);
        }
    }, null);
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    Thread thread = Utils.newThread(replicaThread, false);
    thread.start();
    assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
    // wait to pause replication
    readyToPause.await(10, TimeUnit.SECONDS);
    replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
    Set<PartitionId> expectedPaused = new HashSet<>(clusterMap.getAllPartitionIds(null));
    assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
    // signal the replica thread to move forward
    readyToProceed.countDown();
    // wait for the thread to go into waiting state
    assertTrue("Replica thread did not go into waiting state", TestUtils.waitUntilExpectedState(thread, Thread.State.WAITING, 10000));
    // unpause one partition
    replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
    expectedPaused.remove(partitionIds.get(0));
    assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
    // wait for it to catch up
    reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
    // reset limit
    reachedLimitLatch.set(new CountDownLatch(partitionIds.size() - 1));
    // unpause all partitions
    replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), true);
    assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
    // wait until all catch up
    reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
    // shutdown
    replicaThread.shutdown();
    if (exception.get() != null) {
        throw exception.get();
    }
    Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
    for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
        assertEquals("No infos should be missing", 0, entry.getValue().size());
    }
    Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
    for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
        assertEquals("No buffers should be missing", 0, entry.getValue().size());
    }
}
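
The pause/resume control used above reduces to two calls on the replica thread; a minimal extract of the pattern as it appears in the test:

// Disable (pause) replication for all partitions, then re-enable one.
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
// getReplicationDisabledPartitions() reflects the currently paused set.
Set<PartitionId> paused = replicaThread.getReplicationDisabledPartitions();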

Aggregations

MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory): 21 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 19 usages
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 18 usages
HashMap (java.util.HashMap): 18 usages
Test (org.junit.Test): 18 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 17 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 17 usages
Transformer (com.github.ambry.store.Transformer): 17 usages
ValidatingTransformer (com.github.ambry.messageformat.ValidatingTransformer): 16 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 15 usages
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 15 usages
ArrayList (java.util.ArrayList): 15 usages
Map (java.util.Map): 15 usages
List (java.util.List): 14 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 12 usages
MessageInfo (com.github.ambry.store.MessageInfo): 11 usages
StoreKey (com.github.ambry.store.StoreKey): 11 usages
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 10 usages
BlobId (com.github.ambry.commons.BlobId): 8 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 7 usages