Example 6 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by LinkedIn.

From class NonBlockingRouterTest, method testResponseHandling.

/**
 * Response handling related tests for all operation managers.
 */
@Test
public void testResponseHandling() throws Exception {
    try {
        Properties props = getNonBlockingRouterProperties("DC1");
        VerifiableProperties verifiableProperties = new VerifiableProperties(props);
        setOperationParams();
        final List<ReplicaId> failedReplicaIds = new ArrayList<>();
        final AtomicInteger successfulResponseCount = new AtomicInteger(0);
        final AtomicBoolean invalidResponse = new AtomicBoolean(false);
        ResponseHandler mockResponseHandler = new ResponseHandler(mockClusterMap) {

            @Override
            public void onEvent(ReplicaId replicaId, Object e) {
                if (e instanceof ServerErrorCode) {
                    if (e == ServerErrorCode.No_Error) {
                        successfulResponseCount.incrementAndGet();
                    } else {
                        invalidResponse.set(true);
                    }
                } else {
                    failedReplicaIds.add(replicaId);
                }
            }
        };
        // Instantiate a router just to put a blob successfully.
        MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
        setRouter(props, mockServerLayout, new LoggingNotificationSystem());
        setOperationParams();
        // More extensive tests for puts are present elsewhere - these statements are here just to exercise the flow within the
        // NonBlockingRouter class, and to ensure that operations submitted to a router eventually complete.
        String blobIdStr = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
        BlobId blobId = RouterUtils.getBlobIdFromString(blobIdStr, mockClusterMap);
        router.close();
        for (MockServer mockServer : mockServerLayout.getMockServers()) {
            mockServer.setServerErrorForAllRequests(ServerErrorCode.No_Error);
        }
        SocketNetworkClient networkClient = new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime).getNetworkClient();
        cryptoJobHandler = new CryptoJobHandler(CryptoJobHandlerTest.DEFAULT_THREAD_COUNT);
        KeyManagementService localKMS = new MockKeyManagementService(new KMSConfig(verifiableProperties), singleKeyForKMS);
        putManager = new PutManager(mockClusterMap, mockResponseHandler, new LoggingNotificationSystem(), new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap, null), new RouterCallback(networkClient, new ArrayList<>()), "0", localKMS, cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
        OperationHelper opHelper = new OperationHelper(OperationType.PUT);
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, -1);
        // Test that if a failed response comes before the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, 0);
        // Test that if a failed response comes after the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount, invalidResponse, PUT_REQUEST_PARALLELISM - 1);
        testNoResponseNoNotification(opHelper, failedReplicaIds, null, successfulResponseCount, invalidResponse);
        testResponseDeserializationError(opHelper, networkClient, null);
        opHelper = new OperationHelper(OperationType.GET);
        getManager = new GetManager(mockClusterMap, mockResponseHandler, new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap, null), new RouterCallback(networkClient, new ArrayList<BackgroundDeleteRequest>()), localKMS, cryptoService, cryptoJobHandler, mockTime);
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, -1);
        // Test that if a failed response comes before the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, 0);
        // Test that if a failed response comes after the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, GET_REQUEST_PARALLELISM - 1);
        testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
        testResponseDeserializationError(opHelper, networkClient, blobId);
        opHelper = new OperationHelper(OperationType.DELETE);
        deleteManager = new DeleteManager(mockClusterMap, mockResponseHandler, accountService, new LoggingNotificationSystem(), new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap, null), new RouterCallback(null, new ArrayList<BackgroundDeleteRequest>()), mockTime);
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, -1);
        // Test that if a failed response comes before the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, 0);
        // Test that if a failed response comes after the operation is completed, failure detector is notified.
        testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount, invalidResponse, DELETE_REQUEST_PARALLELISM - 1);
        testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
        testResponseDeserializationError(opHelper, networkClient, blobId);
    } finally {
        if (putManager != null) {
            putManager.close();
        }
        if (getManager != null) {
            getManager.close();
        }
        if (deleteManager != null) {
            deleteManager.close();
        }
    }
}
Also used : KMSConfig(com.github.ambry.config.KMSConfig) ResponseHandler(com.github.ambry.commons.ResponseHandler) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) SocketNetworkClient(com.github.ambry.network.SocketNetworkClient) ReplicaId(com.github.ambry.clustermap.ReplicaId) ServerErrorCode(com.github.ambry.server.ServerErrorCode) RouterConfig(com.github.ambry.config.RouterConfig) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) JSONObject(org.json.JSONObject) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
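
The mock handler above classifies every onEvent callback: a ServerErrorCode argument is treated as a received response (No_Error incrementing the success counter), while anything else marks the replica as failed. A small hedged sketch of driving that handler directly, reusing the fields declared in the test; only the ResponseHandler(ClusterMap) constructor and the onEvent(ReplicaId, Object) signature shown above are assumed, and the replica lookup and IOException are illustrative:

// Sketch (not part of the test above): drive the mock handler directly.
// The replica lookup and the IOException are illustrative choices.
ReplicaId someReplica =
    mockClusterMap.getWritablePartitionIds(null).get(0).getReplicaIds().get(0);
// A ServerErrorCode event is a received response; No_Error bumps successfulResponseCount.
mockResponseHandler.onEvent(someReplica, ServerErrorCode.No_Error);
// Any non-ServerErrorCode event (e.g. an exception) adds the replica to failedReplicaIds.
mockResponseHandler.onEvent(someReplica, new java.io.IOException("simulated network failure"));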

Example 7 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by LinkedIn.

From class MockRouterCallback, method testInstantiation.

/**
 * Test {@link GetBlobInfoOperation} instantiation and validate the get methods.
 */
@Test
public void testInstantiation() {
    BlobId blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(random), Utils.getRandomShort(random), mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false, BlobId.BlobDataType.DATACHUNK);
    Callback<GetBlobResultInternal> getOperationCallback = (result, exception) -> {
    // no op.
    };
    // test a good case
    GetBlobInfoOperation op = new GetBlobInfoOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
    Assert.assertEquals("Callback must match", getOperationCallback, op.getCallback());
    Assert.assertEquals("Blob ids must match", blobId.getID(), op.getBlobIdStr());
    // test the case where the tracker type is bad
    Properties properties = getNonBlockingRouterProperties(true);
    properties.setProperty("router.get.operation.tracker.type", "NonExistentTracker");
    RouterConfig badConfig = new RouterConfig(new VerifiableProperties(properties));
    try {
        new GetBlobInfoOperation(badConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
        Assert.fail("Instantiation of GetBlobInfoOperation with an invalid tracker type must fail");
    } catch (IllegalArgumentException e) {
    // expected. Nothing to do.
    }
}
Also used : ResponseInfo(com.github.ambry.network.ResponseInfo) Arrays(java.util.Arrays) BlobProperties(com.github.ambry.messageformat.BlobProperties) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) GetResponse(com.github.ambry.protocol.GetResponse) GeneralSecurityException(java.security.GeneralSecurityException) NetworkClientErrorCode(com.github.ambry.network.NetworkClientErrorCode) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) After(org.junit.After) NettyByteBufLeakHelper(com.github.ambry.utils.NettyByteBufLeakHelper) EnumSet(java.util.EnumSet) Parameterized(org.junit.runners.Parameterized) Utils(com.github.ambry.utils.Utils) CryptoServiceConfig(com.github.ambry.config.CryptoServiceConfig) BlobInfo(com.github.ambry.messageformat.BlobInfo) RouterConfig(com.github.ambry.config.RouterConfig) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) MockTime(com.github.ambry.utils.MockTime) QuotaTestUtils(com.github.ambry.quota.QuotaTestUtils) Callback(com.github.ambry.commons.Callback) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) InMemAccountService(com.github.ambry.account.InMemAccountService) BlobId(com.github.ambry.commons.BlobId) ResponseHandler(com.github.ambry.commons.ResponseHandler) ServerErrorCode(com.github.ambry.server.ServerErrorCode) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) PutManagerTest(com.github.ambry.router.PutManagerTest) SocketNetworkClient(com.github.ambry.network.SocketNetworkClient) Assume(org.junit.Assume) Before(org.junit.Before) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) QuotaChargeCallback(com.github.ambry.quota.QuotaChargeCallback) ClusterMap(com.github.ambry.clustermap.ClusterMap) IOException(java.io.IOException) Test(org.junit.Test) RequestInfo(com.github.ambry.network.RequestInfo) TimeUnit(java.util.concurrent.TimeUnit) KMSConfig(com.github.ambry.config.KMSConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) BlobId(com.github.ambry.commons.BlobId) RouterConfig(com.github.ambry.config.RouterConfig) PutManagerTest(com.github.ambry.router.PutManagerTest) Test(org.junit.Test)
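
The failure case above shows that constructing a GetBlobInfoOperation with an unrecognized router.get.operation.tracker.type fails with IllegalArgumentException. For contrast, a hedged sketch of the happy path; "SimpleOperationTracker" is assumed here to be an accepted tracker type, which this test does not itself verify:

// Hedged sketch (assumption): "SimpleOperationTracker" is taken to be a recognized
// router.get.operation.tracker.type; this test only exercises the invalid value.
Properties okProps = getNonBlockingRouterProperties(true);
okProps.setProperty("router.get.operation.tracker.type", "SimpleOperationTracker");
RouterConfig okConfig = new RouterConfig(new VerifiableProperties(okProps));
// With a recognized tracker type, instantiation is expected to succeed.
GetBlobInfoOperation okOp = new GetBlobInfoOperation(okConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, getOperationCallback, routerCallback, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
Assert.assertEquals("Blob ids must match", blobId.getID(), okOp.getBlobIdStr());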

Example 8 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by LinkedIn.

From class ReplicationManager, method assignReplicasToThreadPool.

/**
 * Partitions the list of data nodes between given set of replica threads for the given DC
 */
private void assignReplicasToThreadPool() {
    for (Map.Entry<String, DataNodeRemoteReplicaInfos> mapEntry : dataNodeRemoteReplicaInfosPerDC.entrySet()) {
        String datacenter = mapEntry.getKey();
        DataNodeRemoteReplicaInfos dataNodeRemoteReplicaInfos = mapEntry.getValue();
        Set<DataNodeId> dataNodesToReplicate = dataNodeRemoteReplicaInfos.getDataNodeIds();
        int dataNodesCount = dataNodesToReplicate.size();
        int replicaThreadCount = numberOfReplicaThreads.get(datacenter);
        if (replicaThreadCount <= 0) {
            logger.warn("Number of replica threads is smaller or equal to 0, not starting any replica threads for {} ", datacenter);
            continue;
        } else if (dataNodesCount == 0) {
            logger.warn("Number of nodes to replicate from is 0, not starting any replica threads for {} ", datacenter);
            continue;
        }
        // Divide the nodes between the replica threads if the number of replica threads is less than or equal to the
        // number of nodes. Otherwise, assign one thread to one node.
        logger.info("Number of replica threads to replicate from {}: {}", datacenter, replicaThreadCount);
        logger.info("Number of dataNodes to replicate :", dataNodesCount);
        if (dataNodesCount < replicaThreadCount) {
            logger.warn("Number of replica threads: {} is more than the number of nodes to replicate from: {}", replicaThreadCount, dataNodesCount);
            replicaThreadCount = dataNodesCount;
        }
        ResponseHandler responseHandler = new ResponseHandler(clusterMap);
        int numberOfNodesPerThread = dataNodesCount / replicaThreadCount;
        int remainingNodes = dataNodesCount % replicaThreadCount;
        Iterator<DataNodeId> dataNodeIdIterator = dataNodesToReplicate.iterator();
        for (int i = 0; i < replicaThreadCount; i++) {
            // create the list of nodes for the replica thread
            Map<DataNodeId, List<RemoteReplicaInfo>> replicasForThread = new HashMap<DataNodeId, List<RemoteReplicaInfo>>();
            int nodesAssignedToThread = 0;
            while (nodesAssignedToThread < numberOfNodesPerThread) {
                DataNodeId dataNodeToReplicate = dataNodeIdIterator.next();
                replicasForThread.put(dataNodeToReplicate, dataNodeRemoteReplicaInfos.getRemoteReplicaListForDataNode(dataNodeToReplicate));
                dataNodeIdIterator.remove();
                nodesAssignedToThread++;
            }
            if (remainingNodes > 0) {
                DataNodeId dataNodeToReplicate = dataNodeIdIterator.next();
                replicasForThread.put(dataNodeToReplicate, dataNodeRemoteReplicaInfos.getRemoteReplicaListForDataNode(dataNodeToReplicate));
                dataNodeIdIterator.remove();
                remainingNodes--;
            }
            boolean replicatingOverSsl = sslEnabledDatacenters.contains(datacenter);
            String threadIdentity = "Replica Thread-" + (dataNodeId.getDatacenterName().equals(datacenter) ? "Intra-" : "Inter") + i + datacenter;
            ReplicaThread replicaThread = new ReplicaThread(threadIdentity, replicasForThread, factory, clusterMap, correlationIdGenerator, dataNodeId, connectionPool, replicationConfig, replicationMetrics, notification, storeKeyFactory, replicationConfig.replicationValidateMessageStream, metricRegistry, replicatingOverSsl, datacenter, responseHandler);
            if (replicaThreadPools.containsKey(datacenter)) {
                replicaThreadPools.get(datacenter).add(replicaThread);
            } else {
                replicaThreadPools.put(datacenter, new ArrayList<>(Arrays.asList(replicaThread)));
            }
        }
    }
}
Also used : ResponseHandler(com.github.ambry.commons.ResponseHandler) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) ClusterMap(com.github.ambry.clustermap.ClusterMap) DataNodeId(com.github.ambry.clustermap.DataNodeId)
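
The division above hands each thread dataNodesCount / replicaThreadCount nodes and distributes the dataNodesCount % replicaThreadCount leftovers one per thread. A standalone sketch of just that arithmetic (illustrative values only, independent of the Ambry classes):

public class ReplicaThreadSplitSketch {
    public static void main(String[] args) {
        int dataNodesCount = 7;      // example: nodes to replicate from in this datacenter
        int replicaThreadCount = 3;  // example: replica threads configured for it
        int numberOfNodesPerThread = dataNodesCount / replicaThreadCount; // 2
        int remainingNodes = dataNodesCount % replicaThreadCount;         // 1
        for (int i = 0; i < replicaThreadCount; i++) {
            int nodesForThisThread = numberOfNodesPerThread + (remainingNodes > 0 ? 1 : 0);
            if (remainingNodes > 0) {
                remainingNodes--;
            }
            System.out.println("Replica thread " + i + " replicates from " + nodesForThisThread + " node(s)");
        }
        // Prints 3, 2 and 2, the same split the assignment loop above produces.
    }
}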

Example 9 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by LinkedIn.

From class CloudBlobStoreTest, method testPutWithTtl.

/**
 * Test PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after PUT and after TtlUpdate, or after TtlUpdate only.
 * The PUT may already be expired, or its expiration time may be below or at/above the upload threshold.
 * @throws Exception
 */
@Test
public void testPutWithTtl() throws Exception {
    // Set up remote host
    MockClusterMap clusterMap = new MockClusterMap();
    MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    PartitionId partitionId = partitionIds.get(0);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
    // Generate BlobIds for following PUT.
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
    List<BlobId> blobIdList = new ArrayList<>();
    for (int i = 0; i < 6; i++) {
        blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
    }
    // Set up VCR
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty("clustermap.port", "12300");
    props.setProperty("vcr.ssl.port", "12345");
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
    LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
    CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
    CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
    cloudBlobStore.start();
    // Create ReplicaThread and add RemoteReplicaInfo to it.
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
    ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
    for (ReplicaId replica : partitionId.getReplicaIds()) {
        if (replica.getDataNodeId() == remoteHost.dataNodeId) {
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
            replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
            break;
        }
    }
    long referenceTime = System.currentTimeMillis();
    // Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    BlobId id = blobIdList.get(0);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    replicaThread.replicate();
    assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 2: Put already expired. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(1);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(2);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    replicaThread.replicate();
    if (isVcr) {
        assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    } else {
        assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    }
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(3);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 5: Put TTL greater than or equals to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud after Put and update ttl after TtlUpdate.
    id = blobIdList.get(4);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    replicaThread.replicate();
    assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 6: Put TTL greater than or equals to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud after TtlUpdate.
    id = blobIdList.get(5);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Verify expiration time of all blobs.
    Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
    for (BlobId blobId : blobIdList) {
        assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
    }
}
Also used : ReplicaThread(com.github.ambry.replication.ReplicaThread) Transformer(com.github.ambry.store.Transformer) BlobIdTransformer(com.github.ambry.replication.BlobIdTransformer) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) ResponseHandler(com.github.ambry.commons.ResponseHandler) HashMap(java.util.HashMap) MockConnectionPool(com.github.ambry.replication.MockConnectionPool) ReplicationMetrics(com.github.ambry.replication.ReplicationMetrics) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) CloudConfig(com.github.ambry.config.CloudConfig) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) BlobIdTransformer(com.github.ambry.replication.BlobIdTransformer) MockTime(com.github.ambry.utils.MockTime) CloudReplica(com.github.ambry.clustermap.CloudReplica) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) ReplicationConfig(com.github.ambry.config.ReplicationConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) MockFindTokenHelper(com.github.ambry.replication.MockFindTokenHelper) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockHost(com.github.ambry.replication.MockHost) MockFindToken(com.github.ambry.replication.MockFindToken) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) ReplicationTest(com.github.ambry.replication.ReplicationTest) Test(org.junit.Test)
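
Every case above turns on one comparison: when running as a VCR, a blob is uploaded at PUT time only if its remaining TTL is at least cloudConfig.vcrMinTtlDays; shorter-lived or already-expired blobs are uploaded only after the TtlUpdate is replicated. A minimal standalone sketch of that check (illustrative only; the real decision is made inside CloudBlobStore, not by this helper):

import java.util.concurrent.TimeUnit;

public class VcrTtlThresholdSketch {
    // Illustrative check mirroring the comparison this test exercises.
    static boolean uploadOnPut(long creationTimeMs, long expirationTimeMs, long vcrMinTtlDays) {
        long ttlMs = expirationTimeMs - creationTimeMs;
        return ttlMs >= TimeUnit.DAYS.toMillis(vcrMinTtlDays);
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long minTtlDays = 7; // stand-in for cloudConfig.vcrMinTtlDays
        // Cases 3/4: TTL one millisecond short of the threshold, so upload waits for TtlUpdate.
        System.out.println(uploadOnPut(now, now + TimeUnit.DAYS.toMillis(minTtlDays) - 1, minTtlDays)); // false
        // Cases 5/6: TTL meets the threshold, so the blob is uploaded at PUT time.
        System.out.println(uploadOnPut(now, now + TimeUnit.DAYS.toMillis(minTtlDays), minTtlDays));     // true
    }
}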

Example 10 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by LinkedIn.

From class TestUtils, method mockServerEventsAndVerify.

/**
 * The helper method sets up initial states for datanode, disk and replica. Then it triggers specified server event and
 * verifies the states of datanode, disk and replica are expected after event.
 * @param clusterManager the {@link ClusterMap} to use.
 * @param clusterMapConfig the {@link ClusterMapConfig} to use.
 * @param initialStates the initial states for datanode, disk and replica (default order).
 * @param serverErrorCode the {@link ServerErrorCode} received for mocking event.
 * @param expectedStates the expected states for datanode, disk and replica (default order).
 */
static void mockServerEventsAndVerify(ClusterMap clusterManager, ClusterMapConfig clusterMapConfig, ResourceState[] initialStates, ServerErrorCode serverErrorCode, ResourceState[] expectedStates) {
    ResponseHandler handler = new ResponseHandler(clusterManager);
    // choose a disk backed replica
    ReplicaId replica = clusterManager.getWritablePartitionIds(null).get(0).getReplicaIds().stream().filter(replicaId -> replicaId.getReplicaType() != ReplicaType.CLOUD_BACKED).findFirst().get();
    DataNodeId dataNode = replica.getDataNodeId();
    assertTrue(clusterManager.getReplicaIds(dataNode).contains(replica));
    DiskId disk = replica.getDiskId();
    // Verify that everything is up in the beginning.
    assertFalse(replica.isDown());
    assertEquals(HardwareState.AVAILABLE, dataNode.getState());
    assertEquals(HardwareState.AVAILABLE, disk.getState());
    // Mock initial states for node, disk and replica
    if (initialStates[0] == ResourceState.Node_Down) {
        for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutDatanodeErrorThreshold; i++) {
            clusterManager.onReplicaEvent(replica, ReplicaEventType.Node_Timeout);
        }
    }
    if (initialStates[1] == ResourceState.Disk_Down) {
        for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutDiskErrorThreshold; i++) {
            clusterManager.onReplicaEvent(replica, ReplicaEventType.Disk_Error);
        }
    }
    if (initialStates[2] == ResourceState.Replica_Down) {
        for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutReplicaErrorThreshold; i++) {
            clusterManager.onReplicaEvent(replica, ReplicaEventType.Replica_Unavailable);
        }
    }
    // Make sure node, disk and replica match specified initial states
    if (dataNode.getState() == HardwareState.AVAILABLE && disk.getState() == HardwareState.AVAILABLE) {
        // Since replica.isDown() will check the state of disk, if we try to mock disk is down and replica is up, we should
        // skip this check for initial state. Only when node and disk are up, we check the initial state of replica.
        assertEquals(initialStates[2], replica.isDown() ? ResourceState.Replica_Down : ResourceState.Replica_Up);
    }
    if (dataNode.getState() == HardwareState.AVAILABLE) {
        assertEquals(initialStates[1], disk.getState() == HardwareState.UNAVAILABLE ? ResourceState.Disk_Down : ResourceState.Disk_Up);
    }
    assertEquals(initialStates[0], dataNode.getState() == HardwareState.UNAVAILABLE ? ResourceState.Node_Down : ResourceState.Node_Up);
    // Trigger server event
    handler.onEvent(replica, serverErrorCode);
    // Verify node, disk and replica match expected states after server event
    assertEquals(expectedStates[2], replica.isDown() ? ResourceState.Replica_Down : ResourceState.Replica_Up);
    assertEquals(expectedStates[1], disk.getState() == HardwareState.UNAVAILABLE ? ResourceState.Disk_Down : ResourceState.Disk_Up);
    assertEquals(expectedStates[0], dataNode.getState() == HardwareState.UNAVAILABLE ? ResourceState.Node_Down : ResourceState.Node_Up);
}
Also used : ResponseHandler(com.github.ambry.commons.ResponseHandler)
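
A hypothetical call site for this helper, showing how the two ResourceState triples follow the default (datanode, disk, replica) order from the javadoc. The clusterManager/clusterMapConfig fixture, the choice of ServerErrorCode.Disk_Unavailable, and the expected post-event states are assumptions for illustration; the actual outcome depends on the configured error thresholds:

// Hypothetical invocation: everything starts up, a disk-level server error is reported,
// and the node is expected to stay up while the disk and its replica go down
// (assuming error thresholds of 1 in the test's ClusterMapConfig).
mockServerEventsAndVerify(clusterManager, clusterMapConfig,
    new ResourceState[]{ResourceState.Node_Up, ResourceState.Disk_Up, ResourceState.Replica_Up},
    ServerErrorCode.Disk_Unavailable,
    new ResourceState[]{ResourceState.Node_Up, ResourceState.Disk_Down, ResourceState.Replica_Down});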

Aggregations

ResponseHandler (com.github.ambry.commons.ResponseHandler): 13 usages
ArrayList (java.util.ArrayList): 8 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 7 usages
HashMap (java.util.HashMap): 7 usages
Test (org.junit.Test): 7 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 6 usages
BlobId (com.github.ambry.commons.BlobId): 6 usages
BlobProperties (com.github.ambry.messageformat.BlobProperties): 6 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 5 usages
LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem): 5 usages
RouterConfig (com.github.ambry.config.RouterConfig): 5 usages
List (java.util.List): 5 usages
Properties (java.util.Properties): 5 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 4 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 4 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 4 usages
MockTime (com.github.ambry.utils.MockTime): 4 usages
InMemAccountService (com.github.ambry.account.InMemAccountService): 3 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 3 usages