
Example 1 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.

In class ReplicationEngine, method createThreadPool.

/**
 * Creates the pool of replica threads for a datacenter.
 * @param datacenter the name of the datacenter to replicate from.
 * @param numberOfThreads the number of threads to create for the thread pool.
 * @param startThread {@code true} if each thread should be started as soon as it is created.
 * @return the list of {@link ReplicaThread}s created for the datacenter.
 */
private List<ReplicaThread> createThreadPool(String datacenter, int numberOfThreads, boolean startThread) {
    nextReplicaThreadIndexByDc.put(datacenter, new AtomicInteger(0));
    List<ReplicaThread> replicaThreads = new ArrayList<>();
    logger.info("Number of replica threads to replicate from {}: {}", datacenter, numberOfThreads);
    ResponseHandler responseHandler = new ResponseHandler(clusterMap);
    for (int i = 0; i < numberOfThreads; i++) {
        boolean replicatingOverSsl = sslEnabledDatacenters.contains(datacenter);
        String threadIdentity = getReplicaThreadName(datacenter, i);
        try {
            StoreKeyConverter threadSpecificKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
            Transformer threadSpecificTransformer = Utils.getObj(transformerClassName, storeKeyFactory, threadSpecificKeyConverter);
            ReplicaThread replicaThread =
                new ReplicaThread(threadIdentity, tokenHelper, clusterMap, correlationIdGenerator, dataNodeId,
                    connectionPool, replicationConfig, replicationMetrics, notification, threadSpecificKeyConverter,
                    threadSpecificTransformer, metricRegistry, replicatingOverSsl, datacenter, responseHandler, time,
                    replicaSyncUpManager, skipPredicate, leaderBasedReplicationAdmin);
            replicaThreads.add(replicaThread);
            if (startThread) {
                Thread thread = Utils.newThread(replicaThread.getName(), replicaThread, false);
                thread.start();
                logger.info("Started replica thread {}", thread.getName());
            }
        } catch (Exception e) {
            throw new RuntimeException("Encountered exception instantiating ReplicaThread", e);
        }
    }
    replicationMetrics.trackLiveThreadsCount(replicaThreads, datacenter);
    replicationMetrics.populateSingleColoMetrics(datacenter);
    return replicaThreads;
}
Also used: Transformer (com.github.ambry.store.Transformer), ResponseHandler (com.github.ambry.commons.ResponseHandler), ArrayList (java.util.ArrayList), StoreException (com.github.ambry.store.StoreException), IOException (java.io.IOException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StoreKeyConverter (com.github.ambry.store.StoreKeyConverter)
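
The common pattern across all of these examples: a single ResponseHandler is built from the ClusterMap and reused, and request outcomes are reported back to it through onEvent so per-replica health can be tracked. Below is a minimal sketch of that pattern, using only the constructor and the onEvent call visible in these examples; the wrapper class and method names are illustrative, not from the Ambry sources.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.clustermap.ReplicaId;
import com.github.ambry.commons.ResponseHandler;
import com.github.ambry.protocol.ReplicaMetadataResponseInfo;

// Hypothetical helper; only the ResponseHandler constructor and onEvent call are taken from the examples on this page.
final class ReplicaEventReporter {
    private final ResponseHandler responseHandler;

    ReplicaEventReporter(ClusterMap clusterMap) {
        // one handler per cluster map, shared by all replica threads of a datacenter (see createThreadPool above)
        this.responseHandler = new ResponseHandler(clusterMap);
    }

    // mirrors the call made in Example 2: feed the per-partition outcome of a metadata exchange
    // back to the cluster map so error tracking for the remote replica is updated
    void reportMetadataExchange(ReplicaId replicaId, ReplicaMetadataResponseInfo responseInfo) {
        responseHandler.onEvent(replicaId, responseInfo.getError());
    }
}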

Example 2 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.

In class ReplicationTest, method blockDeprecatedContainerReplicationTest.

/**
 * Tests that blobs in deprecated containers are blocked during replication.
 */
@Test
public void blockDeprecatedContainerReplicationTest() throws Exception {
    Properties properties = new Properties();
    properties.setProperty("replication.container.deletion.enabled", "true");
    replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
    storeKeyConverter.setConversionMap(conversionMap);
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    for (int i = 0; i < partitionIds.size(); i++) {
        PartitionId partitionId = partitionIds.get(i);
        BlobId b0 = generateRandomBlobId(partitionId);
        conversionMap.put(b0, b0);
        BlobId b1 = generateRandomBlobId(partitionId);
        conversionMap.put(b1, b1);
        // add the same 2 messages (b0 and b1) to the remote host only, so both blobs are initially missing locally
        storeKeyConverter.setConversionMap(conversionMap);
        storeKeyConverter.convert(conversionMap.keySet());
        addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1), Collections.singletonList(remoteHost));
    }
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    int batchSize = 4;
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
    replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
    List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
    Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = Collections.singletonMap(remoteHost.dataNodeId, remoteReplicaInfoList);
    storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
    Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
    ReplicaThread replicaThread =
        new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
            new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
            storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false,
            localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, skipPredicate);
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
        replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
    }
    List<RemoteReplicaInfo> remoteReplicaInfos = replicasToReplicate.get(remoteHost.dataNodeId);
    DataNodeId remoteNode = remoteReplicaInfos.get(0).getReplicaId().getDataNodeId();
    ReplicaMetadataResponse response = replicaThread.getReplicaMetadataResponse(remoteReplicaInfos, new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteNode);
    // case 1: DELETE_IN_PROGRESS container whose delete trigger time is past the retention window; its blobs should be skipped
    for (int i = 0; i < 2; i++) {
        RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
        new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
        for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
            short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
            short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
            Container container = Mockito.mock(Container.class);
            Account account = Mockito.mock(Account.class);
            Mockito.when(account.getContainerById(containerId)).thenReturn(container);
            Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
            Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(replicationConfig.replicationContainerDeletionRetentionDays + 1));
            Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
        }
        Set<MessageInfo> remoteMissingStoreKeys = replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
        assertEquals("All DELETE_IN_PROGRESS blobs qualified with retention time should be skipped during replication", 0, remoteMissingStoreKeys.size());
        Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
        replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo, remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
    }
    // case 2: DELETE_IN_PROGRESS container still within the retention window; its blobs should still be replicated
    for (int i = 2; i < 4; i++) {
        RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
        new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
        for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
            short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
            short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
            Container container = Mockito.mock(Container.class);
            Account account = Mockito.mock(Account.class);
            Mockito.when(account.getContainerById(containerId)).thenReturn(container);
            Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
            Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
            Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis());
        }
        Set<MessageInfo> remoteMissingStoreKeys = replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
        assertEquals("All DELETE_IN_PROGRESS blobs not qualified with retention time should not be skipped during replication", 2, remoteMissingStoreKeys.size());
        Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
        replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo, remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
    }
    // case 3: INACTIVE container; its blobs should be skipped
    for (int i = 4; i < 6; i++) {
        RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
        new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
        for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
            short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
            short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
            Container container = Mockito.mock(Container.class);
            Account account = Mockito.mock(Account.class);
            Mockito.when(account.getContainerById(containerId)).thenReturn(container);
            Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
            Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.INACTIVE);
        }
        Set<MessageInfo> remoteMissingStoreKeys = replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
        assertEquals("All INACTIVE blobs should be skipped during replication", 0, remoteMissingStoreKeys.size());
        Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
        replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo, remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
    }
    // case 4: ACTIVE container; its blobs should be replicated
    for (int i = 6; i < 8; i++) {
        RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
        ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
        new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
        for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
            short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
            short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
            Container container = Mockito.mock(Container.class);
            Account account = Mockito.mock(Account.class);
            Mockito.when(account.getContainerById(containerId)).thenReturn(container);
            Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
            Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.ACTIVE);
        }
        Set<MessageInfo> remoteMissingStoreKeys = replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
        assertEquals("All non-deprecated blobs should not be skipped during replication", 2, remoteMissingStoreKeys.size());
        Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
        replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo, remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
    }
}
Also used: Account (com.github.ambry.account.Account), ValidatingTransformer (com.github.ambry.messageformat.ValidatingTransformer), Transformer (com.github.ambry.store.Transformer), ResponseHandler (com.github.ambry.commons.ResponseHandler), HashMap (java.util.HashMap), Properties (java.util.Properties), VerifiableProperties (com.github.ambry.config.VerifiableProperties), StoreKeyFactory (com.github.ambry.store.StoreKeyFactory), Container (com.github.ambry.account.Container), List (java.util.List), ArrayList (java.util.ArrayList), ReplicaMetadataResponse (com.github.ambry.protocol.ReplicaMetadataResponse), MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory), ReplicationConfig (com.github.ambry.config.ReplicationConfig), ReplicaMetadataResponseInfo (com.github.ambry.protocol.ReplicaMetadataResponseInfo), MetricRegistry (com.codahale.metrics.MetricRegistry), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), StoreKey (com.github.ambry.store.StoreKey), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), MessageInfo (com.github.ambry.store.MessageInfo), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), BlobId (com.github.ambry.commons.BlobId), DataNodeId (com.github.ambry.clustermap.DataNodeId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), MockClusterMap (com.github.ambry.clustermap.MockClusterMap), Test (org.junit.Test)
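
For reference, the arithmetic behind case 1 and case 2 above: a DELETE_IN_PROGRESS container only qualifies for skipping once its delete trigger time is at least replicationConfig.replicationContainerDeletionRetentionDays in the past. The helper below is a hedged sketch of that check (an assumption about what the ReplicationSkipPredicate path evaluates, not its actual source); it agrees with both mock setups: a trigger time of retentionDays + 1 days ago is skipped, a trigger time of now is not.

import java.util.concurrent.TimeUnit;

// Sketch only: the real decision is made inside ReplicationSkipPredicate / getMissingStoreMessages.
final class ContainerRetentionSketch {
    /**
     * @param deleteTriggerTimeMs the value Container#getDeleteTriggerTime() is mocked to return above
     * @param retentionDays       replicationConfig.replicationContainerDeletionRetentionDays
     * @param nowMs               the current time in milliseconds
     * @return true when the DELETE_IN_PROGRESS container has aged past retention and its blobs should be
     *         skipped (case 1); false while it is still inside the retention window (case 2)
     */
    static boolean retentionElapsed(long deleteTriggerTimeMs, int retentionDays, long nowMs) {
        return deleteTriggerTimeMs + TimeUnit.DAYS.toMillis(retentionDays) < nowMs;
    }
}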

Example 3 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.

In class ReplicationTest, method remoteReplicaInfoAddRemoveTest.

/**
 * Tests adding {@link RemoteReplicaInfo}s to, and removing them from, a {@link ReplicaThread}.
 * @throws Exception on any unexpected error during the test.
 */
@Test
public void remoteReplicaInfoAddRemoveTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
    mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
    StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
    replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
    List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
    ReplicaThread replicaThread =
        new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
            new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
            mockStoreKeyConverterFactory.getStoreKeyConverter(), transformer, clusterMap.getMetricRegistry(), false,
            localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, null, null);
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
        replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
    }
    List<RemoteReplicaInfo> actualRemoteReplicaInfoList = replicaThread.getRemoteReplicaInfos().get(remoteHost.dataNodeId);
    Comparator<RemoteReplicaInfo> remoteReplicaInfoComparator = Comparator.comparing(info -> info.getReplicaId().getPartitionId().toPathString());
    Collections.sort(remoteReplicaInfoList, remoteReplicaInfoComparator);
    Collections.sort(actualRemoteReplicaInfoList, remoteReplicaInfoComparator);
    assertEquals("getRemoteReplicaInfos not correct", remoteReplicaInfoList, actualRemoteReplicaInfoList);
    // Test remove remoteReplicaInfo.
    replicaThread.removeRemoteReplicaInfo(remoteReplicaInfoList.get(remoteReplicaInfoList.size() - 1));
    actualRemoteReplicaInfoList = replicaThread.getRemoteReplicaInfos().get(remoteHost.dataNodeId);
    Collections.sort(actualRemoteReplicaInfoList, remoteReplicaInfoComparator);
    remoteReplicaInfoList.remove(remoteReplicaInfoList.size() - 1);
    assertEquals("getRemoteReplicaInfos not correct", remoteReplicaInfoList, actualRemoteReplicaInfoList);
}
Also used: MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory), ValidatingTransformer (com.github.ambry.messageformat.ValidatingTransformer), Transformer (com.github.ambry.store.Transformer), ResponseHandler (com.github.ambry.commons.ResponseHandler), HashMap (java.util.HashMap), MetricRegistry (com.codahale.metrics.MetricRegistry), StoreKeyFactory (com.github.ambry.store.StoreKeyFactory), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StoreKeyConverter (com.github.ambry.store.StoreKeyConverter), DataNodeId (com.github.ambry.clustermap.DataNodeId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), MockClusterMap (com.github.ambry.clustermap.MockClusterMap), Test (org.junit.Test)

Example 4 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.

In class ReplicationTestHelper, method getRemoteReplicasAndReplicaThread.

/**
 * Creates the remote replicas that the local host will replicate from, along with the {@link ReplicaThread} that
 * performs the replication.
 * @param batchSize the number of messages to be returned in each iteration of replication
 * @param clusterMap the {@link ClusterMap} to use
 * @param localHost the local {@link MockHost} (the one running the replica thread)
 * @param remoteHost the remote {@link MockHost} (the target of replication)
 * @param storeKeyConverter the {@link StoreKeyConverter} to be used in {@link ReplicaThread}
 * @param transformer the {@link Transformer} to be used in {@link ReplicaThread}
 * @param listener the {@link StoreEventListener} to use.
 * @param replicaSyncUpManager the {@link ReplicaSyncUpManager} to help create replica thread
 * @return a pair whose first element is the set of remote replicas and the second element is the {@link ReplicaThread}
 */
protected Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> getRemoteReplicasAndReplicaThread(
    int batchSize, ClusterMap clusterMap, MockHost localHost, MockHost remoteHost, StoreKeyConverter storeKeyConverter,
    Transformer transformer, StoreEventListener listener, ReplicaSyncUpManager replicaSyncUpManager)
    throws ReflectiveOperationException {
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
    replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
    List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, listener);
    Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = Collections.singletonMap(remoteHost.dataNodeId, remoteReplicaInfoList);
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
    ReplicaThread replicaThread =
        new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
            new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
            storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false,
            localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, replicaSyncUpManager,
            null, null);
    for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
        replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
    }
    for (PartitionId partitionId : clusterMap.getAllPartitionIds(null)) {
        replicationMetrics.addLagMetricForPartition(partitionId, true);
    }
    return new Pair<>(replicasToReplicate, replicaThread);
}
Also used: ResponseHandler (com.github.ambry.commons.ResponseHandler), HashMap (java.util.HashMap), MetricRegistry (com.codahale.metrics.MetricRegistry), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), StoreKeyFactory (com.github.ambry.store.StoreKeyFactory), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), List (java.util.List), ArrayList (java.util.ArrayList), DataNodeId (com.github.ambry.clustermap.DataNodeId), Pair (com.github.ambry.utils.Pair)
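
A hypothetical test fragment (not taken from ReplicationTest) showing how a subclass of ReplicationTestHelper might consume this helper: build the mock hosts and converters the same way Examples 2 and 3 do, let getRemoteReplicasAndReplicaThread handle the wiring, and then drive the returned ReplicaThread.

@Test
public void replicaThreadSetupSketch() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    MockStoreKeyConverterFactory converterFactory = new MockStoreKeyConverterFactory(null, null);
    converterFactory.setConversionMap(new HashMap<>());
    converterFactory.setReturnInputIfAbsent(true);
    StoreKeyConverter storeKeyConverter = converterFactory.getStoreKeyConverter();
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
    // batch size of 4, no StoreEventListener, no ReplicaSyncUpManager
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
        getRemoteReplicasAndReplicaThread(4, clusterMap, localHost, remoteHost, storeKeyConverter, transformer,
            null, null);
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    // every remote replica is now registered on the thread; a test would typically exchange metadata
    // next (as Example 2 does) or call replicaThread.replicate()
}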

Example 5 with ResponseHandler

Use of com.github.ambry.commons.ResponseHandler in project ambry by linkedin.

In class ChunkFillTest, method fillChunksAndAssertSuccess.

/**
 * Creates a {@link PutOperation}, passes in a channel with the blobSize set by the caller, and tests the
 * chunk-filling flow for puts.
 * Note that this test exercises only the chunk-filling flow, not the ChunkFiller thread (which never runs here,
 * as the {@link PutManager} is not even instantiated).
 */
private void fillChunksAndAssertSuccess() throws Exception {
    VerifiableProperties vProps = getNonBlockingRouterProperties();
    MockClusterMap mockClusterMap = new MockClusterMap();
    RouterConfig routerConfig = new RouterConfig(vProps);
    routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
    ResponseHandler responseHandler = new ResponseHandler(mockClusterMap);
    short accountId = Utils.getRandomShort(random);
    short containerId = Utils.getRandomShort(random);
    BlobProperties putBlobProperties = new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId, containerId, testEncryption, null, null, null);
    Random random = new Random();
    byte[] putUserMetadata = new byte[10];
    random.nextBytes(putUserMetadata);
    putContent = new byte[blobSize];
    random.nextBytes(putContent);
    final ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
    FutureResult<String> futureResult = new FutureResult<String>();
    MockTime time = new MockTime();
    MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
    if (testEncryption) {
        kms = new MockKeyManagementService(new KMSConfig(vProps), TestUtils.getRandomKey(SingleKeyManagementServiceTest.DEFAULT_KEY_SIZE_CHARS));
        cryptoService = new MockCryptoService(new CryptoServiceConfig(vProps));
        cryptoJobHandler = new CryptoJobHandler(CryptoJobHandlerTest.DEFAULT_THREAD_COUNT);
    }
    MockRouterCallback routerCallback = new MockRouterCallback(networkClientFactory.getNetworkClient(), Collections.EMPTY_LIST);
    PutOperation op =
        PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(),
            new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult,
            null, routerCallback, null, kms, cryptoService, cryptoJobHandler, time, putBlobProperties,
            MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
    op.startOperation();
    numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
    compositeBuffers = new ByteBuf[numChunks];
    compositeEncryptionKeys = new ByteBuffer[numChunks];
    compositeBlobIds = new BlobId[numChunks];
    final AtomicReference<Exception> operationException = new AtomicReference<Exception>(null);
    int chunksLeftToBeFilled = numChunks;
    do {
        if (testEncryption) {
            int chunksPerBatch = Math.min(routerConfig.routerMaxInMemPutChunks, chunksLeftToBeFilled);
            CountDownLatch onPollLatch = new CountDownLatch(chunksPerBatch);
            routerCallback.setOnPollLatch(onPollLatch);
            op.fillChunks();
            Assert.assertTrue("Latch should have been zeroed out", onPollLatch.await(1000, TimeUnit.MILLISECONDS));
            chunksLeftToBeFilled -= chunksPerBatch;
        } else {
            op.fillChunks();
        }
        // every in-use chunk should already be in the Ready state, since the channel is ByteBuffer based
        for (PutOperation.PutChunk putChunk : op.putChunks) {
            if (putChunk.isFree()) {
                continue;
            }
            Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
            ByteBuf buf = putChunk.buf.retainedDuplicate();
            totalSizeWritten += buf.readableBytes();
            compositeBuffers[putChunk.getChunkIndex()] = buf;
            if (testEncryption) {
                compositeEncryptionKeys[putChunk.getChunkIndex()] = putChunk.encryptedPerBlobKey.duplicate();
                compositeBlobIds[putChunk.getChunkIndex()] = putChunk.chunkBlobId;
            }
            putChunk.clear();
        }
    } while (!op.isChunkFillingDone());
    if (!testEncryption) {
        Assert.assertEquals("total size written out should match the blob size", blobSize, totalSizeWritten);
    }
    // for encrypted path, size will be implicitly tested via assertDataIdentity
    Exception exception = operationException.get();
    if (exception != null) {
        throw exception;
    }
    assertDataIdentity(mockClusterMap);
}
Also used: KMSConfig (com.github.ambry.config.KMSConfig), ResponseHandler (com.github.ambry.commons.ResponseHandler), ByteBuf (io.netty.buffer.ByteBuf), InMemAccountService (com.github.ambry.account.InMemAccountService), Random (java.util.Random), CryptoServiceConfig (com.github.ambry.config.CryptoServiceConfig), MockTime (com.github.ambry.utils.MockTime), ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel), VerifiableProperties (com.github.ambry.config.VerifiableProperties), AtomicReference (java.util.concurrent.atomic.AtomicReference), CountDownLatch (java.util.concurrent.CountDownLatch), RouterConfig (com.github.ambry.config.RouterConfig), IOException (java.io.IOException), LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem), BlobProperties (com.github.ambry.messageformat.BlobProperties), MockClusterMap (com.github.ambry.clustermap.MockClusterMap)
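
One more note on the loop bound: numChunks comes from RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize). The one-liner below is a hedged re-derivation of that value for a non-empty blob (an assumption about the helper, not its implementation), shown only to make explicit how many putChunks the do/while loop has to drain.

// Sketch only: assumed equivalent of getNumChunksForBlobAndChunkSize for blobSize > 0.
static int expectedNumChunks(long blobSize, int chunkSize) {
    // ceiling division, e.g. blobSize = 10_000 with chunkSize = 4_096 gives 3 chunks (2 full + 1 partial)
    return (int) ((blobSize + chunkSize - 1) / chunkSize);
}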

Aggregations

ResponseHandler (com.github.ambry.commons.ResponseHandler): 13 usages
ArrayList (java.util.ArrayList): 8 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 7 usages
HashMap (java.util.HashMap): 7 usages
Test (org.junit.Test): 7 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 6 usages
BlobId (com.github.ambry.commons.BlobId): 6 usages
BlobProperties (com.github.ambry.messageformat.BlobProperties): 6 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 5 usages
LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem): 5 usages
RouterConfig (com.github.ambry.config.RouterConfig): 5 usages
List (java.util.List): 5 usages
Properties (java.util.Properties): 5 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 4 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 4 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 4 usages
MockTime (com.github.ambry.utils.MockTime): 4 usages
InMemAccountService (com.github.ambry.account.InMemAccountService): 3 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 3 usages