Example 26 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class AmbryServer method startup.

public void startup() throws InstantiationException {
    try {
        logger.info("starting");
        clusterParticipants = clusterAgentsFactory.getClusterParticipants();
        logger.info("Setting up JMX.");
        long startTime = SystemTime.getInstance().milliseconds();
        reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
        reporter.start();
        logger.info("creating configs");
        NetworkConfig networkConfig = new NetworkConfig(properties);
        StoreConfig storeConfig = new StoreConfig(properties);
        DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
        ServerConfig serverConfig = new ServerConfig(properties);
        ReplicationConfig replicationConfig = new ReplicationConfig(properties);
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
        SSLConfig sslConfig = new SSLConfig(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
        StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
        CloudConfig cloudConfig = new CloudConfig(properties);
        // verify the configs
        properties.verify();
        scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
        // If there is more than one participant on the local node, schedule a consistency checker to detect any
        // mismatch in the sealed/stopped replica lists maintained by each participant.
        if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
            consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
            logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
            consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
            consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
        }
        logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
        DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
        if (nodeId == null) {
            throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
        }
        AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
        AccountService accountService = accountServiceFactory.getAccountService();
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        // In most cases, there should be only one participant in the clusterParticipants list. If there is more than
        // one and some components require a sole participant, the first one in the list is treated as the primary participant.
        storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
        storageManager.start();
        SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
        if (clusterMapConfig.clusterMapEnableHttp2Replication) {
            connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
        } else {
            connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
        }
        connectionPool.start();
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
        Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
        replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
        replicationManager.start();
        if (replicationConfig.replicationEnabledWithVcrCluster) {
            logger.info("Creating Helix cluster spectator for cloud to store replication.");
            vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
            cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
            cloudToStoreReplicationManager.start();
        }
        logger.info("Creating StatsManager to publish stats");
        accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
        statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
        if (serverConfig.serverStatsPublishLocalEnabled) {
            statsManager.start();
        }
        ArrayList<Port> ports = new ArrayList<Port>();
        ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
        if (nodeId.hasSSLPort()) {
            ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
        }
        networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
        FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
        requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
        requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
        networkServer.start();
        // Start netty http2 server
        if (nodeId.hasHttp2Port()) {
            NettyConfig nettyConfig = new NettyConfig(properties);
            NettyMetrics nettyMetrics = new NettyMetrics(registry);
            Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
            Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
            logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
            NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
            AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
            requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
            NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
            nettyHttp2Server = nioServerFactory.getNioServer();
            nettyHttp2Server.start();
        }
        // Other code
        List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
        Set<String> validStatsTypes = new HashSet<>();
        for (StatsReportType type : StatsReportType.values()) {
            validStatsTypes.add(type.toString());
        }
        if (serverConfig.serverStatsPublishReportEnabled) {
            serverConfig.serverStatsReportsToPublish.forEach(e -> {
                if (validStatsTypes.contains(e)) {
                    ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
                }
            });
        }
        if (vcrClusterSpectator != null) {
            vcrClusterSpectator.spectate();
        }
        Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
        for (ClusterParticipant clusterParticipant : clusterParticipants) {
            clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
        }
        if (nettyInternalMetrics != null) {
            nettyInternalMetrics.start();
            logger.info("NettyInternalMetric starts");
        }
        logger.info("started");
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        metrics.serverStartTimeInMs.update(processingTime);
        logger.info("Server startup time in Ms {}", processingTime);
    } catch (Exception e) {
        logger.error("Error during startup", e);
        throw new InstantiationException("failure during startup " + e);
    }
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) SSLFactory(com.github.ambry.commons.SSLFactory) Http2ClientMetrics(com.github.ambry.network.http2.Http2ClientMetrics) Port(com.github.ambry.network.Port) StorageManager(com.github.ambry.store.StorageManager) ReplicationSkipPredicate(com.github.ambry.replication.ReplicationSkipPredicate) ArrayList(java.util.ArrayList) NettySslHttp2Factory(com.github.ambry.commons.NettySslHttp2Factory) NettyConfig(com.github.ambry.config.NettyConfig) ServerConfig(com.github.ambry.config.ServerConfig) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) AccountStatsMySqlStoreFactory(com.github.ambry.accountstats.AccountStatsMySqlStoreFactory) HashSet(java.util.HashSet) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ReplicationManager(com.github.ambry.replication.ReplicationManager) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Http2BlockingChannelPool(com.github.ambry.network.http2.Http2BlockingChannelPool) Http2ClientConfig(com.github.ambry.config.Http2ClientConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) AccountServiceCallback(com.github.ambry.account.AccountServiceCallback) Http2ServerMetrics(com.github.ambry.network.http2.Http2ServerMetrics) BlobStoreRecovery(com.github.ambry.messageformat.BlobStoreRecovery) RequestHandlerPool(com.github.ambry.protocol.RequestHandlerPool) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) AccountService(com.github.ambry.account.AccountService) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) CloudConfig(com.github.ambry.config.CloudConfig) StorageServerNettyFactory(com.github.ambry.rest.StorageServerNettyFactory) AccountStatsMySqlStore(com.github.ambry.accountstats.AccountStatsMySqlStore) SSLConfig(com.github.ambry.config.SSLConfig) StatsManagerConfig(com.github.ambry.config.StatsManagerConfig) SocketServer(com.github.ambry.network.SocketServer) NettyMetrics(com.github.ambry.rest.NettyMetrics) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) NettyServerRequestResponseChannel(com.github.ambry.network.NettyServerRequestResponseChannel) NetworkConfig(com.github.ambry.config.NetworkConfig) BlobStoreHardDelete(com.github.ambry.messageformat.BlobStoreHardDelete) IOException(java.io.IOException) MessageInfo(com.github.ambry.store.MessageInfo) NioServerFactory(com.github.ambry.rest.NioServerFactory) CloudToStoreReplicationManager(com.github.ambry.replication.CloudToStoreReplicationManager) ClusterParticipant(com.github.ambry.clustermap.ClusterParticipant) AccountServiceFactory(com.github.ambry.account.AccountServiceFactory)
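
The startup flow above shows the core Port pattern: each listening endpoint is a Port that pairs a port number with a PortType, and the SSL endpoint is only added when the cluster map advertises one for the node. The same choice recurs in the test examples below, so a small helper like the following sketch captures it (the portFor helper is illustrative and not part of Ambry; the DataNodeId accessors are the ones used in these examples):

// Illustrative sketch: choose a node's SSL endpoint when available and requested,
// otherwise fall back to its plaintext endpoint.
// Uses com.github.ambry.network.Port, com.github.ambry.network.PortType,
// and com.github.ambry.clustermap.DataNodeId.
static Port portFor(DataNodeId node, boolean preferSsl) {
    return preferSsl && node.hasSSLPort()
        ? new Port(node.getSSLPort(), PortType.SSL)
        : new Port(node.getPort(), PortType.PLAINTEXT);
}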

Example 27 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class ServerHardDeleteTest method endToEndTestHardDeletes.

/**
 * Tests the hard delete functionality.
 * <p>
 * This test does the following:
 * 1. Makes 6 puts, waits for notification.
 * 2. Makes 2 deletes, waits for notification.
 * 3. Waits for hard deletes to catch up to the expected token value.
 * 4. Verifies that the two records that are deleted are zeroed out by hard deletes.
 * 5. Makes 3 more puts, waits for notification.
 * 6. Makes 3 deletes - 2 of the records from the initial set of puts, and 1 from the new set.
 * 7. Waits for hard deletes to catch up again to the expected token value.
 * 8. Verifies that the three records that are deleted are zeroed out by hard deletes.
 *
 * @throws Exception
 */
@Test
public void endToEndTestHardDeletes() throws Exception {
    DataNodeId dataNodeId = mockClusterMap.getDataNodeIds().get(0);
    encryptionKey = new ArrayList<>(9);
    usermetadata = new ArrayList<>(9);
    data = new ArrayList<>(9);
    Random random = new Random();
    for (int i = 0; i < 9; i++) {
        if (i % 2 == 0) {
            encryptionKey.add(new byte[100]);
            random.nextBytes(encryptionKey.get(i));
        } else {
            encryptionKey.add(null);
        }
        usermetadata.add(new byte[1000 + i]);
        data.add(new byte[31870 + i]);
        random.nextBytes(usermetadata.get(i));
        random.nextBytes(data.get(i));
    }
    properties = new ArrayList<>(9);
    properties.add(new BlobProperties(31870, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
    properties.add(new BlobProperties(31871, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
    properties.add(new BlobProperties(31872, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
    properties.add(new BlobProperties(31873, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
    properties.add(new BlobProperties(31874, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
    properties.add(new BlobProperties(31875, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
    properties.add(new BlobProperties(31876, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
    properties.add(new BlobProperties(31877, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
    properties.add(new BlobProperties(31878, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
    List<PartitionId> partitionIds = mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    PartitionId chosenPartition = partitionIds.get(0);
    blobIdList = new ArrayList<>(9);
    for (int i = 0; i < 9; i++) {
        blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), properties.get(i).getAccountId(), properties.get(i).getContainerId(), chosenPartition, false, BlobId.BlobDataType.DATACHUNK));
    }
    ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(dataNodeId.getPort(), PortType.PLAINTEXT), "localhost", null, null);
    channel.connect();
    for (int i = 0; i < 6; i++) {
        // blobs 3 and 5 are expired among these
        putBlob(blobIdList.get(i), properties.get(i), encryptionKey.get(i), usermetadata.get(i), data.get(i), channel);
    }
    notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
    notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
    notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
    notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
    // delete blob 1
    deleteBlob(blobIdList.get(1), channel);
    zeroOutBlobContent(1);
    // delete blob 4
    deleteBlob(blobIdList.get(4), channel);
    zeroOutBlobContent(4);
    notificationSystem.awaitBlobDeletions(blobIdList.get(1).getID());
    notificationSystem.awaitBlobDeletions(blobIdList.get(4).getID());
    time.sleep(TimeUnit.DAYS.toMillis(7));
    // For each future change to this offset, add to this variable and write an explanation of why the number changed.
    // old value: 198728. Increased by 4 to 198732 because the delete record format went from version 2 to 3, which
    // adds 4 extra bytes (two shorts) per record. The last record is a delete record, so its extra 4 bytes are not (yet) added.
    //
    // Add 14 here when changing the message header version to 3: the header version went from 2 to 3, which adds a
    // short to every record, covering 6 puts and 1 delete (the last delete is not included).
    //
    // old value: 198732 + 14. Increased by 48 after adding two fields (a 4-byte CRC each) to the blob properties
    // written by each putBlob: 6 * (4 + 4), where 6 is the number of putBlob calls and 4 is the size of each extra field.
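    // Compact recap of the arithmetic above (each delta is explained in the history comments):
    //   198728 (original end offset)
    // +      4 (delete record v2 -> v3: two extra shorts; the last delete is not counted)
    // +     14 (message header v2 -> v3: 2 bytes for each of 6 puts + 1 delete)
    // +     48 (two 4-byte CRC fields per putBlob: 6 * (4 + 4))
    // = 198794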
    int expectedTokenValueT1 = 198732 + 14 + 48;
    ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT1);
    getAndVerify(channel, 6);
    // put blob 6
    putBlob(blobIdList.get(6), properties.get(6), encryptionKey.get(6), usermetadata.get(6), data.get(6), channel);
    // put blob 7
    putBlob(blobIdList.get(7), properties.get(7), encryptionKey.get(7), usermetadata.get(7), data.get(7), channel);
    // put blob 8
    putBlob(blobIdList.get(8), properties.get(8), encryptionKey.get(8), usermetadata.get(8), data.get(8), channel);
    notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
    notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
    notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
    // Do more deletes
    // delete blob 3 that is expired.
    deleteBlob(blobIdList.get(3), channel);
    zeroOutBlobContent(3);
    // delete blob 0, will undelete it later, so don't zero out the content
    deleteBlob(blobIdList.get(0), channel);
    // delete blob 6.
    deleteBlob(blobIdList.get(6), channel);
    zeroOutBlobContent(6);
    notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
    notificationSystem.awaitBlobDeletions(blobIdList.get(6).getID());
    undeleteBlob(blobIdList.get(0), channel);
    notificationSystem.awaitBlobUndeletes(blobIdList.get(0).getID());
    time.sleep(TimeUnit.DAYS.toMillis(1));
    // For each future change to this offset, add to this variable and write an explanation of why the number changed.
    // old value: 298400. Increased by 16 (4 * 4) to 298416 because the delete record format went from version 2 to 3,
    // which adds 4 extra bytes (two shorts) per record. The last record is a delete record, so its extra 4 bytes are not added.
    //
    // old value: 298416. Increased by 98. The end offset is now a journal-based offset, so the offset is not inclusive:
    // it points to the last record in the journal. Before the undelete record was added, the last record in the journal was
    // the delete record for blob 6; now it is the undelete for blob 0. Since a delete record is 98 bytes, the offset grows by 98.
    //
    // old value: 298416 + 98. Increased by 28 when changing the message header version from 2 to 3, which adds a short
    // to all the records: 9 puts and 5 deletes. The undelete is not included since it is the last record.
    //
    // old value: 298416 + 98 + 28. Increased by 72 after adding two fields (a 4-byte CRC each) to the blob properties
    // written by each putBlob: 9 * (4 + 4), where 9 is the number of putBlob calls and 4 is the size of each extra field.
    int expectedTokenValueT2 = 298416 + 98 + 28 + 72;
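    // Compact recap of the arithmetic above:
    //   298400 (original end offset)
    // +     16 (delete record v2 -> v3: two extra shorts for each of 4 counted deletes)
    // +     98 (the 98-byte delete record for blob 6 is now counted, since the undelete for blob 0 became the last journal record)
    // +     28 (message header v2 -> v3: 2 bytes for each of 9 puts + 5 deletes)
    // +     72 (two 4-byte CRC fields per putBlob: 9 * (4 + 4))
    // = 298614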
    ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT2);
    getAndVerify(channel, 9);
}
Also used : Random(java.util.Random) BlobProperties(com.github.ambry.messageformat.BlobProperties) Port(com.github.ambry.network.Port) ConnectedChannel(com.github.ambry.network.ConnectedChannel) PartitionId(com.github.ambry.clustermap.PartitionId) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
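
The channel setup in this test generalizes to other transports: the test builds a Port for the transport it targets and asks the helper for a matching ConnectedChannel. A hedged sketch of the SSL variant (this assumes the two trailing null arguments in the plaintext call above stand for the client SSL socket factory and SSL config, as suggested by the SSL tests below; the variable names are illustrative):

// Illustrative sketch: connect to the same node over its SSL port instead of plaintext.
// clientSSLSocketFactory and clientSSLConfig stand for a configured SSLSocketFactory and SSLConfig.
ConnectedChannel sslChannel = ServerTestUtil.getBlockingChannelBasedOnPortType(
    new Port(dataNodeId.getSSLPort(), PortType.SSL), "localhost", clientSSLSocketFactory, clientSSLConfig);
sslChannel.connect();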

Example 28 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class ServerPlaintextTest method endToEndTest.

@Test
public void endToEndTest() throws Exception {
    plaintextCluster.startServers();
    DataNodeId dataNodeId = plaintextCluster.getGeneralDataNode();
    ServerTestUtil.endToEndTest(new Port(dataNodeId.getPort(), PortType.PLAINTEXT), "DC1", plaintextCluster, null, null, routerProps, testEncryption);
}
Also used : Port(com.github.ambry.network.Port) DataNodeId(com.github.ambry.clustermap.DataNodeId) Test(org.junit.Test)

Example 29 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class ServerPlaintextTest method endToEndReplicationWithMultiNodeMultiPartitionTest.

@Test
public void endToEndReplicationWithMultiNodeMultiPartitionTest() throws Exception {
    plaintextCluster.startServers();
    DataNodeId dataNode = plaintextCluster.getClusterMap().getDataNodeIds().get(0);
    ArrayList<String> dataCenterList = Utils.splitString("DC1,DC2,DC3", ",");
    List<DataNodeId> dataNodes = plaintextCluster.getOneDataNodeFromEachDatacenter(dataCenterList);
    ServerTestUtil.endToEndReplicationWithMultiNodeMultiPartitionTest(dataNode.getPort(), new Port(dataNodes.get(0).getPort(), PortType.PLAINTEXT), new Port(dataNodes.get(1).getPort(), PortType.PLAINTEXT), new Port(dataNodes.get(2).getPort(), PortType.PLAINTEXT), plaintextCluster, null, null, null, null, null, null, notificationSystem, testEncryption);
}
Also used : Port(com.github.ambry.network.Port) DataNodeId(com.github.ambry.clustermap.DataNodeId) Test(org.junit.Test)

Example 30 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class ServerSSLTest method endToEndSSLReplicationWithMultiNodeMultiPartitionTest.

@Test
public void endToEndSSLReplicationWithMultiNodeMultiPartitionTest() throws Exception {
    sslCluster.startServers();
    DataNodeId dataNode = sslCluster.getClusterMap().getDataNodeIds().get(0);
    ArrayList<String> dataCenterList = new ArrayList<>(Arrays.asList("DC1", "DC2", "DC3"));
    List<DataNodeId> dataNodes = sslCluster.getOneDataNodeFromEachDatacenter(dataCenterList);
    ServerTestUtil.endToEndReplicationWithMultiNodeMultiPartitionTest(dataNode.getPort(), new Port(dataNodes.get(0).getSSLPort(), PortType.SSL), new Port(dataNodes.get(1).getSSLPort(), PortType.SSL), new Port(dataNodes.get(2).getSSLPort(), PortType.SSL), sslCluster, clientSSLConfig1, clientSSLConfig2, clientSSLConfig3, clientSSLSocketFactory1, clientSSLSocketFactory2, clientSSLSocketFactory3, notificationSystem, testEncryption);
}
Also used : Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) DataNodeId(com.github.ambry.clustermap.DataNodeId) Test(org.junit.Test)

Aggregations

Port (com.github.ambry.network.Port): 64
Test (org.junit.Test): 33
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 29
ArrayList (java.util.ArrayList): 28
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 27
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 23
DataNodeId (com.github.ambry.clustermap.DataNodeId): 22
ReplicaId (com.github.ambry.clustermap.ReplicaId): 17
BlobId (com.github.ambry.commons.BlobId): 15
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 15
BlobProperties (com.github.ambry.messageformat.BlobProperties): 15
Properties (java.util.Properties): 15
MetricRegistry (com.codahale.metrics.MetricRegistry): 12
PartitionId (com.github.ambry.clustermap.PartitionId): 12
ConnectedChannel (com.github.ambry.network.ConnectedChannel): 12
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 10
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 9
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 9
GetResponse (com.github.ambry.protocol.GetResponse): 9
DataInputStream (java.io.DataInputStream): 9