Example 41 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

The class IndexWritePerformance, method main.

public static void main(String[] args) {
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt = parser.accepts("numberOfIndexes", "The number of indexes to create").withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(numberOfIndexesOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
        int numberOfWriters = options.valueOf(numberOfWritersOpt);
        int writesPerSecond = options.valueOf(writesPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
        final AtomicLong totalWrites = new AtomicLong(0);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        File logFile = new File(System.getProperty("user.dir"), "writeperflog");
        writer = new FileWriter(logFile);
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "2097152");
        props.setProperty("store.segment.size.in.bytes", "10");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        Log log = new Log(System.getProperty("user.dir"), 10, diskSpaceAllocator, config, metrics, null);
        ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
        ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
        for (int i = 0; i < numberOfIndexes; i++) {
            File indexFile = new File(System.getProperty("user.dir"), Integer.toString(i));
            if (indexFile.exists()) {
                for (File c : indexFile.listFiles()) {
                    c.delete();
                }
            } else {
                indexFile.mkdir();
            }
            System.out.println("Creating index folder " + indexFile.getAbsolutePath());
            writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
            indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging, totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
        }
        final CountDownLatch latch = new CountDownLatch(numberOfWriters);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            @Override
            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total writes : " + totalWrites.get() + "  Total time taken : " + totalTimeTakenInNs.get() + " Nano Seconds  Average time taken per write " + ((double) totalWrites.get() / totalTimeTakenInNs.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfWriters];
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i].join();
        }
    } catch (StoreException e) {
        System.err.println("Index creation error on exit " + e.getMessage());
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing the writer");
            }
        }
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Throttler(com.github.ambry.utils.Throttler) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) AtomicLong(java.util.concurrent.atomic.AtomicLong) StoreConfig(com.github.ambry.config.StoreConfig) OptionSet(joptsimple.OptionSet) File(java.io.File)
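
The five examples in this set share a single bootstrap sequence: wrap a Properties object in a VerifiableProperties, construct a ClusterMapConfig from it, and let Utils.getObj reflectively instantiate whichever ClusterAgentsFactory the config names. A minimal sketch of just that sequence, distilled from the example above (the ClusterMapBootstrap class name is ours, and the layout files are assumed to exist at the given paths):

import java.util.Properties;

import com.github.ambry.clustermap.ClusterAgentsFactory;
import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.utils.Utils;

public class ClusterMapBootstrap {

    public static ClusterMap buildClusterMap(String hardwareLayoutPath, String partitionLayoutPath) throws Exception {
        // An empty Properties object suffices here: ClusterMapConfig falls back to its
        // defaults, including the class name held in clusterMapClusterAgentsFactory.
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        // Utils.getObj instantiates the factory reflectively from that class name,
        // passing the config and the two static layout files to its constructor.
        ClusterAgentsFactory factory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath);
        return factory.getClusterMap();
    }
}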

Example 42 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

The class HelixBootstrapUpgradeToolTest, method testListSealedPartitions.

/**
 * Test listing sealed partitions in a Helix cluster.
 * @throws Exception
 */
@Test
public void testListSealedPartitions() throws Exception {
    assumeTrue(!dcStr.equals("DC1"));
    String clusterName = CLUSTER_NAME_PREFIX + CLUSTER_NAME_IN_STATIC_CLUSTER_MAP;
    ClusterMapConfig clusterMapConfig = getClusterMapConfig(clusterName, "DC1", null);
    InstanceConfigToDataNodeConfigAdapter.Converter instanceConfigConverter = new InstanceConfigToDataNodeConfigAdapter.Converter(clusterMapConfig);
    // Test regular bootstrap
    long expectedResourceCount = (testPartitionLayout.getPartitionLayout().getPartitionCount() - 1) / DEFAULT_MAX_PARTITIONS_PER_RESOURCE + 1;
    writeBootstrapOrUpgrade(expectedResourceCount, false);
    Set<String> sealedPartitions = HelixBootstrapUpgradeUtil.listSealedPartition(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, CLUSTER_NAME_PREFIX, dcStr, dataNodeConfigSourceType);
    assertEquals("Sealed partition set should be empty initially", Collections.emptySet(), sealedPartitions);
    // randomly choose 20 partitions to mark as sealed
    int[] intArray = new Random().ints(0, 100).distinct().limit(20).toArray();
    Set<String> selectedSealedPartitionSet = new HashSet<>();
    for (int id : intArray) {
        selectedSealedPartitionSet.add(String.valueOf(id));
    }
    // update the sealed lists in Helix
    for (ZkInfo zkInfo : dcsToZkInfo.values()) {
        ZKHelixAdmin admin = new ZKHelixAdmin("localhost:" + zkInfo.getPort());
        PropertyStoreToDataNodeConfigAdapter propertyStoreAdapter = dataNodeConfigSourceType == DataNodeConfigSourceType.INSTANCE_CONFIG ? null : new PropertyStoreToDataNodeConfigAdapter("localhost:" + zkInfo.getPort(), clusterMapConfig);
        for (String instanceName : admin.getInstancesInCluster(clusterName)) {
            DataNodeConfig dataNodeConfig = dataNodeConfigSourceType == DataNodeConfigSourceType.INSTANCE_CONFIG ? instanceConfigConverter.convert(admin.getInstanceConfig(clusterName, instanceName)) : propertyStoreAdapter.get(instanceName);
            Set<String> localReplicas = new HashSet<>();
            for (DataNodeConfig.DiskConfig diskConfig : dataNodeConfig.getDiskConfigs().values()) {
                localReplicas.addAll(diskConfig.getReplicaConfigs().keySet());
            }
            // derive the intersection of localReplicas set and selectedSealedPartitionSet
            localReplicas.retainAll(selectedSealedPartitionSet);
            if (dataNodeConfigSourceType == DataNodeConfigSourceType.INSTANCE_CONFIG) {
                InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
                instanceConfig.getRecord().setListField(SEALED_STR, new ArrayList<>(localReplicas));
                admin.setInstanceConfig(clusterName, instanceName, instanceConfig);
            } else {
                dataNodeConfig.getSealedReplicas().addAll(localReplicas);
                propertyStoreAdapter.set(dataNodeConfig);
            }
        }
    }
    // query sealed partition in Helix again
    sealedPartitions = HelixBootstrapUpgradeUtil.listSealedPartition(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, CLUSTER_NAME_PREFIX, dcStr, dataNodeConfigSourceType);
    assertEquals("Mismatch in sealed partition set", selectedSealedPartitionSet, sealedPartitions);
}
Also used : ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) Random(java.util.Random) InstanceConfig(org.apache.helix.model.InstanceConfig) HashSet(java.util.HashSet) Test(org.junit.Test)
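
In the INSTANCE_CONFIG branch above, the sealed list lives as an ordinary list field on the instance's ZNRecord. A minimal sketch of that read-modify-write cycle in isolation, assuming a reachable ZooKeeper endpoint and an existing cluster and instance; the SealedListUpdater class is ours, and the "SEALED" key is assumed to match the statically imported SEALED_STR constant used in the test:

import java.util.ArrayList;
import java.util.List;

import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.InstanceConfig;

public class SealedListUpdater {

    // Assumed value of the SEALED_STR list-field key used by the test.
    private static final String SEALED_STR = "SEALED";

    /**
     * Marks the given partition as sealed on one instance by rewriting the
     * SEALED list field of its InstanceConfig.
     */
    public static void sealPartition(String zkAddress, String clusterName, String instanceName, String partitionId) {
        ZKHelixAdmin admin = new ZKHelixAdmin(zkAddress);
        try {
            InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
            // getListField returns null when the key has never been written.
            List<String> sealed = instanceConfig.getRecord().getListField(SEALED_STR);
            List<String> updated = sealed == null ? new ArrayList<>() : new ArrayList<>(sealed);
            if (!updated.contains(partitionId)) {
                updated.add(partitionId);
            }
            instanceConfig.getRecord().setListField(SEALED_STR, updated);
            admin.setInstanceConfig(clusterName, instanceName, instanceConfig);
        } finally {
            admin.close();
        }
    }
}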

Example 43 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

The class AccountTool, method getRouter.

/**
 * Create a {@link Router} for {@link RouterStore}.
 * @return The {@link Router}.
 * @throws Exception Any unexpected exception.
 */
private Router getRouter() throws Exception {
    // Create a HelixAccountService for the router.
    AccountServiceFactory accountServiceFactory = Utils.getObj("com.github.ambry.account.HelixAccountServiceFactory", verifiableProperties, registry);
    accountService = (HelixAccountService) accountServiceFactory.getAccountService();
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayout, partitionLayout);
    clusterMap = clusterAgentsFactory.getClusterMap();
    SSLFactory sslFactory = getSSLFactoryIfRequired();
    // Create a NonBlockingRouter.
    RouterFactory routerFactory = Utils.getObj("com.github.ambry.router.NonBlockingRouterFactory", verifiableProperties, clusterMap, new LoggingNotificationSystem(), sslFactory, accountService);
    router = routerFactory.getRouter();
    return router;
}
Also used : SSLFactory(com.github.ambry.commons.SSLFactory) RouterFactory(com.github.ambry.router.RouterFactory) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
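
Note that getRouter never names a concrete implementation class in code: every dependency is created from a class-name string via Utils.getObj, so implementations can be swapped through configuration alone. A simplified sketch of what such a reflective helper does (our illustration, not Ambry's actual Utils.getObj, which also handles corner cases such as primitive parameter types):

import java.lang.reflect.Constructor;

public class ReflectiveFactory {

    /**
     * Instantiates className using the first declared constructor whose
     * parameter list is assignment-compatible with the supplied arguments.
     */
    @SuppressWarnings("unchecked")
    public static <T> T getObj(String className, Object... args) throws Exception {
        for (Constructor<?> ctor : Class.forName(className).getDeclaredConstructors()) {
            Class<?>[] paramTypes = ctor.getParameterTypes();
            if (paramTypes.length != args.length) {
                continue;
            }
            boolean matches = true;
            for (int i = 0; i < paramTypes.length; i++) {
                // In this sketch a null argument matches any reference type.
                if (args[i] != null && !paramTypes[i].isAssignableFrom(args[i].getClass())) {
                    matches = false;
                    break;
                }
            }
            if (matches) {
                return (T) ctor.newInstance(args);
            }
        }
        throw new NoSuchMethodException("No constructor of " + className + " matches the given arguments");
    }
}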

Example 44 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

The class StorageManagerTest, method generateConfigs.

/**
 * Generates {@link StoreConfig} and {@link DiskManagerConfig} for use in tests.
 * @param segmentedLog {@code true} to set a segment capacity lower than total store capacity
 * @param updateInstanceConfig whether to update InstanceConfig in Helix
 */
private void generateConfigs(boolean segmentedLog, boolean updateInstanceConfig) {
    List<com.github.ambry.utils.TestUtils.ZkInfo> zkInfoList = new ArrayList<>();
    zkInfoList.add(new com.github.ambry.utils.TestUtils.ZkInfo(null, "DC0", (byte) 0, 2199, false));
    JSONObject zkJson = constructZkLayoutJSON(zkInfoList);
    Properties properties = new Properties();
    properties.put("disk.manager.enable.segment.pooling", "true");
    properties.put("store.compaction.triggers", "Periodic,Admin");
    properties.put("store.replica.status.delegate.enable", "true");
    properties.put("store.set.local.partition.state.enabled", "true");
    properties.setProperty("clustermap.host.name", "localhost");
    properties.setProperty("clustermap.port", "2200");
    properties.setProperty("clustermap.cluster.name", CLUSTER_NAME);
    properties.setProperty("clustermap.datacenter.name", "DC0");
    properties.setProperty("clustermap.dcs.zk.connect.strings", zkJson.toString(2));
    properties.setProperty("clustermap.update.datanode.info", Boolean.toString(updateInstanceConfig));
    if (segmentedLog) {
        long replicaCapacity = clusterMap.getAllPartitionIds(null).get(0).getReplicaIds().get(0).getCapacityInBytes();
        properties.put("store.segment.size.in.bytes", Long.toString(replicaCapacity / 2L));
    }
    VerifiableProperties vProps = new VerifiableProperties(properties);
    diskManagerConfig = new DiskManagerConfig(vProps);
    storeConfig = new StoreConfig(vProps);
    clusterMapConfig = new ClusterMapConfig(vProps);
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ArrayList(java.util.ArrayList) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) TestUtils(com.github.ambry.utils.TestUtils) TestUtils(com.github.ambry.clustermap.TestUtils) JSONObject(org.json.JSONObject) StoreConfig(com.github.ambry.config.StoreConfig)
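
generateConfigs relies on a property of Ambry's config classes that is easy to miss: one VerifiableProperties instance can back several typed configs, because each config class reads only the keys it declares and applies defaults for the rest. A standalone sketch of the pattern (the class name and property values are ours; the storeSegmentSizeInBytes and clusterMapDatacenterName fields follow Ambry's convention of deriving field names from property keys):

import java.util.Properties;

import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.StoreConfig;
import com.github.ambry.config.VerifiableProperties;

public class ConfigSharingExample {

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Keys read by ClusterMapConfig (placeholder values).
        properties.setProperty("clustermap.host.name", "localhost");
        properties.setProperty("clustermap.port", "2200");
        properties.setProperty("clustermap.cluster.name", "test-cluster");
        properties.setProperty("clustermap.datacenter.name", "DC0");
        // A key read only by StoreConfig.
        properties.setProperty("store.segment.size.in.bytes", "1048576");
        // One VerifiableProperties instance backs both typed configs; each
        // constructor pulls just the keys it declares.
        VerifiableProperties vProps = new VerifiableProperties(properties);
        StoreConfig storeConfig = new StoreConfig(vProps);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(vProps);
        System.out.println("segment size = " + storeConfig.storeSegmentSizeInBytes);
        System.out.println("datacenter   = " + clusterMapConfig.clusterMapDatacenterName);
    }
}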

Example 45 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

The class ServerAdminTool, method main.

/**
 * Runs the server admin tool.
 * @param args associated arguments.
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    ServerAdminToolConfig config = new ServerAdminToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap();
    SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
    ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties);
    File file = new File(config.dataOutputFilePath);
    if (!file.exists() && !file.createNewFile()) {
        throw new IllegalStateException("Could not create " + file);
    }
    FileOutputStream outputFileStream = new FileOutputStream(config.dataOutputFilePath);
    DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
    if (dataNodeId == null) {
        throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
    }
    switch(config.typeOfOperation) {
        case GetBlobProperties:
            BlobId blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobProperties> bpResponse = serverAdminTool.getBlobProperties(dataNodeId, blobId, config.getOption, clusterMap);
            if (bpResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob properties for {} from {}: {}", blobId, dataNodeId, bpResponse.getSecond());
            } else {
                LOGGER.error("Failed to get blob properties for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bpResponse.getFirst());
            }
            break;
        case GetUserMetadata:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, ByteBuffer> umResponse = serverAdminTool.getUserMetadata(dataNodeId, blobId, config.getOption, clusterMap);
            if (umResponse.getFirst() == ServerErrorCode.No_Error) {
                writeBufferToFile(umResponse.getSecond(), outputFileStream);
                LOGGER.info("User metadata for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get user metadata for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, umResponse.getFirst());
            }
            break;
        case GetBlob:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobData> bResponse = serverAdminTool.getBlob(dataNodeId, blobId, config.getOption, clusterMap);
            if (bResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob type of {} from {} is {}", blobId, dataNodeId, bResponse.getSecond().getBlobType());
                ByteBuf buffer = bResponse.getSecond().content();
                try {
                    writeByteBufToFile(buffer, outputFileStream);
                } finally {
                    buffer.release();
                }
                LOGGER.info("Blob data for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get blob data for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bResponse.getFirst());
            }
            break;
        case TriggerCompaction:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    ServerErrorCode errorCode = serverAdminTool.triggerCompaction(dataNodeId, partitionId);
                    if (errorCode == ServerErrorCode.No_Error) {
                        LOGGER.info("Compaction has been triggered for {} on {}", partitionId, dataNodeId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for trigger compaction request on {}", dataNodeId, errorCode, partitionId);
                    }
                }
            } else {
                LOGGER.error("There were no partitions provided to trigger compaction on");
            }
            break;
        case RequestControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendRequestControlRequest(serverAdminTool, dataNodeId, partitionId, config.requestTypeToControl, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status of {} to be set to {} on all partitions", config.requestTypeToControl, config.enableState);
                sendRequestControlRequest(serverAdminTool, dataNodeId, null, config.requestTypeToControl, config.enableState);
            }
            break;
        case ReplicationControl:
            List<String> origins = Collections.emptyList();
            if (config.origins.length > 0 && !config.origins[0].isEmpty()) {
                origins = Arrays.asList(config.origins);
            }
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendReplicationControlRequest(serverAdminTool, dataNodeId, partitionId, origins, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status for replication from {} to be set to {} on " + "all partitions", origins.isEmpty() ? "all DCs" : origins, config.enableState);
                sendReplicationControlRequest(serverAdminTool, dataNodeId, null, origins, config.enableState);
            }
            break;
        case CatchupStatus:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, partitionId, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                    if (response.getFirst() == ServerErrorCode.No_Error) {
                        LOGGER.info("Replicas are {} within {} bytes for {}", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes, partitionId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for request for catchup status of {}", dataNodeId, response.getFirst(), partitionId);
                    }
                }
            } else {
                Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, null, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                if (response.getFirst() == ServerErrorCode.No_Error) {
                    LOGGER.info("Replicas are {} within {} bytes for all partitions", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes);
                } else {
                    LOGGER.error("From {}, received server error code {} for request for catchup status of all partitions", dataNodeId, response.getFirst());
                }
            }
            break;
        case BlobStoreControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendBlobStoreControlRequest(serverAdminTool, dataNodeId, partitionId, config.numReplicasCaughtUpPerPartition, config.storeControlRequestType);
                }
            } else {
                LOGGER.error("There were no partitions provided to be controlled (Start/Stop)");
            }
            break;
        default:
            throw new IllegalStateException("Recognized but unsupported operation: " + config.typeOfOperation);
    }
    serverAdminTool.close();
    outputFileStream.close();
    clusterMap.close();
    System.out.println("Server admin tool is safely closed");
    System.exit(0);
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) SSLFactory(com.github.ambry.commons.SSLFactory) ByteBuf(io.netty.buffer.ByteBuf) BlobData(com.github.ambry.messageformat.BlobData) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) SSLConfig(com.github.ambry.config.SSLConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) PartitionId(com.github.ambry.clustermap.PartitionId) ByteBuffer(java.nio.ByteBuffer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerErrorCode(com.github.ambry.server.ServerErrorCode) FileOutputStream(java.io.FileOutputStream) BlobProperties(com.github.ambry.messageformat.BlobProperties) File(java.io.File) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId)
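
Every branch of the switch above repeats the same check: compare the first element of the returned Pair against ServerErrorCode.No_Error, then either consume the payload or log the failure. A small generic helper that factors out that pattern (our addition, not part of ServerAdminTool):

import java.util.function.Consumer;

import com.github.ambry.server.ServerErrorCode;
import com.github.ambry.utils.Pair;

public class ResponseHandling {

    /**
     * Runs onSuccess with the payload when the server reported No_Error and
     * returns true; otherwise returns false so the caller can log the error.
     */
    public static <T> boolean onSuccess(Pair<ServerErrorCode, T> response, Consumer<T> onSuccess) {
        if (response.getFirst() == ServerErrorCode.No_Error) {
            onSuccess.accept(response.getSecond());
            return true;
        }
        return false;
    }
}

With this helper, the GetBlobProperties branch, for instance, reduces to onSuccess(bpResponse, props -> LOGGER.info("Blob properties: {}", props)), with the error path logged whenever the call returns false.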

Aggregations

ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 100
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 81
Test (org.junit.Test): 56
Properties (java.util.Properties): 52
MetricRegistry (com.codahale.metrics.MetricRegistry): 47
ArrayList (java.util.ArrayList): 31
IOException (java.io.IOException): 26
HashSet (java.util.HashSet): 25
JSONObject (org.json.JSONObject): 25
File (java.io.File): 24
ClusterMap (com.github.ambry.clustermap.ClusterMap): 23
HashMap (java.util.HashMap): 21
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 19
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 18
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18
StoreConfig (com.github.ambry.config.StoreConfig): 18
ReplicaId (com.github.ambry.clustermap.ReplicaId): 16
List (java.util.List): 16
Map (java.util.Map): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 16