
Example 86 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class AmbryServerTest method testAmbryServerWithReporterFactory.

/**
 * Test starting and shutting down the server with a custom {@link JmxReporter} factory.
 * @throws Exception
 */
@Test
public void testAmbryServerWithReporterFactory() throws Exception {
    ClusterAgentsFactory clusterAgentsFactory = new MockClusterAgentsFactory(false, false, 1, 1, 1);
    // spy on the ObjectNameFactory so the test can verify that the custom reporter actually uses it
    ObjectNameFactory spyObjectNameFactory = spy(new DefaultObjectNameFactory());
    Function<MetricRegistry, JmxReporter> reporterFactory = reporter -> JmxReporter.forRegistry(reporter).createsObjectNamesWith(spyObjectNameFactory).build();
    // run the server as the first data node in the mock cluster map
    DataNodeId dataNodeId = clusterAgentsFactory.getClusterMap().getDataNodeIds().get(0);
    Properties props = new Properties();
    props.setProperty("host.name", dataNodeId.getHostname());
    props.setProperty("port", Integer.toString(dataNodeId.getPort()));
    props.setProperty("clustermap.cluster.name", "test");
    props.setProperty("clustermap.datacenter.name", "DC1");
    props.setProperty("clustermap.host.name", dataNodeId.getHostname());
    AmbryServer ambryServer = new AmbryServer(new VerifiableProperties(props), clusterAgentsFactory, null, new LoggingNotificationSystem(), SystemTime.getInstance(), reporterFactory);
    ambryServer.startup();
    // every metric registered during startup should have gone through the spied ObjectNameFactory
    verify(spyObjectNameFactory, atLeastOnce()).createName(anyString(), anyString(), anyString());
    ambryServer.shutdown();
}
Also used : DefaultObjectNameFactory(com.codahale.metrics.jmx.DefaultObjectNameFactory) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) DataNodeId(com.github.ambry.clustermap.DataNodeId) ObjectNameFactory(com.codahale.metrics.jmx.ObjectNameFactory) Test(org.junit.Test) Function(java.util.function.Function) JmxReporter(com.codahale.metrics.jmx.JmxReporter) Mockito(org.mockito.Mockito) SystemTime(com.github.ambry.utils.SystemTime) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) MockClusterAgentsFactory(com.github.ambry.clustermap.MockClusterAgentsFactory)
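
For reference, createsObjectNamesWith accepts any ObjectNameFactory, so the same hook the test exercises can customize JMX naming in production. A minimal sketch of a custom factory, assuming only the single createName method the test verifies above (the domain prefix and class name are illustrative, not part of ambry):

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import com.codahale.metrics.jmx.ObjectNameFactory;

/**
 * Sketch: prefixes every JMX domain so all metrics group under one tree.
 */
class PrefixedObjectNameFactory implements ObjectNameFactory {

    private final String prefix;

    PrefixedObjectNameFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public ObjectName createName(String type, String domain, String name) {
        try {
            // quote the metric name in case it contains characters that are illegal in an ObjectName
            return new ObjectName(prefix + "." + domain, "name", ObjectName.quote(name));
        } catch (MalformedObjectNameException e) {
            throw new IllegalArgumentException("Could not create an ObjectName for " + name, e);
        }
    }
}

The test instead wraps the stock DefaultObjectNameFactory in a Mockito spy, which keeps default naming while still letting verify(...) observe the createName calls.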

Example 87 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class AmbryServerRequestsTest method crossColoMetricsUpdateTest.

/**
 * Test that cross-colo metrics are updated correctly when {@link AmbryRequests} handles {@link GetRequest} and
 * {@link ReplicaMetadataRequest}.
 * @throws Exception
 */
@Test
public void crossColoMetricsUpdateTest() throws Exception {
    // find a node in a remote datacenter
    DataNodeId remoteNode = clusterMap.getDataNodeIds().stream().filter(node -> !node.getDatacenterName().equals(localDc)).findFirst().get();
    PartitionId id = clusterMap.getReplicaIds(remoteNode).get(0).getPartitionId();
    // send cross-colo metadata request and verify
    String clientId = "replication-metadata-" + remoteNode.getHostname() + "[" + remoteNode.getDatacenterName() + "]";
    List<Response> responseList = sendAndVerifyOperationRequest(RequestOrResponseType.ReplicaMetadataRequest, Collections.singletonList(id), ServerErrorCode.No_Error, null, clientId);
    assertEquals("cross-colo metadata exchange bytes are not expected", responseList.get(0).sizeInBytes(), serverMetrics.crossColoMetadataExchangeBytesRate.get(remoteNode.getDatacenterName()).getCount());
    responseList.forEach(Response::release);
    // send cross-colo get request and verify
    clientId = GetRequest.Replication_Client_Id_Prefix + remoteNode.getHostname() + "[" + remoteNode.getDatacenterName() + "]";
    responseList = sendAndVerifyOperationRequest(RequestOrResponseType.GetRequest, Collections.singletonList(id), ServerErrorCode.No_Error, null, clientId);
    assertEquals("cross-colo fetch bytes are not expected", responseList.get(0).sizeInBytes(), serverMetrics.crossColoFetchBytesRate.get(remoteNode.getDatacenterName()).getCount());
    responseList.forEach(Response::release);
}
Also used : CatchupStatusAdminResponse(com.github.ambry.protocol.CatchupStatusAdminResponse) Response(com.github.ambry.protocol.Response) GetResponse(com.github.ambry.protocol.GetResponse) ReplicaMetadataResponse(com.github.ambry.protocol.ReplicaMetadataResponse) AdminResponse(com.github.ambry.protocol.AdminResponse) RequestOrResponse(com.github.ambry.protocol.RequestOrResponse) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) Test(org.junit.Test) MessageInfoTest(com.github.ambry.store.MessageInfoTest) MessageFormatInputStreamTest(com.github.ambry.messageformat.MessageFormatInputStreamTest)
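
The datacenter that keys the cross-colo metric comes from the client id itself, which the test builds as <prefix><hostname>[<datacenter>]. A sketch of recovering the datacenter from that convention (the helper and class are hypothetical, not ambry's actual parser):

class ClientIdSketch {

    /**
     * Hypothetical helper: extract the datacenter from a replication client id of
     * the form "<prefix><hostname>[<datacenter>]", mirroring the ids built above.
     */
    static String extractDatacenter(String clientId) {
        int open = clientId.lastIndexOf('[');
        int close = clientId.lastIndexOf(']');
        // anything without the bracketed suffix is not a replication client id
        return (open >= 0 && close > open) ? clientId.substring(open + 1, close) : null;
    }

    public static void main(String[] args) {
        // prints "DC1" for a client id shaped like the ones in the test
        System.out.println(extractDatacenter("replication-metadata-host1.example.com[DC1]"));
    }
}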

Example 88 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class DiskReformatter method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
    DiskReformatterConfig config = new DiskReformatterConfig(properties);
    StoreConfig storeConfig = new StoreConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
    try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.datanodeHostname, config.datanodePort);
        if (dataNodeId == null) {
            throw new IllegalArgumentException("Did not find node in clustermap with hostname:port - " + config.datanodeHostname + ":" + config.datanodePort);
        }
        DiskReformatter reformatter = new DiskReformatter(dataNodeId, Collections.EMPTY_LIST, config.fetchSizeInBytes, storeConfig, storeKeyFactory, clusterMap, SystemTime.getInstance(), storeKeyConverterFactory.getStoreKeyConverter());
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(config.diskMountPaths.length);
        for (int i = 0; i < config.diskMountPaths.length; i++) {
            int finalI = i;
            // the reformat work for one disk; any failure escapes the runnable so the
            // uncaught-exception handler installed below can record it
            Runnable runnable = () -> {
                try {
                    reformatter.reformat(config.diskMountPaths[finalI], new File(config.scratchPaths[finalI]));
                    latch.countDown();
                } catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            };
            Thread thread = Utils.newThread(config.diskMountPaths[finalI] + "-reformatter", runnable, true);
            thread.setUncaughtExceptionHandler((t, e) -> {
                exitStatus.set(1);
                logger.error("Reformatting {} failed", config.diskMountPaths[finalI], e);
                // count down even on failure so latch.await() below can never hang
                latch.countDown();
            });
            thread.start();
        }
        latch.await();
        System.exit(exitStatus.get());
    }
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerConfig(com.github.ambry.config.ServerConfig) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StoreConfig(com.github.ambry.config.StoreConfig) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) DataNodeId(com.github.ambry.clustermap.DataNodeId) File(java.io.File)
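
The per-disk fan-out above is a reusable pattern: one daemon thread per task, a CountDownLatch sized to the task count, and an uncaught-exception handler that records failure and still counts down so latch.await() can never hang. A standalone sketch under those assumptions (task names and the simulated failure are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {

    public static void main(String[] args) throws InterruptedException {
        // stand-ins for the disk mount paths in the tool above
        String[] tasks = {"/mnt/disk0", "/mnt/disk1", "/mnt/disk2"};
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(tasks.length);
        for (String task : tasks) {
            Thread thread = new Thread(() -> {
                // per-task work goes here; simulate one failing task
                if (task.endsWith("1")) {
                    throw new IllegalStateException("simulated failure on " + task);
                }
                latch.countDown();
            }, task + "-worker");
            thread.setDaemon(true);
            thread.setUncaughtExceptionHandler((t, e) -> {
                // record the failure and release the latch so main still finishes
                exitStatus.set(1);
                latch.countDown();
            });
            thread.start();
        }
        latch.await();
        System.exit(exitStatus.get());
    }
}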

Example 89 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class DumpIndexTool method main.

public static void main(String[] args) throws Exception {
    final AtomicInteger exitCode = new AtomicInteger(0);
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        StoreConfig storeConfig = new StoreConfig(verifiableProperties);
        // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
        StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
        ServerConfig serverConfig = new ServerConfig(verifiableProperties);
        Time time = SystemTime.getInstance();
        Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties, clusterMap.getMetricRegistry());
        DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics, throttler, storeKeyConverterFactory.getStoreKeyConverter());
        Set<StoreKey> filterKeySet = new HashSet<>();
        for (String key : config.filterSet) {
            filterKeySet.add(new BlobId(key, clusterMap));
        }
        switch(config.typeOfOperation) {
            case DumpIndex:
                dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case DumpIndexSegment:
                dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case VerifyIndex:
                IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet, time.milliseconds(), config.detectDuplicatesAcrossKeys);
                exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
                break;
            case VerifyDataNode:
                DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
                if (dataNodeId == null) {
                    logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
                } else {
                    Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId).stream().map(replicaId -> new File(replicaId.getMountPath())).collect(Collectors.toSet());
                    Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet, config.parallelism, config.detectDuplicatesAcrossKeys);
                    replicaDirs.removeAll(resultsByReplica.keySet());
                    if (replicaDirs.size() != 0) {
                        logger.error("Results obtained missing {}", replicaDirs);
                        exitCode.set(5);
                    } else {
                        resultsByReplica.forEach((replicaDir, result) -> exitCode.set(Math.max(exitCode.get(), reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
                    }
                }
                break;
            default:
                throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
        }
    }
    System.exit(exitCode.get());
}
Also used : Arrays(java.util.Arrays) Default(com.github.ambry.config.Default) DataNodeId(com.github.ambry.clustermap.DataNodeId) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Future(java.util.concurrent.Future) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) Time(com.github.ambry.utils.Time) EnumSet(java.util.EnumSet) ExecutorService(java.util.concurrent.ExecutorService) StoreConfig(com.github.ambry.config.StoreConfig) Logger(org.slf4j.Logger) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Config(com.github.ambry.config.Config) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) ServerConfig(com.github.ambry.config.ServerConfig) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) Collectors(java.util.stream.Collectors) File(java.io.File) Executors(java.util.concurrent.Executors) AtomicLong(java.util.concurrent.atomic.AtomicLong) ToolUtils(com.github.ambry.tools.util.ToolUtils) List(java.util.List) Throttler(com.github.ambry.utils.Throttler) TreeMap(java.util.TreeMap) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Timer(com.codahale.metrics.Timer) Collections(java.util.Collections) BlobId(com.github.ambry.commons.BlobId) SortedMap(java.util.SortedMap)
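
The VerifyDataNode branch reduces the per-replica results to a single process exit code by keeping the highest code seen, so one bad replica fails the whole run. A standalone sketch of that reduction (the class name and map contents are stand-ins for the tool's real results):

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class ExitCodeSketch {

    public static void main(String[] args) {
        AtomicInteger exitCode = new AtomicInteger(0);
        // stand-in verification results: replica directory -> exit code
        Map<String, Integer> resultsByReplica = Map.of("/mnt/disk0/replica", 0, "/mnt/disk1/replica", 5);
        // the forEach is single-threaded, so get-then-set on the AtomicInteger is safe here
        resultsByReplica.forEach((replicaDir, code) -> exitCode.set(Math.max(exitCode.get(), code)));
        System.exit(exitCode.get());
    }
}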

Example 90 with DataNodeId

use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.

the class SafeServerShutdownTool method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    SafeServerShutdownToolConfig config = new SafeServerShutdownToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        // create an SSLFactory only if the cluster map lists SSL-enabled datacenters; otherwise connect in plaintext
        SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
        try (ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties)) {
            DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
            if (dataNodeId == null) {
                throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
            }
            SafeServerShutdownTool safeServerShutdownTool = new SafeServerShutdownTool(serverAdminTool, SystemTime.getInstance());
            int exitStatus = safeServerShutdownTool.prepareServerForShutdown(dataNodeId, config.logGrowthPauseLagThresholdBytes, config.numReplicasCaughtUpPerPartition, config.timeoutSecs, config.checkRepeatDelaySecs) ? 0 : 1;
            System.exit(exitStatus);
        }
    }
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) SSLConfig(com.github.ambry.config.SSLConfig) SSLFactory(com.github.ambry.commons.SSLFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) DataNodeId(com.github.ambry.clustermap.DataNodeId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
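
Examples 88 through 90 share the same tool skeleton: parse the args into VerifiableProperties, open the ClusterMap in try-with-resources so it is closed on every exit path, resolve the target DataNodeId, and fail fast when the node is not in the cluster map. A condensed sketch of that skeleton (the class name, layout file paths, and endpoint are illustrative; tool-specific work replaces the final comment):

public class NodeToolSketch {

    public static void main(String[] args) throws Exception {
        VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
        ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, "/path/to/hardware-layout.json", "/path/to/partition-layout.json");
        try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
            DataNodeId dataNodeId = clusterMap.getDataNodeId("ambry1.example.com", 6667);
            if (dataNodeId == null) {
                throw new IllegalArgumentException("No data node in the cluster map at ambry1.example.com:6667");
            }
            // tool-specific work against dataNodeId goes here
        }
    }
}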

Aggregations

DataNodeId (com.github.ambry.clustermap.DataNodeId): 92
ArrayList (java.util.ArrayList): 45
Test (org.junit.Test): 45
HashMap (java.util.HashMap): 29
PartitionId (com.github.ambry.clustermap.PartitionId): 28
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 27
ReplicaId (com.github.ambry.clustermap.ReplicaId): 25
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 23
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 23
MetricRegistry (com.codahale.metrics.MetricRegistry): 22
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 22
List (java.util.List): 22
Map (java.util.Map): 22
Port (com.github.ambry.network.Port): 21
ClusterMap (com.github.ambry.clustermap.ClusterMap): 20
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 19
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 18
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 17
HashSet (java.util.HashSet): 16
Properties (java.util.Properties): 16