
Example 56 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by LinkedIn.

The class HelixAccountServiceTest, method testBackgroundUpdater.

/**
 * Tests the background updater for updating accounts from the remote store. During the initialization of
 * {@link HelixAccountService}, its internal {@link HelixPropertyStore} is read once to get the initial account data.
 * Because of the background account updater, the service should then continue to make get calls to the
 * {@link HelixPropertyStore}, even if no notification for account updates is received. Therefore, there will be
 * more than one get call to the {@link HelixPropertyStore}.
 * @throws Exception
 */
@Test
public void testBackgroundUpdater() throws Exception {
    helixConfigProps.setProperty(HelixAccountServiceConfig.UPDATER_POLLING_INTERVAL_MS_KEY, "1");
    vHelixConfigProps = new VerifiableProperties(helixConfigProps);
    storeConfig = new HelixPropertyStoreConfig(vHelixConfigProps);
    String updaterThreadPrefix = UUID.randomUUID().toString();
    MockHelixAccountServiceFactory mockHelixAccountServiceFactory = new MockHelixAccountServiceFactory(vHelixConfigProps, new MetricRegistry(), notifier, updaterThreadPrefix);
    accountService = mockHelixAccountServiceFactory.getAccountService();
    CountDownLatch latch = new CountDownLatch(1);
    mockHelixAccountServiceFactory.getHelixStore(storeConfig).setReadLatch(latch);
    assertEquals("Wrong number of thread for account updater.", 1, numThreadsByThisName(updaterThreadPrefix));
    awaitLatchOrTimeout(latch, 100);
}
Also used: VerifiableProperties (com.github.ambry.config.VerifiableProperties), HelixPropertyStoreConfig (com.github.ambry.config.HelixPropertyStoreConfig), MetricRegistry (com.codahale.metrics.MetricRegistry), CountDownLatch (java.util.concurrent.CountDownLatch), Test (org.junit.Test)
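
A minimal sketch of the polling pattern this test exercises, assuming nothing Ambry-specific: a ScheduledExecutorService fires at a fixed interval, so the fetch task keeps running even when no change notification arrives. The class and counter names below are hypothetical illustrations, not Ambry code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for the HelixPropertyStore read loop; not Ambry code.
public class PollingUpdaterSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger storeReads = new AtomicInteger();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Poll every 1 ms, mirroring UPDATER_POLLING_INTERVAL_MS_KEY = "1" in the test.
        scheduler.scheduleAtFixedRate(storeReads::incrementAndGet, 0, 1, TimeUnit.MILLISECONDS);
        Thread.sleep(100);
        scheduler.shutdownNow();
        // With no notifications at all, the poller alone produces many reads.
        System.out.println("store reads observed: " + storeReads.get());
    }
}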

Example 57 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by LinkedIn.

The class HelixAccountServiceTest, method testNullInputs.

/**
 * Tests that null inputs are rejected with {@link NullPointerException}.
 */
@Test
public void testNullInputs() throws IOException {
    try {
        new MockHelixAccountServiceFactory(null, new MetricRegistry(), notifier, null).getAccountService();
        fail("should have thrown");
    } catch (NullPointerException e) {
        // expected
    }
    try {
        new MockHelixAccountServiceFactory(vHelixConfigProps, null, notifier, null).getAccountService();
        fail("should have thrown");
    } catch (NullPointerException e) {
        // expected
    }
    accountService = new MockHelixAccountServiceFactory(vHelixConfigProps, new MetricRegistry(), null, null).getAccountService();
    accountService.close();
    accountService = mockHelixAccountServiceFactory.getAccountService();
    try {
        accountService.updateAccounts(null);
        fail("should have thrown");
    } catch (NullPointerException e) {
        // expected
    }
    try {
        accountService.getAccountByName(null);
        fail("should have thrown");
    } catch (NullPointerException e) {
        // expected
    }
}
Also used: MetricRegistry (com.codahale.metrics.MetricRegistry), Test (org.junit.Test)
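
The try/fail/catch blocks above are the classic JUnit 4 idiom for expected exceptions. If the project is on JUnit 4.13 or later, Assert.assertThrows expresses the same check in one line; this is an alternative sketch, not how the Ambry test is written, and Objects.requireNonNull merely stands in for a factory call that rejects null.

import static org.junit.Assert.assertThrows;

import java.util.Objects;
import org.junit.Test;

public class NullInputSketchTest {

    @Test
    public void rejectsNullInput() {
        // Objects.requireNonNull is a stand-in for a constructor that rejects null.
        assertThrows(NullPointerException.class, () -> Objects.requireNonNull(null));
    }
}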

Example 58 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by LinkedIn.

The class DumpDataTool, method compareIndexEntriesToLogContent.

/**
 * Compares every entry in an index file with the corresponding entries in the log, checking that each blob
 * referenced by the index can be successfully deserialized from the log.
 * @param indexFile the file that represents the index segment.
 * @param checkLogEndOffsetMatch if {@code true}, checks that the end offset of the log matches the end offset of the
 *                               index.
 * @throws Exception
 */
private void compareIndexEntriesToLogContent(File indexFile, boolean checkLogEndOffsetMatch) throws Exception {
    if (!indexFile.exists()) {
        throw new IllegalArgumentException("File does not exist " + indexFile);
    }
    final Timer.Context context = metrics.compareIndexFileToLogTimeMs.time();
    try {
        logger.info("Dumping index {}", indexFile.getAbsolutePath());
        StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
        StoreConfig config = new StoreConfig(new VerifiableProperties(new Properties()));
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics storeMetrics = new StoreMetrics(metricRegistry);
        IndexSegment segment = new IndexSegment(indexFile, false, storeKeyFactory, config, storeMetrics, new Journal(indexFile.getParent(), 0, 0), time);
        Offset startOffset = segment.getStartOffset();
        TreeMap<Long, Long> coveredRanges = new TreeMap<>();
        String logFileName = LogSegmentNameHelper.nameToFilename(segment.getLogSegmentName());
        File logFile = new File(indexFile.getParent(), logFileName);
        if (!logFile.exists()) {
            throw new IllegalStateException("Log file does not exist " + logFile);
        }
        RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
        long logFileSize = randomAccessFile.getChannel().size();
        List<MessageInfo> entries = new ArrayList<>();
        segment.getEntriesSince(null, new FindEntriesCondition(Long.MAX_VALUE), entries, new AtomicLong(0));
        for (MessageInfo entry : entries) {
            StoreKey key = entry.getStoreKey();
            IndexValue value = segment.find(key);
            boolean isDeleted = value.isFlagSet(IndexValue.Flags.Delete_Index);
            if (value.getOffset().getOffset() < logFileSize) {
                boolean success = readFromLogAndVerify(randomAccessFile, key.getID(), value, coveredRanges);
                if (success) {
                    if (isDeleted) {
                        long originalOffset = value.getOriginalMessageOffset();
                        if (originalOffset != -1) {
                            if (!coveredRanges.containsKey(originalOffset)) {
                                if (startOffset.getOffset() > originalOffset) {
                                    logger.trace("Put Record at {} with delete msg offset {} ignored because it is prior to startOffset {}", originalOffset, value.getOffset(), startOffset);
                                } else {
                                    try {
                                        DumpDataHelper.LogBlobRecordInfo logBlobRecordInfo = DumpDataHelper.readSingleRecordFromLog(randomAccessFile, originalOffset, clusterMap, currentTimeInMs, metrics);
                                        coveredRanges.put(originalOffset, originalOffset + logBlobRecordInfo.totalRecordSize);
                                        logger.trace("PUT Record {} with start offset {} and end offset {} for a delete msg {} at offset {} ", logBlobRecordInfo.blobId, originalOffset, (originalOffset + logBlobRecordInfo.totalRecordSize), key.getID(), value.getOffset());
                                        if (!logBlobRecordInfo.blobId.getID().equals(key.getID())) {
                                            logger.error("BlobId value mismatch between delete record {} and put record {}", key.getID(), logBlobRecordInfo.blobId.getID());
                                        }
                                    } catch (IllegalArgumentException e) {
                                        metrics.logDeserializationError.inc();
                                        logger.error("Illegal arg exception thrown at  " + randomAccessFile.getChannel().position() + ", " + "while reading blob starting at offset " + originalOffset + " with exception: ", e);
                                    } catch (MessageFormatException e) {
                                        metrics.logDeserializationError.inc();
                                        logger.error("MessageFormat exception thrown at  " + randomAccessFile.getChannel().position() + " while reading blob starting at offset " + originalOffset + " with exception: ", e);
                                    } catch (EOFException e) {
                                        metrics.endOfFileOnDumpLogError.inc();
                                        logger.error("EOFException thrown at " + randomAccessFile.getChannel().position() + " ", e);
                                    } catch (Exception e) {
                                        metrics.unknownErrorOnDumpIndex.inc();
                                        logger.error("Unknown exception thrown " + e.getMessage() + " ", e);
                                    }
                                }
                            }
                        }
                    }
                } else {
                    metrics.indexToLogBlobRecordComparisonFailure.inc();
                    logger.error("Failed for key {} with value {} ", key, value);
                }
            } else {
                logger.trace("Blob's {} offset {} is outside of log size {}, with a diff of {}", key, value.getOffset().getOffset(), logFileSize, (value.getOffset().getOffset() - logFileSize));
            }
        }
        throttler.maybeThrottle(entries.size());
        long indexEndOffset = segment.getEndOffset().getOffset();
        if (checkLogEndOffsetMatch && indexEndOffset != randomAccessFile.length()) {
            metrics.indexLogEndOffsetMisMatchError.inc();
            logger.error("Log end offset {} and index end offset {} do not match", randomAccessFile.length(), indexEndOffset);
        }
        logRangesNotCovered(coveredRanges, indexEndOffset);
    } finally {
        context.stop();
    }
}
Also used: ArrayList (java.util.ArrayList), Properties (java.util.Properties), VerifiableProperties (com.github.ambry.config.VerifiableProperties), EOFException (java.io.EOFException), MessageFormatException (com.github.ambry.messageformat.MessageFormatException), MetricRegistry (com.codahale.metrics.MetricRegistry), TreeMap (java.util.TreeMap), IOException (java.io.IOException), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), AtomicLong (java.util.concurrent.atomic.AtomicLong), Timer (com.codahale.metrics.Timer), RandomAccessFile (java.io.RandomAccessFile), StoreConfig (com.github.ambry.config.StoreConfig), File (java.io.File)
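
The metrics.compareIndexFileToLogTimeMs timer above follows the standard Codahale Timer.Context pattern: time() starts the clock, and stop() in a finally block records the elapsed duration even when the body throws. A self-contained sketch against a plain MetricRegistry (the metric name is reused here only for illustration):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class TimerContextSketch {
    public static void main(String[] args) throws InterruptedException {
        MetricRegistry registry = new MetricRegistry();
        Timer timer = registry.timer("compareIndexFileToLogTimeMs");
        final Timer.Context context = timer.time(); // start the clock
        try {
            Thread.sleep(25); // stands in for the index-to-log comparison work
        } finally {
            context.stop(); // records the elapsed time even if the body throws
        }
        // Timer snapshot durations are reported in nanoseconds by default.
        System.out.printf("count=%d, mean=%.2f ms%n",
            timer.getCount(), timer.getSnapshot().getMean() / 1_000_000.0);
    }
}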

Example 59 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by LinkedIn.

The class IndexWritePerformance, method main.

public static void main(String[] args) {
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt = parser.accepts("numberOfIndexes", "The number of indexes to create").withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(numberOfIndexesOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
        int numberOfWriters = options.valueOf(numberOfWritersOpt);
        int writesPerSecond = options.valueOf(writesPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
        final AtomicLong totalWrites = new AtomicLong(0);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        File logFile = new File(System.getProperty("user.dir"), "writeperflog");
        writer = new FileWriter(logFile);
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Log log = new Log(System.getProperty("user.dir"), 10, 10, diskSpaceAllocator, metrics);
        ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
        ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "2097152");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        for (Integer i = 0; i < numberOfIndexes; i++) {
            File indexFile = new File(System.getProperty("user.dir"), i.toString());
            if (indexFile.exists()) {
                for (File c : indexFile.listFiles()) {
                    c.delete();
                }
            } else {
                indexFile.mkdir();
            }
            System.out.println("Creating index folder " + indexFile.getAbsolutePath());
            writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
            indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging, totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
        }
        final CountDownLatch latch = new CountDownLatch(numberOfWriters);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total writes : " + totalWrites.get() + "  Total time taken : " + totalTimeTakenInNs.get() + " Nano Seconds  Average time taken per write " + ((double) totalWrites.get() / totalTimeTakenInNs.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfWriters];
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i].join();
        }
    } catch (StoreException e) {
        System.err.println("Index creation error on exit " + e.getMessage());
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing the writer");
            }
        }
    }
}
Also used: OptionSpec (joptsimple.OptionSpec), ArgumentAcceptingOptionSpec (joptsimple.ArgumentAcceptingOptionSpec), ClusterMap (com.github.ambry.clustermap.ClusterMap), FileWriter (java.io.FileWriter), ArrayList (java.util.ArrayList), Properties (java.util.Properties), VerifiableProperties (com.github.ambry.config.VerifiableProperties), OptionParser (joptsimple.OptionParser), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Throttler (com.github.ambry.utils.Throttler), ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService), MetricRegistry (com.codahale.metrics.MetricRegistry), CountDownLatch (java.util.concurrent.CountDownLatch), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), AtomicLong (java.util.concurrent.atomic.AtomicLong), StoreConfig (com.github.ambry.config.StoreConfig), OptionSet (joptsimple.OptionSet), File (java.io.File)
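
IndexWritePerformance tracks throughput with AtomicLongs and prints an average from the shutdown hook. Since a MetricRegistry is already in scope, a Codahale Meter could report the same running total plus moving rates; the sketch below shows only the Meter API under that assumption and is not how the tool is actually implemented.

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;

public class WriteRateSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Meter writes = registry.meter("index.writes"); // hypothetical metric name
        for (int i = 0; i < 1000; i++) {
            writes.mark(); // one mark per completed write
        }
        System.out.println("total writes: " + writes.getCount());
        System.out.printf("mean rate: %.1f writes/s%n", writes.getMeanRate());
    }
}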

Example 60 with MetricRegistry

Use of com.codahale.metrics.MetricRegistry in project ambry by LinkedIn.

The class ServerReadPerformance, method main.

public static void main(String[] args) {
    ConnectionPool connectionPool = null;
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        String logToRead = options.valueOf(logToReadOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        File logFile = new File(System.getProperty("user.dir"), "readperfresult");
        writer = new FileWriter(logFile);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    String message = "Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
                    System.out.println(message);
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        String line;
        ConnectedChannel channel = null;
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        long totalNumberOfGetBlobs = 0;
        long totalLatencyForGetBlobs = 0;
        ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
        long maxLatencyForGetBlobs = 0;
        long minLatencyForGetBlobs = Long.MAX_VALUE;
        while ((line = br.readLine()) != null) {
            String[] id = line.split("-");
            BlobData blobData = null;
            BlobId blobId = new BlobId(id[1], map);
            ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
            blobIds.add(blobId);
            for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
                long startTimeGetBlob = 0;
                ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
                try {
                    partitionRequestInfoList.clear();
                    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
                    Port port = replicaId.getDataNodeId().getPortToConnectTo();
                    channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
                    startTimeGetBlob = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequest);
                    InputStream receiveStream = channel.receive().getInputStream();
                    GetResponse getResponse = GetResponse.readFrom(new DataInputStream(receiveStream), map);
                    blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
                    long sizeRead = 0;
                    byte[] outputBuffer = new byte[(int) blobData.getSize()];
                    ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
                    while (sizeRead < blobData.getSize()) {
                        streamOut.write(blobData.getStream().read());
                        sizeRead++;
                    }
                    long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
                    totalTimeTaken.addAndGet(latencyPerBlob);
                    latenciesForGetBlobs.add(latencyPerBlob);
                    totalReads.incrementAndGet();
                    totalNumberOfGetBlobs++;
                    totalLatencyForGetBlobs += latencyPerBlob;
                    if (enableVerboseLogging) {
                        System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
                    }
                    if (latencyPerBlob > maxLatencyForGetBlobs) {
                        maxLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (latencyPerBlob < minLatencyForGetBlobs) {
                        minLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (totalLatencyForGetBlobs >= measurementIntervalNs) {
                        Collections.sort(latenciesForGetBlobs);
                        int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
                        int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
                        String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
                        System.out.println(message);
                        writer.write(message + "\n");
                        totalLatencyForGetBlobs = 0;
                        latenciesForGetBlobs.clear();
                        totalNumberOfGetBlobs = 0;
                        maxLatencyForGetBlobs = 0;
                        minLatencyForGetBlobs = Long.MAX_VALUE;
                    }
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestProperties);
                    InputStream receivePropertyStream = channel.receive().getInputStream();
                    GetResponse getResponseProperty = GetResponse.readFrom(new DataInputStream(receivePropertyStream), map);
                    BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
                    long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestUserMetadata);
                    InputStream receiveUserMetadataStream = channel.receive().getInputStream();
                    GetResponse getResponseUserMetadata = GetResponse.readFrom(new DataInputStream(receiveUserMetadataStream), map);
                    ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
                    long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
                    // delete the blob
                    DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
                    channel.send(deleteRequest);
                    DeleteResponse deleteResponse = DeleteResponse.readFrom(new DataInputStream(channel.receive().getInputStream()));
                    if (deleteResponse.getError() != ServerErrorCode.No_Error) {
                        throw new UnexpectedException("error " + deleteResponse.getError());
                    }
                    throttler.maybeThrottle(1);
                } finally {
                    if (channel != null) {
                        connectionPool.checkInConnection(channel);
                        channel = null;
                    }
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error in server read performance " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), GetRequest (com.github.ambry.protocol.GetRequest), BlobData (com.github.ambry.messageformat.BlobData), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Throttler (com.github.ambry.utils.Throttler), UnexpectedException (java.rmi.UnexpectedException), VerifiableProperties (com.github.ambry.config.VerifiableProperties), MetricRegistry (com.codahale.metrics.MetricRegistry), ConnectedChannel (com.github.ambry.network.ConnectedChannel), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlockingChannelConnectionPool (com.github.ambry.network.BlockingChannelConnectionPool), DeleteResponse (com.github.ambry.protocol.DeleteResponse), AtomicLong (java.util.concurrent.atomic.AtomicLong), File (java.io.File), ConnectionPool (com.github.ambry.network.ConnectionPool), OptionSpec (joptsimple.OptionSpec), ArgumentAcceptingOptionSpec (joptsimple.ArgumentAcceptingOptionSpec), ConnectionPoolConfig (com.github.ambry.config.ConnectionPoolConfig), FileWriter (java.io.FileWriter), BlobProperties (com.github.ambry.messageformat.BlobProperties), Properties (java.util.Properties), OptionParser (joptsimple.OptionParser), FileReader (java.io.FileReader), SSLConfig (com.github.ambry.config.SSLConfig), DataInputStream (java.io.DataInputStream), InputStream (java.io.InputStream), PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo), GetResponse (com.github.ambry.protocol.GetResponse), ByteBuffer (java.nio.ByteBuffer), ReplicaId (com.github.ambry.clustermap.ReplicaId), ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), BufferedReader (java.io.BufferedReader), OptionSet (joptsimple.OptionSet), BlobId (com.github.ambry.commons.BlobId), DeleteRequest (com.github.ambry.protocol.DeleteRequest)
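
The percentile bookkeeping above sorts the raw latency list and indexes it at 0.95 and 0.99. A Codahale Histogram from the same MetricRegistry yields those percentiles directly from its snapshot, with the caveat that the default exponentially decaying reservoir approximates the distribution rather than sorting every sample. A standalone sketch with a hypothetical metric name:

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Snapshot;

public class LatencyPercentileSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Histogram latencies = registry.histogram("getBlob.latencyNs"); // hypothetical name
        for (long ms = 1; ms <= 1000; ms++) {
            latencies.update(ms * 1_000_000L); // synthetic 1..1000 ms latencies, in ns
        }
        Snapshot snap = latencies.getSnapshot();
        // Default reservoir is exponentially decaying, so percentiles are estimates.
        System.out.printf("p95=%.0f ns, p99=%.0f ns, mean=%.0f ns%n",
            snap.get95thPercentile(), snap.get99thPercentile(), snap.getMean());
    }
}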

Aggregations

MetricRegistry (com.codahale.metrics.MetricRegistry): 505
Test (org.junit.Test): 177
Before (org.junit.Before): 61
Test (org.junit.jupiter.api.Test): 45
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 42
ArrayList (java.util.ArrayList): 33
Counter (com.codahale.metrics.Counter): 30
File (java.io.File): 29
Properties (java.util.Properties): 28
List (java.util.List): 23
Metric (com.codahale.metrics.Metric): 22
Map (java.util.Map): 22
IOException (java.io.IOException): 21
HashMap (java.util.HashMap): 20
Size (com.github.joschi.jadconfig.util.Size): 17
CountDownLatch (java.util.concurrent.CountDownLatch): 17
TimeUnit (java.util.concurrent.TimeUnit): 17
Timer (com.codahale.metrics.Timer): 15
DefaultTaggedMetricRegistry (com.palantir.tritium.metrics.registry.DefaultTaggedMetricRegistry): 15
ResourceConfig (org.glassfish.jersey.server.ResourceConfig): 15