Example 1 with Pair

Use of com.github.ambry.utils.Pair in project ambry by linkedin.

From the class ReplicationTest, method getPutMessage.

/**
 * Constructs an entire message with header, blob properties, user metadata and blob content.
 * @param id id for which the message has to be constructed.
 * @param accountId accountId of the blob
 * @param containerId containerId of the blob
 * @param enableEncryption {@code true} if encryption needs to be enabled. {@code false} otherwise
 * @return a {@link Pair} of {@link ByteBuffer} and {@link MessageInfo} representing the entire message and the
 *         associated {@link MessageInfo}
 * @throws MessageFormatException if the message cannot be serialized
 * @throws IOException if reading the serialized message from the stream fails
 */
private Pair<ByteBuffer, MessageInfo> getPutMessage(StoreKey id, short accountId, short containerId, boolean enableEncryption) throws MessageFormatException, IOException {
    int blobSize = TestUtils.RANDOM.nextInt(500) + 501;
    int userMetadataSize = TestUtils.RANDOM.nextInt(blobSize / 2);
    int encryptionKeySize = TestUtils.RANDOM.nextInt(blobSize / 4);
    byte[] blob = new byte[blobSize];
    byte[] usermetadata = new byte[userMetadataSize];
    byte[] encryptionKey = enableEncryption ? new byte[encryptionKeySize] : null;
    TestUtils.RANDOM.nextBytes(blob);
    TestUtils.RANDOM.nextBytes(usermetadata);
    BlobProperties blobProperties = new BlobProperties(blobSize, "test", accountId, containerId, encryptionKey != null);
    MessageFormatInputStream stream = new PutMessageFormatInputStream(id, encryptionKey == null ? null : ByteBuffer.wrap(encryptionKey), blobProperties, ByteBuffer.wrap(usermetadata), new ByteBufferInputStream(ByteBuffer.wrap(blob)), blobSize);
    byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
    return new Pair<>(ByteBuffer.wrap(message), new MessageInfo(id, message.length, Utils.Infinite_Time, accountId, containerId, blobProperties.getCreationTimeInMs()));
}
Also used : BlobProperties(com.github.ambry.messageformat.BlobProperties) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) DeleteMessageFormatInputStream(com.github.ambry.messageformat.DeleteMessageFormatInputStream) MessageFormatInputStream(com.github.ambry.messageformat.MessageFormatInputStream) Pair(com.github.ambry.utils.Pair) MessageInfo(com.github.ambry.store.MessageInfo)
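
A caller unpacks the two halves of the returned Pair with getFirst() and getSecond(). As a minimal usage sketch (a hypothetical test-side caller, not taken from the Ambry sources, and assuming MessageInfo exposes its size via getSize()):

Pair<ByteBuffer, MessageInfo> message = getPutMessage(id, accountId, containerId, false);
// The first element is the fully serialized message...
ByteBuffer payload = message.getFirst();
// ...and the second is the bookkeeping record describing that same message.
MessageInfo info = message.getSecond();
// The MessageInfo was constructed with the serialized length, so the two should agree.
assertEquals(payload.remaining(), info.getSize());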

Example 2 with Pair

Use of com.github.ambry.utils.Pair in project ambry by linkedin.

From the class LogTest, method constructionBadArgsTest.

/**
 * Tests cases where bad arguments are provided to the {@link Log} constructor.
 * @throws IOException if a file needed by the test cannot be created or read
 */
@Test
public void constructionBadArgsTest() throws IOException {
    List<Pair<Long, Long>> logAndSegmentSizes = new ArrayList<>();
    // <=0 values for capacities
    logAndSegmentSizes.add(new Pair<>(-1L, SEGMENT_CAPACITY));
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, -1L));
    logAndSegmentSizes.add(new Pair<>(0L, SEGMENT_CAPACITY));
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, 0L));
    // log capacity is not perfectly divisible by segment capacity
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, LOG_CAPACITY - 1));
    for (Pair<Long, Long> logAndSegmentSize : logAndSegmentSizes) {
        try {
            new Log(tempDir.getAbsolutePath(), logAndSegmentSize.getFirst(), logAndSegmentSize.getSecond(), StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, metrics);
            fail("Construction should have failed");
        } catch (IllegalArgumentException e) {
        // expected. Nothing to do.
        }
    }
    // file which is not a directory
    File file = create(LogSegmentNameHelper.nameToFilename(LogSegmentNameHelper.generateFirstSegmentName(false)));
    try {
        new Log(file.getAbsolutePath(), 1, 1, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, metrics);
        fail("Construction should have failed");
    } catch (IOException e) {
    // expected. Nothing to do.
    }
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) File(java.io.File) Pair(com.github.ambry.utils.Pair) Test(org.junit.Test) UtilsTest(com.github.ambry.utils.UtilsTest)
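
In this test, Pair serves purely as an immutable 2-tuple that parameterizes the bad-argument cases. Judging only from its usage in these examples (a two-argument constructor plus getFirst() and getSecond()), a minimal equivalent would look like the sketch below; the real com.github.ambry.utils.Pair may well add extras such as equals(), hashCode(), or toString():

// Minimal sketch of a Pair-like immutable holder, inferred from its usage in these examples.
public class Pair<A, B> {

    private final A first;
    private final B second;

    public Pair(A first, B second) {
        this.first = first;
        this.second = second;
    }

    public A getFirst() {
        return first;
    }

    public B getSecond() {
        return second;
    }
}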

Example 3 with Pair

Use of com.github.ambry.utils.Pair in project ambry by linkedin.

From the class BlobStoreStats, method getValidSize.

@Override
public Pair<Long, Long> getValidSize(TimeRange timeRange) throws StoreException {
    Pair<Long, NavigableMap<String, Long>> logSegmentValidSizeResult = getValidDataSizeByLogSegment(timeRange);
    long totalValidSize = 0;
    for (Long value : logSegmentValidSizeResult.getSecond().values()) {
        totalValidSize += value;
    }
    return new Pair<>(logSegmentValidSizeResult.getFirst(), totalValidSize);
}
Also used : ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) NavigableMap(java.util.NavigableMap) AtomicLong(java.util.concurrent.atomic.AtomicLong) Pair(com.github.ambry.utils.Pair)
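
The accumulation loop can equivalently be written as a stream reduction. This is just an alternative sketch, not the Ambry code; the behavior is identical:

// Sum the per-log-segment valid sizes with a stream instead of an explicit loop.
long totalValidSize = logSegmentValidSizeResult.getSecond().values().stream()
    .mapToLong(Long::longValue)
    .sum();
return new Pair<>(logSegmentValidSizeResult.getFirst(), totalValidSize);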

Example 4 with Pair

Use of com.github.ambry.utils.Pair in project ambry by linkedin.

From the class ServerTestUtil, method undeleteCornerCasesTest.

static void undeleteCornerCasesTest(MockCluster cluster, PortType portType, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
    MockClusterMap clusterMap = cluster.getClusterMap();
    byte[] userMetadata = new byte[1000];
    byte[] data = new byte[31870];
    byte[] encryptionKey = new byte[100];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
    TestUtils.RANDOM.nextBytes(userMetadata);
    TestUtils.RANDOM.nextBytes(data);
    if (testEncryption) {
        TestUtils.RANDOM.nextBytes(encryptionKey);
    }
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    Map<String, List<DataNodeId>> dataNodesPerDC = clusterMap.getDataNodes().stream().collect(Collectors.groupingBy(DataNodeId::getDatacenterName));
    Map<String, Pair<SSLConfig, SSLSocketFactory>> sslSettingPerDC = new HashMap<>();
    sslSettingPerDC.put("DC1", new Pair<>(clientSSLConfig1, clientSSLSocketFactory1));
    sslSettingPerDC.put("DC2", new Pair<>(clientSSLConfig2, clientSSLSocketFactory2));
    sslSettingPerDC.put("DC3", new Pair<>(clientSSLConfig3, clientSSLSocketFactory3));
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    DataNodeId dataNodeId = dataNodesPerDC.get("DC1").get(0);
    Router router = null;
    try {
        Properties routerProperties = getRouterProps("DC1");
        routerProperties.putAll(routerProps);
        VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
        AccountService accountService = new InMemAccountService(false, true);
        router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
        // channels to all datanodes
        List<ConnectedChannel> channels = new ArrayList<>();
        for (Map.Entry<String, List<DataNodeId>> entry : dataNodesPerDC.entrySet()) {
            Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(entry.getKey());
            for (DataNodeId node : entry.getValue()) {
                ConnectedChannel connectedChannel = getBlockingChannelBasedOnPortType(portType, node, pair.getSecond(), pair.getFirst());
                connectedChannel.connect();
                channels.add(connectedChannel);
            }
        }
        // ////////////////////////////////////////////////////
        // Corner case 1: When only one datacenter has delete
        // ////////////////////////////////////////////////////
        BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        ConnectedChannel channel = getBlockingChannelBasedOnPortType(portType, dataNodeId, clientSSLSocketFactory1, clientSSLConfig1);
        channel.connect();
        PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        PutResponse response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId1.toString());
        // Now stop replication for this partition.
        PartitionId partitionId = blobId1.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the delete to two data nodes in the same DC
        List<DataNodeId> toBeDeleteDataNodes = dataNodesPerDC.values().stream().findFirst().get();
        Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(toBeDeleteDataNodes.get(0).getDatacenterName());
        ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(0), pair.getSecond(), pair.getFirst());
        channel1.connect();
        ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(1), pair.getSecond(), pair.getFirst());
        channel2.connect();
        DeleteRequest deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
        DataInputStream stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        DeleteRequest deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId1, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a BlobNotDeleted error.
        Future<Void> future = router.undeleteBlob(blobId1.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId1.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.BlobNotDeleted, ((RouterException) e.getCause()).getErrorCode());
        }
        // Now check whether data node 1 or data node 2 has the undelete. If so, the undelete will replicate;
        // if not, the delete will replicate.
        List<PartitionRequestInfo> partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId1);
        boolean hasUndelete = false;
        for (ConnectedChannel connectedChannel : new ConnectedChannel[] { channel1, channel2 }) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            hasUndelete = getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == (short) 1;
            if (hasUndelete) {
                break;
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        if (hasUndelete) {
            notificationSystem.awaitBlobUndeletes(blobId1.toString());
        } else {
            notificationSystem.awaitBlobDeletions(blobId1.toString());
        }
        for (ConnectedChannel connectedChannel : channels) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            releaseNettyBufUnderneathStream(stream);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            if (hasUndelete) {
                assertEquals((short) 1, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
            } else {
                assertEquals((short) 0, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
            }
        }
        // ///////////////////////////////////////////////////////////
        // Corner case 2: two data nodes have different life versions
        // //////////////////////////////////////////////////////////
        BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        putRequest = new PutRequest(1, "client1", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId2.toString());
        // Now delete this blob on all servers.
        DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel.sendAndReceive(deleteRequest).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobId2.toString());
        // Now stop the replication
        partitionId = blobId2.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the undelete to two data nodes in the same DC, and then send a delete
        UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(undeleteRequest).getInputStream();
        UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, undeleteRequest.getOperationTimeMs());
        stream = channel2.sendAndReceive(undeleteRequest).getInputStream();
        undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId2, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a LifeVersionConflict error.
        future = router.undeleteBlob(blobId2.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId2.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.LifeVersionConflict, ((RouterException) e.getCause()).getErrorCode());
        }
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        notificationSystem.awaitBlobUndeletes(blobId2.toString());
        // Now that replication has resumed, the undelete at lifeVersion 2 will eventually be replicated to all servers.
        partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId2);
        for (ConnectedChannel connectedChannel : channels) {
            // Even if the notificationSystem acknowledged the undelete, it might have been triggered by the undelete at lifeVersion 1.
            // So check in a loop with a time out.
            long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
            while (true) {
                GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
                stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
                GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
                assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
                MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
                if (getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == 2) {
                    assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                    assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                    break;
                } else {
                    Thread.sleep(1000);
                    if (System.currentTimeMillis() > deadline) {
                        throw new TimeoutException("Failed to get blob " + blobId2 + " at lifeVersion 2 from " + connectedChannel.getRemoteHost());
                    }
                }
            }
        }
        releaseNettyBufUnderneathStream(stream);
        for (ConnectedChannel connectedChannel : channels) {
            connectedChannel.disconnect();
        }
        channel1.disconnect();
        channel2.disconnect();
        channel.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    } finally {
        if (router != null) {
            try {
                router.close();
            } catch (Exception e) {
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) UndeleteRequest(com.github.ambry.protocol.UndeleteRequest) UndeleteResponse(com.github.ambry.protocol.UndeleteResponse) InMemAccountService(com.github.ambry.account.InMemAccountService) GetRequest(com.github.ambry.protocol.GetRequest) List(java.util.List) SSLSocketFactory(javax.net.ssl.SSLSocketFactory) TimeoutException(java.util.concurrent.TimeoutException) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Router(com.github.ambry.router.Router) ConnectedChannel(com.github.ambry.network.ConnectedChannel) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) DeleteResponse(com.github.ambry.protocol.DeleteResponse) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) AccountService(com.github.ambry.account.AccountService) Map(java.util.Map) ClusterMap(com.github.ambry.clustermap.ClusterMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) PutResponse(com.github.ambry.protocol.PutResponse) ExecutionException(java.util.concurrent.ExecutionException) Pair(com.github.ambry.utils.Pair) NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory) SSLConfig(com.github.ambry.config.SSLConfig) RouterException(com.github.ambry.router.RouterException) PutRequest(com.github.ambry.protocol.PutRequest) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetResponse(com.github.ambry.protocol.GetResponse) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) BlobId(com.github.ambry.commons.BlobId) DeleteRequest(com.github.ambry.protocol.DeleteRequest)
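
The assertions in this test revolve around Ambry's lifeVersion counter: a freshly put blob carries lifeVersion 0, and each successful undelete raises it (the responses above report 1 and then 2). The four-line assertion pattern repeated throughout could be factored into a helper along these lines (a hypothetical sketch, not present in ServerTestUtil):

// Hypothetical helper condensing the repeated blob-state assertions in the test above.
private static void assertBlobState(GetResponse getResponse, short expectedLifeVersion,
        boolean expectUndeleted, boolean expectDeleted) {
    MessageInfo info = getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
    assertEquals(expectedLifeVersion, info.getLifeVersion());
    assertEquals(expectUndeleted, info.isUndeleted());
    assertEquals(expectDeleted, info.isDeleted());
}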

Example 5 with Pair

Use of com.github.ambry.utils.Pair in project ambry by linkedin.

From the class HelixClusterAggregator, method doWorkOnStatsWrapperMap.

Pair<StatsSnapshot, StatsSnapshot> doWorkOnStatsWrapperMap(Map<String, StatsWrapper> statsWrappers, StatsReportType type, boolean removeExceptionOnType) throws IOException {
    StatsSnapshot partitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
    Map<String, Long> partitionTimestampMap = new HashMap<>();
    StatsSnapshot rawPartitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
    if (removeExceptionOnType) {
        exceptionOccurredInstances.remove(type);
    }
    for (Map.Entry<String, StatsWrapper> statsWrapperEntry : statsWrappers.entrySet()) {
        if (statsWrapperEntry != null && statsWrapperEntry.getValue() != null) {
            try {
                StatsWrapper snapshotWrapper = statsWrapperEntry.getValue();
                StatsWrapper snapshotWrapperCopy = new StatsWrapper(new StatsHeader(snapshotWrapper.getHeader()), new StatsSnapshot(snapshotWrapper.getSnapshot()));
                combineRawStats(rawPartitionSnapshot, snapshotWrapper);
                switch(type) {
                    case ACCOUNT_REPORT:
                        combineValidStatsByAccount(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
                        break;
                    case PARTITION_CLASS_REPORT:
                        combineValidStatsByPartitionClass(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
                        break;
                    default:
                        throw new IllegalArgumentException("Unrecognized stats report type: " + type);
                }
            } catch (Exception e) {
                logger.error("Exception occurred while processing stats from {}", statsWrapperEntry.getKey(), e);
                exceptionOccurredInstances.computeIfAbsent(type, key -> new ArrayList<>()).add(statsWrapperEntry.getKey());
            }
        }
    }
    if (logger.isTraceEnabled()) {
        logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawPartitionSnapshot));
        logger.trace("Combined valid snapshot {}", mapper.writeValueAsString(partitionSnapshot));
    }
    StatsSnapshot reducedRawSnapshot;
    StatsSnapshot reducedSnapshot;
    switch(type) {
        case ACCOUNT_REPORT:
            reducedRawSnapshot = reduceByAccount(rawPartitionSnapshot);
            reducedSnapshot = reduceByAccount(partitionSnapshot);
            break;
        case PARTITION_CLASS_REPORT:
            reducedRawSnapshot = reduceByPartitionClass(rawPartitionSnapshot);
            reducedSnapshot = reduceByPartitionClass(partitionSnapshot);
            break;
        default:
            throw new IllegalArgumentException("Unrecognized stats report type: " + type);
    }
    reducedRawSnapshot.removeZeroValueSnapshots();
    reducedSnapshot.removeZeroValueSnapshots();
    if (logger.isTraceEnabled()) {
        logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
        logger.trace("Reduced valid snapshot {}", mapper.writeValueAsString(reducedSnapshot));
    }
    return new Pair<>(reducedRawSnapshot, reducedSnapshot);
}
Also used : HashMap(java.util.HashMap) StatsHeader(com.github.ambry.server.StatsHeader) IOException(java.io.IOException) Map(java.util.Map) StatsWrapper(com.github.ambry.server.StatsWrapper) StatsSnapshot(com.github.ambry.server.StatsSnapshot) Pair(com.github.ambry.utils.Pair)
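
The method hands back both aggregates in one return value, so a caller picks them apart with getFirst() and getSecond(). A minimal consumption sketch (hypothetical caller code, assuming an aggregator instance and a populated statsWrappers map):

// Hypothetical caller unpacking the (raw, valid) snapshot pair.
Pair<StatsSnapshot, StatsSnapshot> result =
    aggregator.doWorkOnStatsWrapperMap(statsWrappers, StatsReportType.ACCOUNT_REPORT, true);
StatsSnapshot reducedRaw = result.getFirst();    // reduced over all reported stats
StatsSnapshot reducedValid = result.getSecond(); // reduced over the stats combined as valid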

Aggregations

Pair (com.github.ambry.utils.Pair): 64 usages
ArrayList (java.util.ArrayList): 29 usages
HashMap (java.util.HashMap): 28 usages
Map (java.util.Map): 28 usages
Test (org.junit.Test): 20 usages
IOException (java.io.IOException): 15 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 14 usages
List (java.util.List): 14 usages
ByteBuffer (java.nio.ByteBuffer): 13 usages
Collections (java.util.Collections): 13 usages
File (java.io.File): 12 usages
Assert (org.junit.Assert): 12 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 11 usages
Utils (com.github.ambry.utils.Utils): 10 usages
HashSet (java.util.HashSet): 10 usages
Properties (java.util.Properties): 10 usages
Container (com.github.ambry.account.Container): 9 usages
TestUtils (com.github.ambry.utils.TestUtils): 9 usages
Arrays (java.util.Arrays): 9 usages
Set (java.util.Set): 9 usages