Search in sources:

Example 26 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

From the class CloudOperationTest, method getBlobAndAssertSuccess.

/**
 * Construct GetBlob operations with appropriate callbacks, then poll those operations until they complete,
 * and ensure that the whole blob data is read out and the contents match.
 * @param blobId id of the blob to get
 * @param expectedLifeVersion the expected lifeVersion from the get operation.
 * @param expectedBlobSize the expected blob size
 * @param expectedBlobProperties the expected {@link BlobProperties} for the blob.
 * @param expectedUserMetadata the expected user metadata
 * @param expectPutContent the expected blob content
 * @param options options of the get blob operation
 * @throws Exception Any unexpected exception
 */
private void getBlobAndAssertSuccess(final BlobId blobId, final short expectedLifeVersion, final int expectedBlobSize, final BlobProperties expectedBlobProperties, final byte[] expectedUserMetadata, final byte[] expectPutContent, final GetBlobOptionsInternal options) throws Exception {
    final CountDownLatch readCompleteLatch = new CountDownLatch(1);
    final AtomicLong readCompleteResult = new AtomicLong(0);
    // callback to compare the data
    Callback<GetBlobResultInternal> callback = (result, exception) -> {
        Assert.assertNull("Shouldn't have exception", exception);
        try {
            BlobInfo blobInfo;
            switch(options.getBlobOptions.getOperationType()) {
                case All:
                    Assert.assertFalse("not supposed to be raw mode", options.getBlobOptions.isRawMode());
                    blobInfo = result.getBlobResult.getBlobInfo();
                    Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
                    Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
                    Assert.assertArrayEquals("User metadata must be the same", expectedUserMetadata, blobInfo.getUserMetadata());
                    Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
                    break;
                case Data:
                    Assert.assertNull("Unexpected blob info in operation result", result.getBlobResult.getBlobInfo());
                    break;
                case BlobInfo:
                    blobInfo = result.getBlobResult.getBlobInfo();
                    Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
                    Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
                    Assert.assertNull("Unexpected blob data in operation result", result.getBlobResult.getBlobDataChannel());
                    Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
            }
        } catch (Throwable e) {
            Assert.fail("Shouldn't receive exception here: " + e);
        }
        if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
            final ByteBufferAsyncWritableChannel asyncWritableChannel = new ByteBufferAsyncWritableChannel();
            Utils.newThread(() -> {
                Future<Long> readIntoFuture = result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null);
                assertBlobReadSuccess(options.getBlobOptions, readIntoFuture, asyncWritableChannel, result.getBlobResult.getBlobDataChannel(), readCompleteLatch, readCompleteResult, expectedBlobSize, expectPutContent);
            }, false).start();
        } else {
            readCompleteLatch.countDown();
        }
    };
    // create GetBlobOperation
    final Map<Integer, GetOperation> correlationIdToGetOperation = new HashMap<>();
    final RequestRegistrationCallback<GetOperation> requestRegistrationCallback = new RequestRegistrationCallback<>(correlationIdToGetOperation);
    NonBlockingRouter.currentOperationsCount.incrementAndGet();
    GetBlobOperation op = new GetBlobOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, callback, routerCallback, blobIdFactory, null, null, null, time, false, null);
    requestRegistrationCallback.setRequestsToSend(new ArrayList<>());
    // Wait for the operation to complete
    while (!op.isOperationComplete()) {
        op.poll(requestRegistrationCallback);
        List<ResponseInfo> responses = sendAndWaitForResponses(requestRegistrationCallback.getRequestsToSend());
        for (ResponseInfo responseInfo : responses) {
            GetResponse getResponse = RouterUtils.extractResponseAndNotifyResponseHandler(responseHandler, routerMetrics, responseInfo, stream -> GetResponse.readFrom(stream, mockClusterMap), response -> {
                ServerErrorCode serverError = response.getError();
                if (serverError == ServerErrorCode.No_Error) {
                    serverError = response.getPartitionResponseInfoList().get(0).getErrorCode();
                }
                return serverError;
            });
            op.handleResponse(responseInfo, getResponse);
            responseInfo.release();
        }
    }
    readCompleteLatch.await();
    Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
    // Ensure that a ChannelClosed exception is not set when the ReadableStreamChannel is closed correctly.
    Assert.assertNull("Callback operation exception should be null", op.getOperationException());
    if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo && !options.getBlobOptions.isRawMode() && !options.getChunkIdsOnly) {
        int sizeWritten = expectedBlobSize;
        if (options.getBlobOptions.getRange() != null) {
            ByteRange range = options.getBlobOptions.getRange().toResolvedByteRange(expectedBlobSize, options.getBlobOptions.resolveRangeOnEmptyBlob());
            sizeWritten = (int) range.getRangeSize();
        }
        Assert.assertEquals("Size read must equal size written", sizeWritten, readCompleteResult.get());
    }
}
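Each example on this page builds a BlobIdFactory from a ClusterMap and hands it to code that must deserialize blob ids (GetBlobOperation above, MessageFormatRecord.deserializeBlobAll and MetadataContentSerDe below). As a hedged sketch of that common contract, using the constructor shown in these examples plus the stream-based StoreKeyFactory.getStoreKey those call sites rely on (the inputStream variable is an assumed piece of surrounding context):

    // Sketch: BlobIdFactory acting as a StoreKeyFactory over a populated ClusterMap.
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    // Reconstruct a store key from serialized message bytes.
    StoreKey key = blobIdFactory.getStoreKey(new DataInputStream(inputStream));
    // These tests rebuild ids from their string form via the BlobId constructor instead:
    BlobId blobId = new BlobId(key.getID(), clusterMap);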

Example 27 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

From the class MockReadableStreamChannel, method verifyBlob.

/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 */
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
    String blobId = requestAndResult.result.result();
    ByteBuffer serializedRequest = serializedRequests.get(blobId);
    PutRequest request = deserializePutRequest(serializedRequest);
    NotificationBlobType notificationBlobType;
    BlobId origBlobId = new BlobId(blobId, mockClusterMap);
    boolean stitchOperation = requestAndResult.chunksToStitch != null;
    if (stitchOperation) {
        assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob, request.getBlobType());
    }
    if (request.getBlobType() == BlobType.MetadataBlob) {
        notificationBlobType = NotificationBlobType.Composite;
        assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
        byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
        List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
        long expectedMaxChunkSize;
        long expectedTotalSize;
        int expectedNumChunks;
        if (stitchOperation) {
            expectedMaxChunkSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
            expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
            expectedNumChunks = requestAndResult.chunksToStitch.size();
        } else {
            expectedMaxChunkSize = chunkSize;
            expectedTotalSize = requestAndResult.putContent.length;
            expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
        }
        if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
            assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
        }
        assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
        assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
        // Verify all dataBlobIds are DataChunk
        for (StoreKey key : dataBlobIds) {
            BlobId origDataBlobId = (BlobId) key;
            assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
        }
        // verify user-metadata
        if (requestAndResult.putBlobProperties.isEncrypted()) {
            ByteBuffer userMetadata = request.getUsermetadata();
            // The reason to call run() directly instead of spinning up a thread via start() is that
            // any exceptions or assertion failures on a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                assertNull("Exception should not be thrown", exception);
                assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
            }).run();
        } else {
            assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
        }
        if (!stitchOperation) {
            verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent, requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
        }
    } else {
        notificationBlobType = requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
        // TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk
        // Once the logic is fixed we should assert Simple.
        BlobDataType dataType = origBlobId.getBlobDataType();
        assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
        byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        if (!requestAndResult.putBlobProperties.isEncrypted()) {
            assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
            assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata, request.getUsermetadata().array());
        } else {
            ByteBuffer userMetadata = request.getUsermetadata();
            // The reason to call run() directly instead of spinning up a thread via start() is that
            // any exceptions or assertion failures on a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content), userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                assertNull("Exception should not be thrown", exception);
                assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
                byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
                decryptedBlobContent.readBytes(blobContent);
                assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
                assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
                decryptedBlobContent.release();
            }).run();
        }
    }
    notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
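The run()-instead-of-start() trick in verifyBlob generalizes to any asynchronous test callback: an assertion only fails the test if its AssertionError propagates on the test thread. A minimal, non-Ambry illustration (condition is a placeholder boolean):

    // Executing the job synchronously keeps AssertionErrors on the test thread.
    Runnable job = () -> Assert.assertTrue("propagates to the test", condition);
    job.run(); // synchronous: a failure here fails the test
    // new Thread(job).start() would hand the failure to the thread's uncaught
    // exception handler instead, and the test could pass silently.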

Example 28 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

From the class Http2NetworkClientTest, method putGetTest.

@Test
public void putGetTest() throws Exception {
    MockClusterMap clusterMap = http2Cluster.getClusterMap();
    DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
    Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()), new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
    // Put a blob
    int blobSize = 1024 * 1024;
    byte[] usermetadata = new byte[1000];
    byte[] data = new byte[blobSize];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
    TestUtils.RANDOM.nextBytes(usermetadata);
    TestUtils.RANDOM.nextBytes(data);
    List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
    // put blob 1
    PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
    RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
    List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
    long startTime = SystemTime.getInstance().milliseconds();
    while (responseInfos.size() == 0) {
        responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
        if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
            fail("Network Client no reponse and timeout.");
        }
        Thread.sleep(30);
    }
    assertEquals("Should be only one response", 1, responseInfos.size());
    DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
    PutResponse putResponse = PutResponse.readFrom(dis);
    assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
    // Get the blob back (MessageFormatFlags.All: properties, user metadata, and data)
    ArrayList<BlobId> ids = new ArrayList<BlobId>();
    MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
    ids.add(blobId1);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest = new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
    request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
    responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
    startTime = SystemTime.getInstance().milliseconds();
    while (responseInfos.size() == 0) {
        responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
        if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
            fail("Network Client no response and timeout.");
        }
        Thread.sleep(30);
    }
    assertEquals("Should be only one response", 1, responseInfos.size());
    dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
    GetResponse resp = GetResponse.readFrom(dis, clusterMap);
    BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
    // verify BlobProperties
    BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
    assertEquals(blobSize, propertyOutput.getBlobSize());
    assertEquals("serviceid1", propertyOutput.getServiceId());
    assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
    assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
    // verify UserMetadata
    byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
    assertArrayEquals(usermetadata, userMetadataOutput);
    // verify content
    byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
    assertArrayEquals("Content mismatch.", data, actualBlobData);
}
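The two polling loops above repeat the same send-then-poll pattern; a hedged refactoring sketch using only the client calls shown in this test (the helper name awaitSingleResponse is hypothetical):

    // Hypothetical helper: send one request, then poll until exactly one response arrives.
    private ResponseInfo awaitSingleResponse(Http2NetworkClient networkClient, RequestInfo request, long timeoutMs) throws InterruptedException {
        List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
        long startTime = SystemTime.getInstance().milliseconds();
        while (responseInfos.size() == 0) {
            responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
            if (SystemTime.getInstance().milliseconds() - startTime >= timeoutMs) {
                fail("Network client gave no response within the timeout.");
            }
            Thread.sleep(30);
        }
        assertEquals("Should be only one response", 1, responseInfos.size());
        return responseInfos.get(0);
    }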

Example 29 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

From the class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest.

static void endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest(String sourceDatacenter, String sslEnabledDatacenters, PortType portType, MockCluster cluster, MockNotificationSystem notificationSystem, Properties routerProps) throws Exception {
    Properties props = new Properties();
    props.setProperty("router.hostname", "localhost");
    props.setProperty("router.datacenter.name", sourceDatacenter);
    props.setProperty("router.put.request.parallelism", "1");
    props.setProperty("router.put.success.target", "1");
    props.setProperty("clustermap.cluster.name", "test");
    props.setProperty("clustermap.datacenter.name", sourceDatacenter);
    props.setProperty("clustermap.host.name", "localhost");
    props.setProperty("kms.default.container.key", TestUtils.getRandomKey(32));
    props.putAll(routerProps);
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
    AccountService accountService = new InMemAccountService(false, true);
    Router router = new NonBlockingRouterFactory(verifiableProperties, cluster.getClusterMap(), notificationSystem, getSSLFactoryIfRequired(verifiableProperties), accountService).getRouter();
    int numberOfRequestsToSend = 15;
    int numberOfVerifierThreads = 3;
    final LinkedBlockingQueue<Payload> payloadQueue = new LinkedBlockingQueue<Payload>();
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>(null);
    final CountDownLatch callbackLatch = new CountDownLatch(numberOfRequestsToSend);
    List<Future<String>> putFutures = new ArrayList<>(numberOfRequestsToSend);
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    for (int i = 0; i < numberOfRequestsToSend; i++) {
        int size = new Random().nextInt(5000);
        final BlobProperties properties = new BlobProperties(size, "service1", "owner id check", "image/jpeg", false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
        final byte[] metadata = new byte[new Random().nextInt(1000)];
        final byte[] blob = new byte[size];
        TestUtils.RANDOM.nextBytes(metadata);
        TestUtils.RANDOM.nextBytes(blob);
        Future<String> future = router.putBlob(properties, metadata, new ByteBufferReadableStreamChannel(ByteBuffer.wrap(blob)), new PutBlobOptionsBuilder().build(), new Callback<String>() {

            @Override
            public void onCompletion(String result, Exception exception) {
                if (exception == null) {
                    payloadQueue.add(new Payload(properties, metadata, blob, result));
                } else {
                    exceptionRef.set(exception);
                }
                callbackLatch.countDown();
            }
        }, QUOTA_CHARGE_EVENT_LISTENER);
        putFutures.add(future);
    }
    for (Future<String> future : putFutures) {
        future.get(20, TimeUnit.SECONDS);
    }
    assertTrue("Did not receive all callbacks in time", callbackLatch.await(1, TimeUnit.SECONDS));
    if (exceptionRef.get() != null) {
        throw exceptionRef.get();
    }
    // keep the first payload for later use
    Payload payload1 = payloadQueue.peek();
    MockClusterMap clusterMap = cluster.getClusterMap();
    BlobId blobId1 = new BlobId(payload1.blobId, clusterMap);
    assertEquals("Did not put expected number of blobs", numberOfRequestsToSend, payloadQueue.size());
    Properties sslProps = new Properties();
    sslProps.putAll(routerProps);
    sslProps.setProperty("clustermap.ssl.enabled.datacenters", sslEnabledDatacenters);
    sslProps.setProperty("clustermap.cluster.name", "test");
    sslProps.setProperty("clustermap.datacenter.name", sourceDatacenter);
    sslProps.setProperty("clustermap.host.name", "localhost");
    sslProps.setProperty("connectionpool.read.timeout.ms", "15000");
    VerifiableProperties vProps = new VerifiableProperties(sslProps);
    ConnectionPool connectionPool = new BlockingChannelConnectionPool(new ConnectionPoolConfig(vProps), new SSLConfig(vProps), new ClusterMapConfig(vProps), new MetricRegistry());
    CountDownLatch verifierLatch = new CountDownLatch(numberOfVerifierThreads);
    AtomicInteger totalRequests = new AtomicInteger(numberOfRequestsToSend);
    AtomicInteger verifiedRequests = new AtomicInteger(0);
    AtomicBoolean cancelTest = new AtomicBoolean(false);
    for (int i = 0; i < numberOfVerifierThreads; i++) {
        Thread thread = new Thread(new Verifier(payloadQueue, verifierLatch, totalRequests, verifiedRequests, cluster.getClusterMap(), cancelTest, portType, connectionPool, notificationSystem, cluster.time));
        thread.start();
    }
    assertTrue("Did not verify in 2 minutes", verifierLatch.await(2, TimeUnit.MINUTES));
    assertEquals(totalRequests.get(), verifiedRequests.get());
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    MockDataNodeId dataNodeId = clusterMap.getDataNodes().get(0);
    Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
    ConnectedChannel channel = connectionPool.checkOutConnection("localhost", port, 10000);
    PartitionId partitionId = blobId1.getPartition();
    // stop the store via AdminRequest
    System.out.println("Begin to stop a BlobStore");
    AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientid2");
    BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StopStore, adminRequest);
    DataInputStream stream = channel.sendAndReceive(controlRequest).getInputStream();
    AdminResponse adminResponse = AdminResponse.readFrom(stream);
    releaseNettyBufUnderneathStream(stream);
    assertEquals("Stop store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
    // put a blob on a stopped store, which should fail
    byte[] userMetadata = new byte[1000];
    byte[] data = new byte[3187];
    BlobProperties properties = new BlobProperties(3187, "serviceid1", accountId, containerId, false, cluster.time.milliseconds());
    BlobId blobId2 = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partitionId, false, BlobId.BlobDataType.DATACHUNK);
    PutRequest putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
    DataInputStream putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
    PutResponse response2 = PutResponse.readFrom(putResponseStream);
    releaseNettyBufUnderneathStream(putResponseStream);
    assertEquals("Put blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, response2.getError());
    // get a blob properties on a stopped store, which should fail
    ArrayList<BlobId> ids = new ArrayList<>();
    ids.add(blobId1);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest1 = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
    stream = channel.sendAndReceive(getRequest1).getInputStream();
    GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
    assertEquals("Get blob properties on stopped store should fail", ServerErrorCode.Replica_Unavailable, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
    releaseNettyBufUnderneathStream(stream);
    // delete a blob on a stopped store, which should fail
    DeleteRequest deleteRequest = new DeleteRequest(1, "clientId1", blobId1, System.currentTimeMillis());
    stream = channel.sendAndReceive(deleteRequest).getInputStream();
    DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
    releaseNettyBufUnderneathStream(stream);
    assertEquals("Delete blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, deleteResponse.getError());
    // start the store via AdminRequest
    System.out.println("Begin to restart the BlobStore");
    adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientId");
    controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StartStore, adminRequest);
    stream = channel.sendAndReceive(controlRequest).getInputStream();
    adminResponse = AdminResponse.readFrom(stream);
    releaseNettyBufUnderneathStream(stream);
    assertEquals("Start store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
    List<? extends ReplicaId> replicaIds = partitionId.getReplicaIds();
    for (ReplicaId replicaId : replicaIds) {
        // forcibly mark replicas and disks as up.
        MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
        mockReplicaId.markReplicaDownStatus(false);
        ((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, false);
    }
    // put a blob on the restarted store, which should succeed
    putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
    putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
    response2 = PutResponse.readFrom(putResponseStream);
    releaseNettyBufUnderneathStream(putResponseStream);
    assertEquals("Put blob on restarted store should succeed", ServerErrorCode.No_Error, response2.getError());
    // verify the put blob has been replicated successfully.
    notificationSystem.awaitBlobCreations(blobId2.getID());
    // get a blob on the restarted store, which should succeed
    ids = new ArrayList<BlobId>();
    ids.add(blobId2);
    partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest2 = new GetRequest(1, "clientId2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
    stream = channel.sendAndReceive(getRequest2).getInputStream();
    GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
    InputStream responseStream = resp2.getInputStream();
    BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
    byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
    assertArrayEquals("Content mismatch.", data, actualBlobData);
    releaseNettyBufUnderneathStream(stream);
    // delete a blob on the restarted store, which should succeed
    deleteRequest = new DeleteRequest(1, "clientId2", blobId2, System.currentTimeMillis());
    stream = channel.sendAndReceive(deleteRequest).getInputStream();
    deleteResponse = DeleteResponse.readFrom(stream);
    releaseNettyBufUnderneathStream(stream);
    assertEquals("Delete blob on restarted store should succeed", ServerErrorCode.No_Error, deleteResponse.getError());
    router.close();
    connectionPool.shutdown();
}
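The stop/start sequence above is symmetric, differing only in the BlobStoreControlAction; a hedged sketch factoring it into a helper built from the exact request and response types used in this test (the helper name sendBlobStoreControl is hypothetical):

    // Hypothetical helper: issue a BlobStoreControl admin request over an existing channel.
    private static AdminResponse sendBlobStoreControl(ConnectedChannel channel, PartitionId partitionId, BlobStoreControlAction action) throws Exception {
        AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientid");
        BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, action, adminRequest);
        DataInputStream stream = channel.sendAndReceive(controlRequest).getInputStream();
        AdminResponse adminResponse = AdminResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        return adminResponse;
    }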

Example 30 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

From the class DumpIndexTool, method main.

public static void main(String[] args) throws Exception {
    final AtomicInteger exitCode = new AtomicInteger(0);
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        StoreConfig storeConfig = new StoreConfig(verifiableProperties);
        // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
        StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
        ServerConfig serverConfig = new ServerConfig(verifiableProperties);
        Time time = SystemTime.getInstance();
        Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties, clusterMap.getMetricRegistry());
        DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics, throttler, storeKeyConverterFactory.getStoreKeyConverter());
        Set<StoreKey> filterKeySet = new HashSet<>();
        for (String key : config.filterSet) {
            filterKeySet.add(new BlobId(key, clusterMap));
        }
        switch(config.typeOfOperation) {
            case DumpIndex:
                dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case DumpIndexSegment:
                dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case VerifyIndex:
                IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet, time.milliseconds(), config.detectDuplicatesAcrossKeys);
                exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
                break;
            case VerifyDataNode:
                DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
                if (dataNodeId == null) {
                    logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
                } else {
                    Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId).stream().map(replicaId -> new File(replicaId.getMountPath())).collect(Collectors.toSet());
                    Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet, config.parallelism, config.detectDuplicatesAcrossKeys);
                    replicaDirs.removeAll(resultsByReplica.keySet());
                    if (replicaDirs.size() != 0) {
                        logger.error("Results obtained missing {}", replicaDirs);
                        exitCode.set(5);
                    } else {
                        resultsByReplica.forEach((replicaDir, result) -> exitCode.set(Math.max(exitCode.get(), reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
                    }
                }
                break;
            default:
                throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
        }
    }
    System.exit(exitCode.get());
}
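The main method wires DumpIndexTool entirely from configs; embedding the tool programmatically follows the same constructor and processIndex call shown above. A hedged sketch (inputDir and the surrounding setup objects are assumed to exist as in main):

    // Sketch: constructing and running DumpIndexTool directly, mirroring the wiring in main.
    DumpIndexTool dumpIndexTool = new DumpIndexTool(new BlobIdFactory(clusterMap), storeConfig, SystemTime.getInstance(), metrics, storeMetrics, throttler, storeKeyConverterFactory.getStoreKeyConverter());
    IndexProcessingResults results = dumpIndexTool.processIndex(inputDir, filterKeySet, SystemTime.getInstance().milliseconds(), true);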

Aggregations

BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 32
ArrayList (java.util.ArrayList): 28
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 22
HashMap (java.util.HashMap): 21
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 20
ClusterMap (com.github.ambry.clustermap.ClusterMap): 19
List (java.util.List): 19
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18
PartitionId (com.github.ambry.clustermap.PartitionId): 18
Map (java.util.Map): 18
Test (org.junit.Test): 18
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 17
BlobId (com.github.ambry.commons.BlobId): 17
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 16
Properties (java.util.Properties): 16
MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory): 13
MetricRegistry (com.codahale.metrics.MetricRegistry): 12
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 12
Transformer (com.github.ambry.store.Transformer): 12
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 11