
Example 6 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class CloudBlobStoreIntegrationTest method setup.

@Before
public void setup() throws ReflectiveOperationException {
    Properties testProperties = new Properties();
    try (InputStream input = this.getClass().getClassLoader().getResourceAsStream(PROPS_FILE_NAME)) {
        if (input == null) {
            throw new IllegalStateException("Could not find resource: " + PROPS_FILE_NAME);
        }
        testProperties.load(input);
    } catch (IOException ex) {
        throw new IllegalStateException("Could not load properties from resource: " + PROPS_FILE_NAME);
    }
    testProperties.setProperty("clustermap.cluster.name", "Integration-Test");
    testProperties.setProperty("clustermap.datacenter.name", "uswest");
    testProperties.setProperty("clustermap.host.name", "localhost");
    testProperties.setProperty("kms.default.container.key", "B374A26A71490437AA024E4FADD5B497FDFF1A8EA6FF12F6FB65AF2720B59CCF");
    testProperties.setProperty(CloudConfig.CLOUD_DELETED_BLOB_RETENTION_DAYS, String.valueOf(1));
    testProperties.setProperty(AzureCloudConfig.AZURE_PURGE_BATCH_SIZE, "10");
    testProperties.setProperty(CloudConfig.CLOUD_IS_VCR, "" + isVcr);
    verifiableProperties = new VerifiableProperties(testProperties);
    azureCloudConfig = new AzureCloudConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
    partitionId = new Partition(666, clusterMapConfig.clusterMapDefaultPartitionClass, PartitionState.READ_WRITE, 100 * 1024 * 1024 * 1024L);
    ClusterMap clusterMap = new MockClusterMap(false,
        Collections.singletonList(new MockDataNodeId(Collections.singletonList(new Port(6666, PortType.PLAINTEXT)),
            Collections.singletonList("test"), "AzureTest")),
        1, Collections.singletonList(partitionId), "AzureTest");
    MetricRegistry registry = new MetricRegistry();
    vcrMetrics = new VcrMetrics(registry);
    azureMetrics = new AzureMetrics(registry);
    CloudDestinationFactory cloudDestinationFactory = Utils.getObj(cloudConfig.cloudDestinationFactoryClass, verifiableProperties, registry, clusterMap);
    cloudDestination = cloudDestinationFactory.getCloudDestination();
    cloudBlobStore = new CloudBlobStore(verifiableProperties, partitionId, cloudDestination, clusterMap, vcrMetrics);
    cloudBlobStore.start();
}
Also used : Partition(com.github.ambry.clustermap.Partition), ClusterMap(com.github.ambry.clustermap.ClusterMap), MockClusterMap(com.github.ambry.clustermap.MockClusterMap), VerifiableProperties(com.github.ambry.config.VerifiableProperties), InputStream(java.io.InputStream), Port(com.github.ambry.network.Port), MetricRegistry(com.codahale.metrics.MetricRegistry), AzureCloudConfig(com.github.ambry.cloud.azure.AzureCloudConfig), CloudConfig(com.github.ambry.config.CloudConfig), IOException(java.io.IOException), Properties(java.util.Properties), ClusterMapConfig(com.github.ambry.config.ClusterMapConfig), MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId), AzureMetrics(com.github.ambry.cloud.azure.AzureMetrics), Before(org.junit.Before)
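The setup above follows a common Ambry test idiom: raw Properties are wrapped in VerifiableProperties, and each config class then parses only the keys it knows about. A stripped-down sketch of that idiom (the key constant is taken from the setup above):

Properties props = new Properties();
props.setProperty(CloudConfig.CLOUD_DELETED_BLOB_RETENTION_DAYS, "1");
VerifiableProperties verifiable = new VerifiableProperties(props);
// CloudConfig reads its cloud.* keys; keys it doesn't declare are left untouched
CloudConfig cloudConfig = new CloudConfig(verifiable);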

Example 7 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class MockHost method getRemoteReplicaInfos.

/**
 * Gets the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}
 * @param remoteHost the host whose replica info is required.
 * @param listener the {@link ReplicationTest.StoreEventListener} to use.
 * @return the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}
 */
List<RemoteReplicaInfo> getRemoteReplicaInfos(MockHost remoteHost, ReplicationTest.StoreEventListener listener) {
    List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
    List<RemoteReplicaInfo> remoteReplicaInfos = new ArrayList<>();
    for (ReplicaId replicaId : replicaIds) {
        for (ReplicaId peerReplicaId : replicaId.getPeerReplicaIds()) {
            if (peerReplicaId.getDataNodeId().equals(remoteHost.dataNodeId)) {
                PartitionId partitionId = replicaId.getPartitionId();
                InMemoryStore store = storesByPartition.computeIfAbsent(partitionId,
                    pid -> new InMemoryStore(pid, infosByPartition.computeIfAbsent(pid, k -> new ArrayList<>()),
                        buffersByPartition.computeIfAbsent(pid, k -> new ArrayList<>()), listener));
                store.start();
                RemoteReplicaInfo remoteReplicaInfo =
                    new RemoteReplicaInfo(peerReplicaId, replicaId, store, new MockFindToken(0, 0), Long.MAX_VALUE,
                        SystemTime.getInstance(), new Port(peerReplicaId.getDataNodeId().getPort(), PortType.PLAINTEXT));
                remoteReplicaInfos.add(remoteReplicaInfo);
            }
        }
    }
    return remoteReplicaInfos;
}
Also used : Arrays(java.util.Arrays), StoreKeyConverter(com.github.ambry.store.StoreKeyConverter), DataNodeId(com.github.ambry.clustermap.DataNodeId), ClusterMap(com.github.ambry.clustermap.ClusterMap), HashMap(java.util.HashMap), Function(java.util.function.Function), ByteBuffer(java.nio.ByteBuffer), ArrayList(java.util.ArrayList), StoreKey(com.github.ambry.store.StoreKey), List(java.util.List), PortType(com.github.ambry.network.PortType), MessageInfo(com.github.ambry.store.MessageInfo), ReplicaId(com.github.ambry.clustermap.ReplicaId), Map(java.util.Map), SystemTime(com.github.ambry.utils.SystemTime), Port(com.github.ambry.network.Port), Collections(java.util.Collections), PartitionId(com.github.ambry.clustermap.PartitionId)
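A hypothetical call site for the helper above (localHost and remoteHost are assumed MockHost instances built on the same clusterMap; a null listener means no store-event callbacks):

List<RemoteReplicaInfo> replicaInfos = localHost.getRemoteReplicaInfos(remoteHost, null);
for (RemoteReplicaInfo info : replicaInfos) {
    // each entry pairs a local replica with its peer on remoteHost,
    // reachable over the PLAINTEXT Port constructed inside the helper
    System.out.println(info);
}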

Example 8 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class CloudToStoreReplicationManagerTest method testGetCloudDataNode.

/**
 * Test {@code CloudToStoreReplicationManager#getCloudDataNode}
 */
@Test
public void testGetCloudDataNode() throws NoSuchFieldException, ReplicationException {
    CloudToStoreReplicationManager mockCloudToStoreReplicationManager = mock(CloudToStoreReplicationManager.class);
    List<DataNodeId> dataNodeIds = new ArrayList<>();
    Port port = new Port(1000, PortType.PLAINTEXT);
    AtomicReference<List<DataNodeId>> vcrNodes = new AtomicReference<>();
    vcrNodes.set(dataNodeIds);
    FieldSetter.setField(mockCloudToStoreReplicationManager, CloudToStoreReplicationManager.class.getDeclaredField("vcrNodes"), vcrNodes);
    when(mockCloudToStoreReplicationManager.getCloudDataNode()).thenCallRealMethod();
    // test getCloudDataNode() with empty vcrNodes
    try {
        mockCloudToStoreReplicationManager.getCloudDataNode();
        fail("Calling getCloudDataNode when there are no vcrNodes should throw exception.");
    } catch (ReplicationException rex) {
        // expected: vcrNodes is empty since nothing has been added yet
    }
    // add vcr nodes.
    for (int i = 0; i < 100; i++) {
        dataNodeIds.add(new MockDataNodeId("hosname" + i, Collections.singletonList(port), null, null));
    }
    // Make sure that calling getCloudDataNode() doesn't return the same node every time.
    try {
        Set<String> dataNodeIdSet = new HashSet<>();
        for (int i = 0; i < 5; i++) {
            dataNodeIdSet.add(mockCloudToStoreReplicationManager.getCloudDataNode().getHostname());
        }
        assertTrue("getCloudDataNode shouldn't return same node every time", dataNodeIdSet.size() > 1);
    } catch (ReplicationException rex) {
        fail("getCloudDataNode shouldn't fail if vcrNodes is not empty");
    }
}
Also used : Port(com.github.ambry.network.Port), ArrayList(java.util.ArrayList), AtomicReference(java.util.concurrent.atomic.AtomicReference), MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId), List(java.util.List), DataNodeId(com.github.ambry.clustermap.DataNodeId), HashSet(java.util.HashSet), Test(org.junit.Test)
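The test above relies on a Mockito partial mock: mock the class, inject internal state reflectively, then let one method execute for real. A minimal sketch of the same pattern with hypothetical names (Picker and its nodes field are illustrative, not Ambry APIs):

Picker mockPicker = mock(Picker.class);
FieldSetter.setField(mockPicker, Picker.class.getDeclaredField("nodes"), nodeList);
when(mockPicker.pickNode()).thenCallRealMethod();
mockPicker.pickNode(); // runs the real method body against the injected field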

Example 9 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class UndeleteOperation method fetchRequests.

/**
 * Fetch {@link UndeleteRequest}s to send for the operation.
 * @param requestRegistrationCallback the {@link RequestRegistrationCallback} to use for addition of requests that
 *                                    need to be sent to the storage server
 */
private void fetchRequests(RequestRegistrationCallback<UndeleteOperation> requestRegistrationCallback) {
    Iterator<ReplicaId> replicaIterator = operationTracker.getReplicaIterator();
    while (replicaIterator.hasNext()) {
        ReplicaId replica = replicaIterator.next();
        String hostname = replica.getDataNodeId().getHostname();
        Port port = RouterUtils.getPortToConnectTo(replica, routerConfig.routerEnableHttp2NetworkClient);
        UndeleteRequest undeleteRequest = createUndeleteRequest();
        undeleteRequestInfos.put(undeleteRequest.getCorrelationId(), new UndeleteRequestInfo(time.milliseconds(), replica));
        RequestInfo requestInfo = new RequestInfo(hostname, port, undeleteRequest, replica, operationQuotaCharger);
        requestRegistrationCallback.registerRequestToSend(this, requestInfo);
        replicaIterator.remove();
        if (RouterUtils.isRemoteReplica(routerConfig, replica)) {
            LOGGER.trace("Making request with correlationId {} to a remote replica {} in {} ", undeleteRequest.getCorrelationId(), replica.getDataNodeId(), replica.getDataNodeId().getDatacenterName());
            routerMetrics.crossColoRequestCount.inc();
        } else {
            LOGGER.trace("Making request with correlationId {} to a local replica {} ", undeleteRequest.getCorrelationId(), replica.getDataNodeId());
        }
        routerMetrics.getDataNodeBasedMetrics(replica.getDataNodeId()).undeleteRequestRate.mark();
    }
}
Also used : Port(com.github.ambry.network.Port), UndeleteRequest(com.github.ambry.protocol.UndeleteRequest), RequestInfo(com.github.ambry.network.RequestInfo), ReplicaId(com.github.ambry.clustermap.ReplicaId)
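RouterUtils.getPortToConnectTo chooses the Port based on whether the HTTP2 network client is enabled. A plausible sketch of that selection, not the verbatim implementation (the DataNodeId accessors shown are assumptions):

static Port portToConnectTo(ReplicaId replica, boolean enableHttp2) {
    DataNodeId node = replica.getDataNodeId();
    // HTTP2-enabled routers target the node's HTTP2 port; otherwise the node
    // itself advertises its preferred port (PLAINTEXT or SSL)
    return enableHttp2 ? new Port(node.getHttp2Port(), PortType.HTTP2) : node.getPortToConnectTo();
}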

Example 10 with Port

use of com.github.ambry.network.Port in project ambry by linkedin.

the class AdaptiveOperationTrackerTest method diskLevelAdaptiveTrackerTest.

/**
 * Tests that the adaptive tracker uses a separate disk-level histogram to determine if inflight requests are past due.
 * Mocks the partition layout below for this test. It also covers the case where new nodes and a new partition
 * are dynamically added.
 *             |       |   Partition 1 |  Partition 2  | Partition 3 (added at runtime)
 * -------------------------------------------------------------------------------------
 * LocalHost1  | Disk0 |   Replica_1   |               |
 *             | Disk1 |               |  Replica_1    |
 * -------------------------------------------------------------------------------------
 * RemoteHost1 | Disk0 |   Replica_2   |  Replica_2    |
 *             | Disk1 |   Replica_3   |  Replica_3    |
 * -------------------------------------------------------------------------------------
 *  NewNode1   | Disk0 |               |               |          Replica_1
 *             | Disk1 |               |               |
 * -------------------------------------------------------------------------------------
 *  NewNode2   | Disk0 |               |               |
 *             | Disk1 |               |               |          Replica_2
 * @throws Exception
 */
@Test
public void diskLevelAdaptiveTrackerTest() throws Exception {
    MockPartitionId mockPartition1 = new MockPartitionId(1L, MockClusterMap.DEFAULT_PARTITION_CLASS);
    MockPartitionId mockPartition2 = new MockPartitionId(2L, MockClusterMap.DEFAULT_PARTITION_CLASS);
    // create a new list of mock datanodes instead of using the default class member
    List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
    List<String> mountPaths = Arrays.asList("mockMountPath0", "mockMountPath1");
    MockDataNodeId localHost1 = new MockDataNodeId("LocalHost1", portList, mountPaths, "dc-0");
    MockDataNodeId remoteHost1 = new MockDataNodeId("RemoteHost1", portList, mountPaths, "dc-1");
    List<MockDataNodeId> datanodes = new ArrayList<>(Arrays.asList(localHost1, remoteHost1));
    // distribute replicas to nodes (note that the local DC name is still "dc-0" in this setup)
    ReplicaId partition1Replica1 = new MockReplicaId(PORT, mockPartition1, localHost1, 0);
    ReplicaId partition1Replica2 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 0);
    ReplicaId partition1Replica3 = new MockReplicaId(PORT, mockPartition1, remoteHost1, 1);
    ReplicaId partition2Replica1 = new MockReplicaId(PORT, mockPartition2, localHost1, 1);
    mockPartition1.replicaIds.add(partition1Replica1);
    mockPartition1.replicaIds.add(partition1Replica2);
    mockPartition1.replicaIds.add(partition1Replica3);
    mockPartition2.replicaIds.add(partition2Replica1);
    mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 0));
    mockPartition2.replicaIds.add(new MockReplicaId(PORT, mockPartition2, remoteHost1, 1));
    MockClusterMap clusterMap = new MockClusterMap(false, datanodes, 2, Arrays.asList(mockPartition1, mockPartition2), localDcName);
    trackerScope = OperationTrackerScope.Disk;
    RouterConfig routerConfig = createRouterConfig(true, 1, 1, 6, null, true);
    NonBlockingRouterMetrics originalMetrics = routerMetrics;
    routerMetrics = new NonBlockingRouterMetrics(clusterMap, routerConfig);
    Counter pastDueCount = routerMetrics.getBlobPastDueCount;
    Map<Resource, CachedHistogram> localColoMap = routerMetrics.getBlobLocalDcResourceToLatency;
    Map<Resource, CachedHistogram> crossColoMap = routerMetrics.getBlobCrossDcResourceToLatency;
    // mock different latency distribution of different disks
    Histogram localHostDisk0Histogram = localColoMap.get(partition1Replica1.getDiskId());
    Histogram localHostDisk1Histogram = localColoMap.get(partition2Replica1.getDiskId());
    Histogram remoteHostDisk0Histogram = crossColoMap.get(partition1Replica2.getDiskId());
    Histogram remoteHostDisk1Histogram = crossColoMap.get(partition1Replica3.getDiskId());
    primeTracker(localHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(0L, 50L));
    primeTracker(localHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(100L, 120L));
    primeTracker(remoteHostDisk0Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L));
    primeTracker(remoteHostDisk1Histogram, routerConfig.routerOperationTrackerMinDataPointsRequired, new Pair<>(150L, 180L));
    double localHostDisk0Cutoff = localHostDisk0Histogram.getSnapshot().getValue(QUANTILE);
    double localHostDisk1Cutoff = localHostDisk1Histogram.getSnapshot().getValue(QUANTILE);
    double remoteHostDisk0Cutoff = remoteHostDisk0Histogram.getSnapshot().getValue(QUANTILE);
    OperationTracker tracker1 = getOperationTracker(routerConfig, mockPartition1);
    OperationTracker tracker2 = getOperationTracker(routerConfig, mockPartition2);
    // issue first request for both partitions in local DC
    sendRequests(tracker2, 1);
    sendRequests(tracker1, 1);
    // partition1: 0-1-0-0, partition2: 0-1-0-0
    time.sleep((long) localHostDisk0Cutoff + 1);
    // partition1 should send its 2nd request to RemoteHost1; partition2 won't because its 1st request isn't past due.
    sendRequests(tracker1, 1);
    sendRequests(tracker2, 0);
    // partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 2-0-0-0(remote)
    time.sleep((long) (localHostDisk1Cutoff - localHostDisk0Cutoff) + 2);
    // partition2's Replica1 on LocalHost1 Disk1 is past due, so the request should be sent to the remote host
    sendRequests(tracker1, 0);
    sendRequests(tracker2, 1);
    // partition1: 0-1-0-0(local), 1-1-0-0(remote); partition2: 0-1-0-0(local), 2-0-0-0(remote)
    time.sleep((long) remoteHostDisk0Cutoff + 1);
    // both requests are past due (note that the two remote disks have the same latency histogram)
    sendRequests(tracker1, 1);
    sendRequests(tracker2, 1);
    assertFalse("Operation should not be done", tracker1.isDone() || tracker2.isDone());
    // make local requests successful
    tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.SUCCESS);
    tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.SUCCESS);
    // make remote requests fail
    tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.FAILURE);
    tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.TIMED_OUT);
    tracker1.onResponse(partitionAndInflightReplicas.get(mockPartition1).poll(), TrackedRequestFinalState.TIMED_OUT);
    tracker2.onResponse(partitionAndInflightReplicas.get(mockPartition2).poll(), TrackedRequestFinalState.FAILURE);
    assertTrue("Operation should have succeeded", tracker1.hasSucceeded() && tracker2.hasSucceeded());
    // past due count should be 4 because for each partition there was one local and one remote request that didn't get
    // a response within the threshold. In total: 2 * (1 + 1) = 4.
    assertEquals("Past due counter is not expected", 4, pastDueCount.getCount());
    // number of data points in local colo histogram should be 2 because both requests finally succeeded
    assertEquals("Mismatch in number of data points in local colo histogram", 2, routerMetrics.getBlobLocalDcLatencyMs.getCount());
    // number of data points in cross colo histogram should be 2 because two timed-out requests should be counted
    assertEquals("Mismatch in number of data points in cross colo histogram", 2, routerMetrics.getBlobCrossDcLatencyMs.getCount());
    // additional test: dynamically add 1 new partition and 2 new nodes; each new node hosts a replica of the new partition
    MockDataNodeId newNode1 = clusterMap.createNewDataNodes(1, "dc-0").get(0);
    MockDataNodeId newNode2 = clusterMap.createNewDataNodes(1, "dc-1").get(0);
    MockPartitionId mockPartition3 = new MockPartitionId(3L, MockClusterMap.DEFAULT_PARTITION_CLASS);
    mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode1, 0));
    mockPartition3.replicaIds.add(new MockReplicaId(PORT, mockPartition3, newNode2, 1));
    OperationTracker tracker3 = getOperationTracker(routerConfig, mockPartition3);
    // send 1st request
    sendRequests(tracker3, 1);
    // attempt to send the 2nd one. This triggers router metrics to create a histogram associated with the new disk.
    // However, no 2nd request goes out because the newly created histogram doesn't have enough data points yet.
    sendRequests(tracker3, 0);
    // make the 1st request fail
    tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.FAILURE);
    // 2nd request is sent
    sendRequests(tracker3, 1);
    // make the 2nd request succeed
    tracker3.onResponse(partitionAndInflightReplicas.get(mockPartition3).poll(), TrackedRequestFinalState.SUCCESS);
    assertTrue("Operation should have succeeded", tracker3.hasSucceeded());
    // restore the tracker scope and routerMetrics
    trackerScope = OperationTrackerScope.Datacenter;
    routerMetrics = originalMetrics;
}
Also used : Histogram(com.codahale.metrics.Histogram), CachedHistogram(com.github.ambry.utils.CachedHistogram), MockPartitionId(com.github.ambry.clustermap.MockPartitionId), Port(com.github.ambry.network.Port), ArrayList(java.util.ArrayList), Resource(com.github.ambry.clustermap.Resource), MockReplicaId(com.github.ambry.clustermap.MockReplicaId), ReplicaId(com.github.ambry.clustermap.ReplicaId), RouterConfig(com.github.ambry.config.RouterConfig), Counter(com.codahale.metrics.Counter), MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId), MockClusterMap(com.github.ambry.clustermap.MockClusterMap), Test(org.junit.Test)
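The cutoffs in this test come from priming each disk's latency histogram and reading a quantile from its snapshot. A minimal sketch of that mechanic using plain Codahale metrics (the 0.9 quantile and latency bounds are illustrative; primeTracker's internals are assumed):

Histogram diskHistogram = new Histogram(new ExponentiallyDecayingReservoir());
// prime with synthetic latencies in [0, 50) ms, mirroring the Disk0 range above
for (int i = 0; i < 1000; i++) {
    diskHistogram.update(ThreadLocalRandom.current().nextLong(0L, 50L));
}
// an inflight request older than this value is considered past due
double cutoffMs = diskHistogram.getSnapshot().getValue(0.9);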

Aggregations

Port (com.github.ambry.network.Port): 64
Test (org.junit.Test): 33
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 29
ArrayList (java.util.ArrayList): 28
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 27
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 23
DataNodeId (com.github.ambry.clustermap.DataNodeId): 22
ReplicaId (com.github.ambry.clustermap.ReplicaId): 17
BlobId (com.github.ambry.commons.BlobId): 15
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 15
BlobProperties (com.github.ambry.messageformat.BlobProperties): 15
Properties (java.util.Properties): 15
MetricRegistry (com.codahale.metrics.MetricRegistry): 12
PartitionId (com.github.ambry.clustermap.PartitionId): 12
ConnectedChannel (com.github.ambry.network.ConnectedChannel): 12
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 10
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 9
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 9
GetResponse (com.github.ambry.protocol.GetResponse): 9
DataInputStream (java.io.DataInputStream): 9