Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
The class UndeleteOperationTrackerTest, method failureForInitialization.
/**
 * Tests the case where there are not enough eligible hosts.
 */
@Test
public void failureForInitialization() {
  assumeTrue(replicasStateEnabled);
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  datanodes = new ArrayList<>(
      Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"),
          new MockDataNodeId(portList, mountPaths, "dc-1"),
          new MockDataNodeId(portList, mountPaths, "dc-2"),
          new MockDataNodeId(portList, mountPaths, "dc-3")));
  mockPartition = new MockPartitionId();
  populateReplicaList(4, ReplicaState.STANDBY);
  populateReplicaList(4, ReplicaState.INACTIVE);
  populateReplicaList(4, ReplicaState.DROPPED);
  localDcName = datanodes.get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
  try {
    getOperationTracker(3);
    fail("Should fail to create an undelete operation tracker because of insufficient eligible hosts");
  } catch (IllegalArgumentException e) {
    // expected
  }
}
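A minimal sketch of the constructor-time check this test exercises: the tracker should fail fast when the eligible replica set is too small for the undelete to ever succeed. The class and member names below are illustrative assumptions, not the actual UndeleteOperationTracker internals.

// Illustrative sketch only; names are assumptions, not Ambry's real internals.
import java.util.List;

class EligibilityCheckSketch {
  // An undelete must succeed on every eligible replica, so construction
  // should fail fast when the eligible set is too small to ever succeed.
  static void validateEligibility(List<?> eligibleReplicas, int requiredCount) {
    if (eligibleReplicas.size() < requiredCount) {
      throw new IllegalArgumentException("Insufficient eligible replicas: found "
          + eligibleReplicas.size() + ", need at least " + requiredCount);
    }
  }
}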
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
The class UndeleteOperationTrackerTest, method failureWithIneligibleNodesTest.
/**
 * Tests the case where there are ineligible hosts.
 */
@Test
public void failureWithIneligibleNodesTest() {
  assumeTrue(replicasStateEnabled);
  List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
  List<String> mountPaths = Collections.singletonList("mockMountPath");
  datanodes = new ArrayList<>(
      Arrays.asList(new MockDataNodeId(portList, mountPaths, "dc-0"),
          new MockDataNodeId(portList, mountPaths, "dc-1"),
          new MockDataNodeId(portList, mountPaths, "dc-2"),
          new MockDataNodeId(portList, mountPaths, "dc-3")));
  mockPartition = new MockPartitionId();
  populateReplicaList(8, ReplicaState.STANDBY);
  populateReplicaList(4, ReplicaState.INACTIVE);
  localDcName = datanodes.get(0).getDatacenterName();
  mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), localDcName);
  // Now any single failure will fail the operation.
  UndeleteOperationTracker tracker = getOperationTracker(2);
  sendRequests(tracker, 2);
  tracker.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.SUCCESS);
  assertFalse("Operation should not have failed", tracker.hasFailed());
  assertFalse("Operation should not have succeeded", tracker.hasSucceeded());
  assertFalse("Operation should not be done", tracker.isDone());
  tracker.onResponse(inflightReplicas.poll(), TrackedRequestFinalState.FAILURE);
  assertTrue("Operation should have failed", tracker.hasFailed());
  assertFalse("Operation should not have succeeded", tracker.hasSucceeded());
  assertTrue("Operation should be done", tracker.isDone());
}
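The assertions above imply an all-or-nothing completion rule: a single FAILURE response fails the whole operation, and the tracker is done once it has either failed or collected enough successes. A minimal sketch of that rule, with illustrative names that are not the real tracker's fields:

// Sketch of the completion rule the test asserts; names are assumptions.
class UndeleteProgressSketch {
  private final int successTarget; // number of successes required
  private int successCount = 0;
  private boolean failed = false;

  UndeleteProgressSketch(int successTarget) {
    this.successTarget = successTarget;
  }

  void onResponse(boolean success) {
    if (success) {
      successCount++;
    } else {
      failed = true; // one failure fails the whole undelete
    }
  }

  boolean hasFailed() {
    return failed;
  }

  boolean hasSucceeded() {
    return !failed && successCount >= successTarget;
  }

  boolean isDone() {
    return hasFailed() || hasSucceeded();
  }
}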
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
The class BlobStoreCompactorTest, method generateLocalAndPeerReplicas.
// helpers
// general
/**
 * Generates a local replica and two peer replicas.
 * @return a list of replicas (the first is the local replica, the others are remote peer replicas)
 */
private List<MockReplicaId> generateLocalAndPeerReplicas() {
  Port port = new Port(6667, PortType.PLAINTEXT);
  List<String> mountPaths = Arrays.asList("/mnt/u001", "/mnt/u002", "/mnt/u003");
  // generate two peer replicas
  MockDataNodeId peerNode1 = new MockDataNodeId("node1_host", Collections.singletonList(port), mountPaths, null);
  MockDataNodeId peerNode2 = new MockDataNodeId("node2_host", Collections.singletonList(port), mountPaths, null);
  MockDataNodeId localNode = new MockDataNodeId("local_host", Collections.singletonList(port), mountPaths, null);
  MockPartitionId mockPartitionId = new MockPartitionId(101L, MockClusterMap.DEFAULT_PARTITION_CLASS);
  MockReplicaId peerReplica1 = new MockReplicaId(port.getPort(), mockPartitionId, peerNode1, 0);
  MockReplicaId peerReplica2 = new MockReplicaId(port.getPort(), mockPartitionId, peerNode2, 1);
  MockReplicaId localReplica = new MockReplicaId(port.getPort(), mockPartitionId, localNode, 2);
  localReplica.setPeerReplicas(Arrays.asList(peerReplica1, peerReplica2));
  return Arrays.asList(localReplica, peerReplica1, peerReplica2);
}
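A hypothetical caller showing how the documented ordering (local replica first, peers after) might be consumed. Only generateLocalAndPeerReplicas comes from the snippet above; the method name and assertions are assumptions for illustration.

// Hypothetical test usage; assumes this sits in the same test class.
private void verifyReplicaOrdering() {
  List<MockReplicaId> replicas = generateLocalAndPeerReplicas();
  MockReplicaId localReplica = replicas.get(0); // local replica comes first by contract
  List<MockReplicaId> peerReplicas = replicas.subList(1, replicas.size());
  assertEquals("Expected exactly two peer replicas", 2, peerReplicas.size());
}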
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
The class CloudStorageCompactorTest, method testCompactPartitions.
/**
 * Test the compactPartitions method.
 */
@Test
public void testCompactPartitions() throws Exception {
  // start with empty map
  assertEquals(0, compactor.compactPartitions());
  int numPartitions = 40;
  // add partitions to map
  String defaultClass = MockClusterMap.DEFAULT_PARTITION_CLASS;
  for (int i = 0; i < numPartitions; i++) {
    partitionMap.put(new MockPartitionId(i, defaultClass), null);
    when(mockDest.compactPartition(eq(Integer.toString(i)))).thenReturn(pageSize);
  }
  assertEquals(pageSize * numPartitions, compactor.compactPartitions());
  assertEquals(0, vcrMetrics.compactionFailureCount.getCount());
  // remove a partition from map
  partitionMap.remove(new MockPartitionId(0, defaultClass));
  assertEquals(pageSize * (numPartitions - 1), compactor.compactPartitions());
  assertEquals(0, vcrMetrics.compactionFailureCount.getCount());
  // Make compaction fail for some partitions
  CloudStorageException csex = new CloudStorageException("failure", new RuntimeException("Don't hurt me!"));
  when(mockDest.compactPartition(eq("2"))).thenThrow(csex);
  when(mockDest.compactPartition(eq("20"))).thenThrow(csex);
  assertEquals(pageSize * (numPartitions - 3), compactor.compactPartitions());
  assertEquals(2, vcrMetrics.compactionFailureCount.getCount());
  // Test shutdown
  assertFalse("Should not be shutting down yet", compactor.isShutDown());
  compactor.shutdown();
  assertTrue("Should be shutting down now", compactor.isShutDown());
  // TODO: test shutting down with compaction still in progress (more involved)
}
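The expected totals above follow from a simple aggregation: each partition contributes the count its compaction returns, and a partition whose compaction throws contributes nothing while bumping the failure metric. A hedged sketch of that loop, not CloudStorageCompactor's actual implementation:

// Sketch of the aggregation the assertions rely on; this is not the real
// CloudStorageCompactor code, and the surrounding field names are assumptions.
private int compactPartitionsSketch() {
  int totalBlobsPurged = 0;
  for (PartitionId partition : partitionMap.keySet()) {
    try {
      totalBlobsPurged += mockDest.compactPartition(partition.toPathString());
    } catch (CloudStorageException e) {
      vcrMetrics.compactionFailureCount.inc(); // counted, but compaction continues
    }
  }
  return totalBlobsPurged;
}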
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
The class AzureIntegrationTest, method testBatchQuery.
/**
 * Test batch query on a large number of blobs.
 * @throws Exception on error
 */
@Test
public void testBatchQuery() throws Exception {
  cleanup();
  int numBlobs = 100;
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  long creationTime = System.currentTimeMillis();
  Map<BlobId, byte[]> blobIdtoDataMap = createUnencryptedPermanentBlobs(numBlobs, dataCenterId, accountId,
      containerId, partitionId, blobSize, cloudRequestAgent, azureDest, creationTime);
  List<BlobId> blobIdList = new ArrayList<>(blobIdtoDataMap.keySet());
  long uploadTime = System.currentTimeMillis() - creationTime;
  logger.info("Uploaded {} blobs in {} ms", numBlobs, uploadTime);
  Map<String, CloudBlobMetadata> metadataMap =
      getBlobMetadataWithRetry(blobIdList, partitionId.toPathString(), cloudRequestAgent, azureDest);
  assertEquals("Unexpected size of returned metadata map", numBlobs, metadataMap.size());
  for (BlobId blobId : blobIdList) {
    CloudBlobMetadata metadata = metadataMap.get(blobId.getID());
    assertNotNull("No metadata found for blobId: " + blobId, metadata);
    assertEquals("Unexpected metadata id", blobId.getID(), metadata.getId());
    assertEquals("Unexpected metadata accountId", accountId, metadata.getAccountId());
    assertEquals("Unexpected metadata containerId", containerId, metadata.getContainerId());
    assertEquals("Unexpected metadata partitionId", partitionId.toPathString(), metadata.getPartitionId());
    assertEquals("Unexpected metadata creationTime", creationTime, metadata.getCreationTime());
    assertEquals("Unexpected metadata encryption origin", CloudBlobMetadata.EncryptionOrigin.NONE,
        metadata.getEncryptionOrigin());
    verifyDownloadMatches(blobId, blobIdtoDataMap.get(blobId));
  }
  cleanup();
}
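Querying metadata for all 100 blobs at once is what makes this a "batch query": the ids are split into query-sized pages rather than fetched one by one. A hypothetical sketch of that paging; queryPageSize and the helper name are assumptions, not the Azure destination's actual API:

// Hypothetical paging sketch; queryPageSize and page handling are assumptions.
private List<List<BlobId>> toPages(List<BlobId> blobIds, int queryPageSize) {
  List<List<BlobId>> pages = new ArrayList<>();
  for (int i = 0; i < blobIds.size(); i += queryPageSize) {
    pages.add(blobIds.subList(i, Math.min(i + queryPageSize, blobIds.size())));
  }
  return pages; // each page can then be sent as one metadata query
}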