Use of com.github.ambry.clustermap.ClusterMap in project ambry by linkedin.
The class CloudBlobStoreIntegrationTest, method setup.
@Before
public void setup() throws ReflectiveOperationException {
  Properties testProperties = new Properties();
  try (InputStream input = this.getClass().getClassLoader().getResourceAsStream(PROPS_FILE_NAME)) {
    if (input == null) {
      throw new IllegalStateException("Could not find resource: " + PROPS_FILE_NAME);
    }
    testProperties.load(input);
  } catch (IOException ex) {
    // Chain the cause so the original failure is not lost.
    throw new IllegalStateException("Could not load properties from resource: " + PROPS_FILE_NAME, ex);
  }
  testProperties.setProperty("clustermap.cluster.name", "Integration-Test");
  testProperties.setProperty("clustermap.datacenter.name", "uswest");
  testProperties.setProperty("clustermap.host.name", "localhost");
  testProperties.setProperty("kms.default.container.key",
      "B374A26A71490437AA024E4FADD5B497FDFF1A8EA6FF12F6FB65AF2720B59CCF");
  testProperties.setProperty(CloudConfig.CLOUD_DELETED_BLOB_RETENTION_DAYS, String.valueOf(1));
  testProperties.setProperty(AzureCloudConfig.AZURE_PURGE_BATCH_SIZE, "10");
  testProperties.setProperty(CloudConfig.CLOUD_IS_VCR, Boolean.toString(isVcr));
  verifiableProperties = new VerifiableProperties(testProperties);
  azureCloudConfig = new AzureCloudConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  partitionId = new Partition(666, clusterMapConfig.clusterMapDefaultPartitionClass, PartitionState.READ_WRITE,
      100 * 1024 * 1024 * 1024L);
  ClusterMap clusterMap = new MockClusterMap(false, Collections.singletonList(
      new MockDataNodeId(Collections.singletonList(new Port(6666, PortType.PLAINTEXT)),
          Collections.singletonList("test"), "AzureTest")), 1, Collections.singletonList(partitionId), "AzureTest");
  MetricRegistry registry = new MetricRegistry();
  vcrMetrics = new VcrMetrics(registry);
  azureMetrics = new AzureMetrics(registry);
  CloudDestinationFactory cloudDestinationFactory =
      Utils.getObj(cloudConfig.cloudDestinationFactoryClass, verifiableProperties, registry, clusterMap);
  cloudDestination = cloudDestinationFactory.getCloudDestination();
  cloudBlobStore = new CloudBlobStore(verifiableProperties, partitionId, cloudDestination, clusterMap, vcrMetrics);
  cloudBlobStore.start();
}
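The interesting step here is Utils.getObj, which instantiates the CloudDestinationFactory reflectively from the class name configured in cloudConfig.cloudDestinationFactoryClass, so the concrete cloud backend can be swapped through properties alone. A minimal sketch of that reflective pattern (illustrative only; the helper name and the arity-based constructor matching are assumptions, not Ambry's Utils.getObj implementation):

  // Illustrative sketch (not Ambry's Utils.getObj): build an object from a
  // configured class name by matching a constructor on argument count.
  static <T> T newInstance(String className, Object... args) throws ReflectiveOperationException {
    Class<?> clazz = Class.forName(className);
    for (java.lang.reflect.Constructor<?> ctor : clazz.getDeclaredConstructors()) {
      if (ctor.getParameterCount() == args.length) {
        @SuppressWarnings("unchecked")
        T instance = (T) ctor.newInstance(args);
        return instance;
      }
    }
    throw new NoSuchMethodException(className + " has no " + args.length + "-arg constructor");
  }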
Use of com.github.ambry.clustermap.ClusterMap in project ambry by linkedin.
The class FrontendUtilsTest, method testGetBlobIdFromString.
/**
* Tests {@link FrontendUtils#getBlobIdFromString(String, ClusterMap)}
* @throws IOException
* @throws RestServiceException
*/
@Test
public void testGetBlobIdFromString() throws IOException, RestServiceException {
  // good path
  byte[] bytes = new byte[2];
  ClusterMap referenceClusterMap = new MockClusterMap();
  TestUtils.RANDOM.nextBytes(bytes);
  BlobId.BlobIdType referenceType =
      TestUtils.RANDOM.nextBoolean() ? BlobId.BlobIdType.NATIVE : BlobId.BlobIdType.CRAFTED;
  TestUtils.RANDOM.nextBytes(bytes);
  byte referenceDatacenterId = bytes[0];
  short referenceAccountId = getRandomShort(TestUtils.RANDOM);
  short referenceContainerId = getRandomShort(TestUtils.RANDOM);
  PartitionId referencePartitionId =
      referenceClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
  boolean referenceIsEncrypted = TestUtils.RANDOM.nextBoolean();
  List<Short> versions = Arrays.stream(BlobId.getAllValidVersions())
      .filter(version -> version >= BlobId.BLOB_ID_V3)
      .collect(Collectors.toList());
  for (short version : versions) {
    BlobId blobId = new BlobId(version, referenceType, referenceDatacenterId, referenceAccountId,
        referenceContainerId, referencePartitionId, referenceIsEncrypted, BlobId.BlobDataType.DATACHUNK);
    BlobId regeneratedBlobId = FrontendUtils.getBlobIdFromString(blobId.getID(), referenceClusterMap);
    assertEquals("BlobId mismatch", blobId, regeneratedBlobId);
    assertBlobIdFieldValues(regeneratedBlobId, referenceType, referenceDatacenterId, referenceAccountId,
        referenceContainerId, referencePartitionId, version >= BlobId.BLOB_ID_V4 && referenceIsEncrypted);
    // bad path
    try {
      FrontendUtils.getBlobIdFromString(blobId.getID().substring(1), referenceClusterMap);
      fail("Should have thrown exception for bad blobId");
    } catch (RestServiceException e) {
      assertEquals("RestServiceErrorCode mismatch", RestServiceErrorCode.BadRequest, e.getErrorCode());
    }
  }
}
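The bad path works because the leading characters of a serialized blob ID carry its version and framing, so stripping the first character (substring(1)) makes the string undecodable. A plausible shape for such a helper, assuming BlobId exposes a deserializing (String, ClusterMap) constructor as the round trip above suggests (a sketch, not necessarily the actual FrontendUtils implementation):

  // Sketch only: map any deserialization failure to a client-facing BadRequest.
  static BlobId getBlobIdFromString(String blobIdStr, ClusterMap clusterMap) throws RestServiceException {
    try {
      // Assumed deserializing constructor; the real helper may parse differently.
      return new BlobId(blobIdStr, clusterMap);
    } catch (Exception e) {
      throw new RestServiceException("Invalid blob id: " + blobIdStr, RestServiceErrorCode.BadRequest);
    }
  }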
Use of com.github.ambry.clustermap.ClusterMap in project ambry by linkedin.
The class FrontendRestRequestServiceFactoryTest, method getFrontendRestRequestServiceFactoryWithBadInputTest.
/**
* Tests instantiation of {@link FrontendRestRequestServiceFactory} with bad input.
* @throws Exception
*/
@Test
public void getFrontendRestRequestServiceFactoryWithBadInputTest() throws Exception {
  // dud properties. server should pick up defaults
  Properties properties = new Properties();
  VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
  ClusterMap clusterMap = new MockClusterMap();
  Router router = new InMemoryRouter(verifiableProperties, clusterMap);
  AccountService accountService = new InMemAccountService(false, true);
  // VerifiableProperties null.
  try {
    new FrontendRestRequestServiceFactory(null, clusterMap, router, accountService);
    fail("Instantiation should have failed because VerifiableProperties was null");
  } catch (NullPointerException e) {
    // expected. Nothing to do.
  }
  // ClusterMap null.
  try {
    new FrontendRestRequestServiceFactory(verifiableProperties, null, router, accountService);
    fail("Instantiation should have failed because ClusterMap was null");
  } catch (NullPointerException e) {
    // expected. Nothing to do.
  }
  // Router null.
  try {
    new FrontendRestRequestServiceFactory(verifiableProperties, clusterMap, null, accountService);
    fail("Instantiation should have failed because Router was null");
  } catch (NullPointerException e) {
    // expected. Nothing to do.
  }
  // AccountService null.
  try {
    new FrontendRestRequestServiceFactory(verifiableProperties, clusterMap, router, null);
    fail("Instantiation should have failed because AccountService was null");
  } catch (NullPointerException e) {
    // expected. Nothing to do.
  }
}
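Each case expects a NullPointerException at construction time, which suggests the factory null-checks its dependencies eagerly rather than failing later during use. A minimal sketch of that validation pattern (assumed, not the verbatim constructor):

  // Assumed eager null-validation pattern; the actual constructor may differ.
  public FrontendRestRequestServiceFactory(VerifiableProperties verifiableProperties, ClusterMap clusterMap,
      Router router, AccountService accountService) {
    this.verifiableProperties = Objects.requireNonNull(verifiableProperties, "verifiableProperties is null");
    this.clusterMap = Objects.requireNonNull(clusterMap, "clusterMap is null");
    this.router = Objects.requireNonNull(router, "router is null");
    this.accountService = Objects.requireNonNull(accountService, "accountService is null");
  }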
Use of com.github.ambry.clustermap.ClusterMap in project ambry by linkedin.
The class DiskTokenPersistorTest, method setup.
/**
 * One-time setup for the tests in this class.
 * @throws Exception if an exception occurs during setup.
 */
@BeforeClass
public static void setup() throws Exception {
  clusterMap = new MockClusterMap();
  mountPathToPartitionInfoList = new HashMap<>();
  mountPathToReplicaTokenInfos = new HashMap<>();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
  DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
  List<? extends ReplicaId> localReplicas = clusterMap.getReplicaIds(dataNodeId);
  replicaId = localReplicas.get(0);
  for (ReplicaId replicaId : localReplicas) {
    List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicas = new ArrayList<>();
    for (ReplicaId remoteReplica : peerReplicas) {
      RemoteReplicaInfo remoteReplicaInfo =
          new RemoteReplicaInfo(remoteReplica, replicaId, null, factory.getNewFindToken(), 10,
              SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
      remoteReplicas.add(remoteReplicaInfo);
      mountPathToReplicaTokenInfos.computeIfAbsent(replicaId.getMountPath(), k -> new ArrayList<>())
          .add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
    }
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, replicaId.getPartitionId(), null, replicaId);
    mountPathToPartitionInfoList.computeIfAbsent(replicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet())
        .add(partitionInfo);
  }
  Properties replicationProperties = new Properties();
  replicationProperties.setProperty("replication.cloud.token.factory", MockFindTokenFactory.class.getName());
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(replicationProperties));
  findTokenHelper = new FindTokenHelper(blobIdFactory, replicationConfig);
  mockStoreManager = Mockito.mock(StoreManager.class);
  Mockito.when(mockStoreManager.checkLocalPartitionStatus(any(), any())).thenReturn(ServerErrorCode.No_Error);
}
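Both maps are populated with the computeIfAbsent idiom: the collection for a mount path is created lazily on first use and then appended to. A self-contained illustration of the same grouping pattern:

  // Self-contained illustration of the computeIfAbsent grouping used above:
  // the bucket for a key is created lazily on first use, then appended to.
  static Map<String, List<Integer>> groupByParity(int[] values) {
    Map<String, List<Integer>> buckets = new HashMap<>();
    for (int n : values) {
      buckets.computeIfAbsent(n % 2 == 0 ? "even" : "odd", k -> new ArrayList<>()).add(n);
    }
    return buckets; // e.g. {even=[2, 4], odd=[1, 3, 5]} for {1, 2, 3, 4, 5}
  }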
Use of com.github.ambry.clustermap.ClusterMap in project ambry by linkedin.
The class ReplicationTest, method replicaThreadLifeVersionLocalLessThanRemote_FinalState_TtlUpdateDelete.
/**
 * Tests the case where the lifeVersion in local is less than the lifeVersion in remote and the final state
 * from remote is a delete with a ttl update.
 * @throws Exception
 */
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_TtlUpdateDelete() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
  storeKeyConverter.setConversionMap(conversionMap);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
  Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  // 5 P, D -> [U, T, D]
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
    // 1 Missing
    StoreKey id = ids.get(0);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
        UPDATED_EXPIRY_TIME_MS, (short) 0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1,
        UPDATED_EXPIRY_TIME_MS);
    idsToBeIgnoredByPartition.put(partitionId, id);
    // 2 P -> [T, D]
    id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
    ids.add(id);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
        UPDATED_EXPIRY_TIME_MS, (short) 0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1,
        UPDATED_EXPIRY_TIME_MS);
    // 3 P, T -> [D]
    id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
    ids.add(id);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost, remoteHost),
        UPDATED_EXPIRY_TIME_MS, (short) 0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1,
        UPDATED_EXPIRY_TIME_MS);
    // 4 P, T, D -> [D]
    id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
    ids.add(id);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost, remoteHost),
        UPDATED_EXPIRY_TIME_MS, (short) 0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0,
        UPDATED_EXPIRY_TIME_MS);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1,
        UPDATED_EXPIRY_TIME_MS);
    // 5 P, D -> [U, T, D]
    id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
    ids.add(id);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0,
        EXPIRY_TIME_MS);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost),
        UPDATED_EXPIRY_TIME_MS, (short) 0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1,
        UPDATED_EXPIRY_TIME_MS);
    ids.add(id);
    idsByPartition.put(partitionId, ids);
  }
  int batchSize = 100;
  Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter,
          transformer, null, null);
  List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
  ReplicaThread replicaThread = replicasAndThread.getSecond();
  // The final states are all deletes, so there are no missing keys.
  List<ReplicaThread.ExchangeMetadataResponse> response =
      replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize),
          remoteReplicaInfos);
  assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
  for (int i = 0; i < response.size(); i++) {
    assertEquals(0, response.get(i).missingStoreMessages.size());
    remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
  }
  // Before the metadata exchange, the local host holds 8 message infos per partition; the exchange adds another 7.
  for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
    assertEquals("MessageInfo number mismatch", 15, localInfoEntry.getValue().size());
  }
  for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
    List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
    List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
    for (StoreKey id : idsEntry.getValue()) {
      if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
        MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
        MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
        assertTrue(localInfo.isDeleted());
        assertTrue(remoteInfo.isDeleted());
        assertTrue(localInfo.isTtlUpdated());
        assertTrue(remoteInfo.isTtlUpdated());
        assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
      }
    }
  }
}
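The closing assertions check convergence: after replication, local and remote agree on the deleted and ttlUpdated flags and on the lifeVersion, with the higher remote lifeVersion superseding local state even where the local store already recorded its own delete. An illustrative model of that merged view (not Ambry code; operation names here are placeholders):

  // Illustrative model (not Ambry code) of the merged state the assertions verify:
  // replaying operations accumulates flags and keeps the highest lifeVersion seen.
  final class MergedState {
    boolean deleted;
    boolean ttlUpdated;
    short lifeVersion;

    void apply(String op, short opLifeVersion) {
      if ("DELETE".equals(op)) {
        deleted = true;
      } else if ("TTL_UPDATE".equals(op)) {
        ttlUpdated = true;
      }
      lifeVersion = (short) Math.max(lifeVersion, opLifeVersion);
    }
  }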