Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
The class CompactionManagerTest, method testDisableCompactionForBlobStore.
/**
* Tests for cases where compaction is disabled on a given BlobStore
*/
@Test
public void testDisableCompactionForBlobStore() throws Exception {
  // Without compaction enabled (no compaction executor is instantiated).
  compactionManager.enable();
  assertTrue("Disabling compaction on a BlobStore should succeed when the compaction executor is not instantiated",
      compactionManager.disableCompactionForBlobStore(blobStore));
  compactionManager.disable();
  compactionManager.awaitTermination();
  // With compaction enabled: rebuild the manager with all compaction triggers configured.
  properties.setProperty("store.compaction.triggers", ALL_COMPACTION_TRIGGERS);
  config = new StoreConfig(new VerifiableProperties(properties));
  StorageManagerMetrics metrics = new StorageManagerMetrics(new MetricRegistry());
  compactionManager = new CompactionManager(MOUNT_PATH, config, Collections.singleton(blobStore), metrics, time);
  compactionManager.enable();
  assertTrue("Disabling compaction on the given BlobStore should succeed",
      compactionManager.disableCompactionForBlobStore(blobStore));
  assertFalse("BlobStore should not be scheduled for compaction after compaction is disabled on it",
      compactionManager.scheduleNextForCompaction(blobStore));
  compactionManager.disable();
  compactionManager.awaitTermination();
}
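For context, the test flips compaction on by setting the store.compaction.triggers property before rebuilding the manager. Below is a minimal, self-contained sketch of just that configuration step; the trigger names "Periodic" and "Admin" and the storeCompactionTriggers field name follow the Ambry source as I understand it, so treat them as assumptions to verify against your version.

import java.util.Properties;

import com.github.ambry.config.StoreConfig;
import com.github.ambry.config.VerifiableProperties;

public class CompactionTriggerConfigSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Comma-separated trigger list; an empty value (the default) means the
    // CompactionManager never instantiates its compaction executor.
    props.setProperty("store.compaction.triggers", "Periodic,Admin");
    StoreConfig config = new StoreConfig(new VerifiableProperties(props));
    System.out.println("Configured triggers: " + config.storeCompactionTriggers);
  }
}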
Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
The class HelixVcrClusterParticipantTest, method createHelixVcrClusterParticipant.
/**
 * Helper function to create a {@link HelixVcrClusterParticipant}.
 * @param clusterMapPort the cluster map port of the instance.
 * @param vcrSslPort the SSL port of this VCR.
 */
private VcrClusterParticipant createHelixVcrClusterParticipant(int clusterMapPort, int vcrSslPort) throws Exception {
  Properties props = new Properties();
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.resolve.hostnames", "false");
  props.setProperty("clustermap.cluster.name", "clusterName");
  props.setProperty("clustermap.datacenter.name", "DC1");
  props.setProperty("clustermap.ssl.enabled.datacenters", "DC1,DC2");
  props.setProperty("clustermap.port", Integer.toString(clusterMapPort));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  // Build the cloud/VCR configs from a fresh Properties instance.
  props = new Properties();
  props.setProperty("vcr.ssl.port", Integer.toString(vcrSslPort));
  props.setProperty(CloudConfig.VCR_CLUSTER_ZK_CONNECT_STRING, ZK_SERVER_HOSTNAME + ":" + ZK_SERVER_PORT);
  props.setProperty(CloudConfig.VCR_CLUSTER_NAME, VCR_CLUSTER_NAME);
  VerifiableProperties verifiableProperties = new VerifiableProperties(props);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  return new HelixVcrClusterAgentsFactory(cloudConfig, clusterMapConfig, mockClusterMap,
      Mockito.mock(AccountService.class), new StoreConfig(verifiableProperties), null,
      new MetricRegistry()).getVcrClusterParticipant();
}
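A hedged usage sketch of this helper inside the surrounding test: the participate()/close() lifecycle methods are from Ambry's VcrClusterParticipant interface as I understand it (verify against your version), and the port values are illustrative.

// Hypothetical test method; assumes the createHelixVcrClusterParticipant helper above
// and a running ZK/Helix setup from the test fixture.
@Test
public void participantLifecycleSketch() throws Exception {
  VcrClusterParticipant participant = createHelixVcrClusterParticipant(8123, 8124);
  try {
    // Join the Helix VCR cluster; partition assignments arrive via registered listeners.
    participant.participate();
  } finally {
    participant.close();
  }
}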
Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
The class CloudBlobStoreIntegrationTest, method testUpdateTtl.
/**
* Tests the {@link CloudBlobStore#updateTtl} method.
*/
@Test
public void testUpdateTtl() throws StoreException {
  MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  long now = System.currentTimeMillis();
  long expirationTimeMs = now;
  if (isVcr) {
    // The VCR doesn't upload a blob that is within CloudConfig#vcrMinTtlDays of expiry.
    expirationTimeMs += Math.max(TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays),
        TimeUnit.SECONDS.toMillis(storeConfig.storeTtlUpdateBufferTimeSeconds));
  } else {
    expirationTimeMs += TimeUnit.SECONDS.toMillis(storeConfig.storeTtlUpdateBufferTimeSeconds);
  }
  expirationTimeMs += 100000;
  addBlobToMessageSet(messageWriteSet, expirationTimeMs, accountId, containerId, partitionId, operationTime,
      (short) -1);
  cloudBlobStore.put(messageWriteSet);
  // Verify that the blob was uploaded with the expected metadata.
  StoreInfo storeInfo = cloudBlobStore.get(
      messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()),
      EnumSet.allOf(StoreGetOptions.class));
  assertFalse("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", expirationTimeMs,
      storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Do a ttl update without setting the ttl update flag.
  MessageInfo ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100,
      false, true, -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(
      messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()),
      EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Do a ttl update on an already-updated blob. It should fail silently.
  ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100, false, true,
      -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(
      messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()),
      EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Clear the cache by restarting the blob store, then do a ttl update on an already-updated blob.
  // It should again fail silently.
  cloudBlobStore.shutdown();
  cloudBlobStore.start();
  ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100, false, true,
      -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(
      messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()),
      EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Delete the blob.
  cloudBlobStore.delete(Collections.singletonList(ttlUpdateMessageInfo));
  // A ttl update of a deleted blob should throw an ID_Deleted StoreException for the frontend
  // and fail silently for the VCR.
  try {
    cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
    if (!isVcr) {
      fail("Update ttl of a deleted blob should fail for the frontend.");
    }
  } catch (StoreException ex) {
    assertEquals("Unexpected error code", ex.getErrorCode(), StoreErrorCodes.ID_Deleted);
  }
  // Clear the cache by restarting the blob store. A ttl update of a deleted blob should still
  // throw an ID_Deleted StoreException for the frontend.
  cloudBlobStore.shutdown();
  cloudBlobStore.start();
  try {
    cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
    if (!isVcr) {
      fail("Update ttl of a deleted blob should fail for the frontend.");
    }
  } catch (StoreException ex) {
    assertEquals("Unexpected error code", ex.getErrorCode(), StoreErrorCodes.ID_Deleted);
  }
}
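The expiration arithmetic at the top of this test only ensures the blob is far enough from expiry for the put to be accepted on both the VCR and frontend paths. Here is a standalone sketch of that computation with illustrative numbers; the real values come from CloudConfig#vcrMinTtlDays and StoreConfig#storeTtlUpdateBufferTimeSeconds, so the constants below are assumptions, not the configured defaults.

import java.util.concurrent.TimeUnit;

public class TtlBufferSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // Illustrative stand-ins for cloudConfig.vcrMinTtlDays and
    // storeConfig.storeTtlUpdateBufferTimeSeconds.
    long vcrMinTtlDays = 1;
    long ttlUpdateBufferSeconds = 86_400;
    boolean isVcr = true;
    long expirationTimeMs = now;
    if (isVcr) {
      // The VCR rejects blobs within vcrMinTtlDays of expiry, so clear both buffers.
      expirationTimeMs += Math.max(TimeUnit.DAYS.toMillis(vcrMinTtlDays),
          TimeUnit.SECONDS.toMillis(ttlUpdateBufferSeconds));
    } else {
      expirationTimeMs += TimeUnit.SECONDS.toMillis(ttlUpdateBufferSeconds);
    }
    expirationTimeMs += 100_000; // Extra slack so the put is not rejected at the boundary.
    System.out.println("expirationTimeMs = " + expirationTimeMs);
  }
}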
Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
The class ReplicationTest, method onReplicaAddedOrRemovedCallbackTest.
/**
 * Tests the cluster map change callback in {@link ReplicationManager} when remote replicas are added or removed.
 * Test setup: attempt to add 3 replicas and remove 3 replicas respectively. The three replicas are picked as follows:
 * (1) the 1st replica is on the current node (should be skipped)
 * (2) the 2nd replica is on a remote node and shares a partition with the current node (should be added or removed)
 * (3) the 3rd replica is on a remote node but does not share a partition with the current node (should be skipped)
 * @throws Exception
 */
@Test
public void onReplicaAddedOrRemovedCallbackTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  // Pick a node with no special partition as the current node.
  Set<DataNodeId> specialPartitionNodes = clusterMap.getSpecialPartition()
      .getReplicaIds()
      .stream()
      .map(ReplicaId::getDataNodeId)
      .collect(Collectors.toSet());
  DataNodeId currentNode =
      clusterMap.getDataNodes().stream().filter(d -> !specialPartitionNodes.contains(d)).findFirst().get();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  StorageManager storageManager =
      new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true),
          new MetricRegistry(), null, clusterMap, currentNode, null, null, new MockTime(), null,
          new InMemAccountService(false, false));
  storageManager.start();
  MockReplicationManager replicationManager =
      new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap,
          currentNode, storeKeyConverterFactory, null);
  ClusterMapChangeListener clusterMapChangeListener = clusterMap.getClusterMapChangeListener();
  // Find the special partition (not on the current node) and get an irrelevant replica from it.
  PartitionId absentPartition = clusterMap.getSpecialPartition();
  ReplicaId irrelevantReplica = absentPartition.getReplicaIds().get(0);
  // Find an existing replica on the current node and one of its peer replicas on a remote node.
  ReplicaId existingReplica = clusterMap.getReplicaIds(currentNode).get(0);
  ReplicaId peerReplicaToRemove =
      existingReplica.getPartitionId().getReplicaIds().stream().filter(r -> r != existingReplica).findFirst().get();
  // Create a new node and place a peer of the existing replica on it.
  MockDataNodeId remoteNode = createDataNode(
      getListOfPorts(PLAIN_TEXT_PORT_START_NUMBER + 10, SSL_PORT_START_NUMBER + 10, HTTP2_PORT_START_NUMBER + 10),
      clusterMap.getDatacenterName((byte) 0), 3);
  ReplicaId addedReplica =
      new MockReplicaId(remoteNode.getPort(), (MockPartitionId) existingReplica.getPartitionId(), remoteNode, 0);
  // Populate the added-replica and removed-replica lists.
  List<ReplicaId> replicasToAdd = new ArrayList<>(Arrays.asList(existingReplica, addedReplica, irrelevantReplica));
  List<ReplicaId> replicasToRemove =
      new ArrayList<>(Arrays.asList(existingReplica, peerReplicaToRemove, irrelevantReplica));
  PartitionInfo partitionInfo =
      replicationManager.getPartitionToPartitionInfoMap().get(existingReplica.getPartitionId());
  assertNotNull("PartitionInfo is not found", partitionInfo);
  RemoteReplicaInfo peerReplicaInfo = partitionInfo.getRemoteReplicaInfos()
      .stream()
      .filter(info -> info.getReplicaId() == peerReplicaToRemove)
      .findFirst()
      .get();
  // Get the replica thread for this peer replica.
  ReplicaThread peerReplicaThread = peerReplicaInfo.getReplicaThread();
  // Test case 1: the replication manager hit an exception during startup
  // (remote replica addition/removal should be skipped).
  replicationManager.startWithException();
  clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
  // Verify that the PartitionInfo stays unchanged.
  verifyRemoteReplicaInfo(partitionInfo, addedReplica, false);
  verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, true);
  // Test case 2: the startup latch is interrupted.
  CountDownLatch initialLatch = replicationManager.startupLatch;
  CountDownLatch mockLatch = Mockito.mock(CountDownLatch.class);
  doThrow(new InterruptedException()).when(mockLatch).await();
  replicationManager.startupLatch = mockLatch;
  try {
    clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
    fail("should fail because the startup latch is interrupted");
  } catch (IllegalStateException e) {
    // expected
  }
  replicationManager.startupLatch = initialLatch;
  // Test case 3: the replication manager is successfully started.
  replicationManager.start();
  clusterMapChangeListener.onReplicaAddedOrRemoved(replicasToAdd, replicasToRemove);
  // Verify that the PartitionInfo has the latest remote replica infos.
  verifyRemoteReplicaInfo(partitionInfo, addedReplica, true);
  verifyRemoteReplicaInfo(partitionInfo, peerReplicaToRemove, false);
  verifyRemoteReplicaInfo(partitionInfo, irrelevantReplica, false);
  // Verify that the newly added replica is assigned to a replica thread.
  ReplicaThread replicaThread =
      replicationManager.getDataNodeIdToReplicaThreadMap().get(addedReplica.getDataNodeId());
  assertNotNull("There is no ReplicaThread associated with the new replica", replicaThread);
  Optional<RemoteReplicaInfo> findResult = replicaThread.getRemoteReplicaInfos()
      .get(remoteNode)
      .stream()
      .filter(info -> info.getReplicaId() == addedReplica)
      .findAny();
  assertTrue("Newly added remote replica info should exist in the corresponding thread", findResult.isPresent());
  // Verify that the removed replica info's thread is null.
  assertNull("Thread in removed replica info should be null", peerReplicaInfo.getReplicaThread());
  findResult = peerReplicaThread.getRemoteReplicaInfos()
      .get(peerReplicaToRemove.getDataNodeId())
      .stream()
      .filter(info -> info.getReplicaId() == peerReplicaToRemove)
      .findAny();
  assertFalse("Previous replica thread should not contain a RemoteReplicaInfo that has been removed",
      findResult.isPresent());
  storageManager.shutdown();
}
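The callback exercised above comes from Ambry's ClusterMapChangeListener. Below is a minimal stand-in with the same shape as the onReplicaAddedOrRemoved call in the test; it deliberately does not implement the real interface, which may declare further methods, and only illustrates what a listener sees.

import java.util.List;

import com.github.ambry.clustermap.ReplicaId;

public class LoggingReplicaChangeHandler {
  // Same shape as ClusterMapChangeListener#onReplicaAddedOrRemoved as invoked in the test.
  public void onReplicaAddedOrRemoved(List<ReplicaId> addedReplicas, List<ReplicaId> removedReplicas) {
    // A real ReplicationManager adds/removes RemoteReplicaInfos and reassigns replica
    // threads here; this sketch only reports the changes.
    addedReplicas.forEach(r -> System.out.println("added: " + r.getPartitionId() + " on " + r.getDataNodeId()));
    removedReplicas.forEach(r -> System.out.println("removed: " + r.getPartitionId() + " on " + r.getDataNodeId()));
  }
}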
Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.
The class ReplicationTestHelper, method createStorageManagerAndReplicationManager.
/**
 * Helper method to create a storage manager and a replication manager.
 * @param clusterMap the {@link ClusterMap} to use.
 * @param clusterMapConfig the {@link ClusterMapConfig} to use.
 * @param clusterParticipant the {@link com.github.ambry.clustermap.ClusterParticipant} for listener registration.
 * @param mockConnectionPool the {@link ConnectionPool} to hand to the replication manager.
 * @return a pair of storage manager and replication manager.
 * @throws Exception
 */
protected Pair<StorageManager, ReplicationManager> createStorageManagerAndReplicationManager(ClusterMap clusterMap,
    ClusterMapConfig clusterMapConfig, MockHelixParticipant clusterParticipant, ConnectionPool mockConnectionPool)
    throws Exception {
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  StorageManager storageManager =
      new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true),
          new MetricRegistry(), null, clusterMap, dataNodeId, null,
          clusterParticipant == null ? null : Collections.singletonList(clusterParticipant), new MockTime(), null,
          new InMemAccountService(false, false));
  storageManager.start();
  MockReplicationManager replicationManager =
      new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap,
          dataNodeId, storeKeyConverterFactory, clusterParticipant, mockConnectionPool,
          new MockFindTokenHelper(storeKeyFactory, replicationConfig), BlobIdTransformer.class.getName(),
          storeKeyFactory, time);
  return new Pair<>(storageManager, replicationManager);
}
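A hedged usage sketch of this helper inside a test: Pair#getFirst/#getSecond follow com.github.ambry.utils.Pair, and the fixture fields (clusterMap, clusterMapConfig, verifiableProperties) are assumed to be set up as elsewhere in ReplicationTestHelper.

// Hypothetical test body; passing null skips the cluster participant and connection pool.
Pair<StorageManager, ReplicationManager> managers =
    createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, null, null);
StorageManager storageManager = managers.getFirst();
ReplicationManager replicationManager = managers.getSecond();
try {
  replicationManager.start();
  // ... exercise replication behavior ...
} finally {
  storageManager.shutdown();
}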