use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class StorageManager method setBlobStoreStoppedState.
@Override
public List<PartitionId> setBlobStoreStoppedState(List<PartitionId> partitionIds, boolean markStop) {
  Map<DiskManager, List<PartitionId>> diskManagerToPartitionMap = new HashMap<>();
  List<PartitionId> failToUpdateStores = new ArrayList<>();
  // Group the partitions by the DiskManager that owns them; a partition with no
  // associated DiskManager cannot be updated and is reported as a failure.
  for (PartitionId id : partitionIds) {
    DiskManager diskManager = partitionToDiskManager.get(id);
    if (diskManager != null) {
      diskManagerToPartitionMap.computeIfAbsent(diskManager, disk -> new ArrayList<>()).add(id);
    } else {
      failToUpdateStores.add(id);
    }
  }
  // Delegate the state change to each DiskManager and collect any stores it failed to update.
  for (Map.Entry<DiskManager, List<PartitionId>> diskToPartitions : diskManagerToPartitionMap.entrySet()) {
    List<PartitionId> failList =
        diskToPartitions.getKey().setBlobStoreStoppedState(diskToPartitions.getValue(), markStop);
    failToUpdateStores.addAll(failList);
  }
  return failToUpdateStores;
}
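For context on how the return value is consumed, here is a minimal, hypothetical caller sketch; storageManager, partitionsToStop and logger are assumed names that do not appear in the snippet above:

// Hypothetical caller: mark the given stores as stopped and surface anything that failed.
List<PartitionId> failed = storageManager.setBlobStoreStoppedState(partitionsToStop, true);
if (!failed.isEmpty()) {
  logger.warn("Could not mark {} store(s) as stopped: {}", failed.size(), failed);
}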
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class StatsManagerTest method testStatsManagerDeleteTombstoneStats.
/**
 * Test to verify that the {@link StatsManager} is collecting delete tombstone stats.
 */
@Test
public void testStatsManagerDeleteTombstoneStats() {
  List<PartitionId> unreachablePartitions = Collections.emptyList();
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> hostAccountStorageStatsMap = new HashMap<>();
  for (PartitionId partitionId : storeMap.keySet()) {
    statsManager.collectAndAggregateAccountStorageStats(hostAccountStorageStatsMap, partitionId,
        unreachablePartitions);
  }
  statsManager.updateAggregatedDeleteTombstoneStats();
  // Verify the aggregated delete tombstone stats: every store reports the same per-store
  // Pair<count, size>, so the aggregate should be the per-store value scaled by the store count.
  StatsManager.AggregatedDeleteTombstoneStats deleteTombstoneStats =
      statsManager.getAggregatedDeleteTombstoneStats();
  Pair<Long, Long> expectedExpiredDeleteStats = storeDeleteTombstoneStats.get(EXPIRED_DELETE_TOMBSTONE);
  Pair<Long, Long> expectedPermanentDeleteStats = storeDeleteTombstoneStats.get(PERMANENT_DELETE_TOMBSTONE);
  assertEquals("Mismatch in expired delete count",
      storeMap.size() * expectedExpiredDeleteStats.getFirst(),
      deleteTombstoneStats.getExpiredDeleteTombstoneCount());
  assertEquals("Mismatch in expired delete size",
      storeMap.size() * expectedExpiredDeleteStats.getSecond(),
      deleteTombstoneStats.getExpiredDeleteTombstoneSize());
  assertEquals("Mismatch in permanent delete count",
      storeMap.size() * expectedPermanentDeleteStats.getFirst(),
      deleteTombstoneStats.getPermanentDeleteTombstoneCount());
  assertEquals("Mismatch in permanent delete size",
      storeMap.size() * expectedPermanentDeleteStats.getSecond(),
      deleteTombstoneStats.getPermanentDeleteTombstoneSize());
}
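The four assertions repeat one pattern. A small helper could state it once; the following is only an illustrative sketch, not part of the Ambry test (assertScaledTombstoneStats is a hypothetical name):

// Hypothetical helper: each mock store reports identical tombstone stats, so the
// aggregate should equal the per-store Pair<count, size> scaled by the store count.
private void assertScaledTombstoneStats(String label, Pair<Long, Long> perStore, int numStores,
    long actualCount, long actualSize) {
  assertEquals("Mismatch in " + label + " count", numStores * perStore.getFirst(), actualCount);
  assertEquals("Mismatch in " + label + " size", numStores * perStore.getSecond(), actualSize);
}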
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class DiskManager method shutdown.
/**
 * Shuts down all the stores on this disk.
 * @throws InterruptedException if interrupted while waiting for store shutdown threads or long lived tasks.
 */
void shutdown() throws InterruptedException {
  long startTimeMs = time.milliseconds();
  rwLock.readLock().lock();
  try {
    running = false;
    compactionManager.disable();
    diskIOScheduler.disable();
    final AtomicInteger numFailures = new AtomicInteger(0);
    List<Thread> shutdownThreads = new ArrayList<>();
    // Shut down each started store on its own thread so one slow store does not serialize the disk shutdown.
    for (final Map.Entry<PartitionId, BlobStore> partitionAndStore : stores.entrySet()) {
      if (!partitionAndStore.getValue().isStarted()) {
        continue;
      }
      Thread thread = Utils.newThread("store-shutdown-" + partitionAndStore.getKey(), () -> {
        try {
          partitionAndStore.getValue().shutdown();
        } catch (Exception e) {
          numFailures.incrementAndGet();
          metrics.totalStoreShutdownFailures.inc();
          logger.error("Exception while shutting down store {} on disk {}", partitionAndStore.getKey(), disk, e);
        }
      }, false);
      thread.start();
      shutdownThreads.add(thread);
    }
    // Wait for every store shutdown thread before tearing down the shared components.
    for (Thread shutdownThread : shutdownThreads) {
      shutdownThread.join();
    }
    if (numFailures.get() > 0) {
      logger.error("Could not shut down {} out of {} stores on the disk {}", numFailures.get(), stores.size(), disk);
    }
    compactionManager.awaitTermination();
    longLivedTaskScheduler.shutdown();
    if (!longLivedTaskScheduler.awaitTermination(30, TimeUnit.SECONDS)) {
      logger.error("Could not terminate long lived tasks after DiskManager shutdown");
    }
  } finally {
    rwLock.readLock().unlock();
    metrics.diskShutdownTimeMs.update(time.milliseconds() - startTimeMs);
  }
}
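For context, a minimal, hypothetical caller sketch; diskManagers is an assumed collection of the node's per-disk managers and does not appear in the snippet above:

// Hypothetical: shut down every disk in turn. Each DiskManager.shutdown() fans out
// one thread per started store and joins them all before returning.
for (DiskManager diskManager : diskManagers) {
  try {
    diskManager.shutdown();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve the interrupt status for the caller
    break;
  }
}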
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ReplicaMetadataRequestInfo method readFrom.
public static ReplicaMetadataRequestInfo readFrom(DataInputStream stream, ClusterMap clusterMap,
    FindTokenFactory factory) throws IOException {
  // Wire order: host name and replica path (length-prefixed strings), then partition id, then find token.
  String hostName = Utils.readIntString(stream);
  String replicaPath = Utils.readIntString(stream);
  PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
  FindToken token = factory.getFindToken(stream);
  return new ReplicaMetadataRequestInfo(partitionId, token, hostName, replicaPath);
}
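The read side above implies the wire layout. The following is only an assumed sketch of a symmetric write side, not Ambry's actual serializer: writeIntString is a hypothetical helper that writes a 4-byte length followed by the string bytes, and the getBytes()/toBytes() accessors are assumed from the surrounding API.

// Assumed sketch: serialize the fields in the same order readFrom consumes them.
public void writeTo(DataOutputStream stream) throws IOException {
  writeIntString(stream, hostName);     // hypothetical length-prefixed string writer
  writeIntString(stream, replicaPath);
  stream.write(partitionId.getBytes()); // PartitionId serializes itself (assumed accessor)
  stream.write(token.toBytes());        // FindToken exposes its serialized form (assumed)
}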
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ReplicaMetadataResponseInfo method readFrom.
public static ReplicaMetadataResponseInfo readFrom(DataInputStream stream, FindTokenFactory factory,
    ClusterMap clusterMap, short replicaMetadataResponseVersion) throws IOException {
  PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
  ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
  if (error != ServerErrorCode.No_Error) {
    // On error, the payload carries only the partition and error code: no token, message list or lag.
    return new ReplicaMetadataResponseInfo(partitionId, error);
  } else {
    FindToken token = factory.getFindToken(stream);
    Pair<List<MessageInfo>, List<MessageMetadata>> messageInfoAndMetadataList =
        MessageInfoAndMetadataListSerde.deserializeMessageInfoAndMetadataList(stream, clusterMap,
            getMessageInfoAndMetadataListSerDeVersion(replicaMetadataResponseVersion));
    long remoteReplicaLag = stream.readLong();
    return new ReplicaMetadataResponseInfo(partitionId, token, messageInfoAndMetadataList.getFirst(),
        remoteReplicaLag, replicaMetadataResponseVersion);
  }
}
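To show how a single entry fits into a whole response, here is a hypothetical read loop; the int count prefix, the stream handling and the variable names are assumptions rather than Ambry's actual ReplicaMetadataResponse code:

// Hypothetical: deserialize a batch of per-partition response entries.
int numResponseInfos = stream.readInt(); // assumed count prefix
List<ReplicaMetadataResponseInfo> infos = new ArrayList<>();
for (int i = 0; i < numResponseInfos; i++) {
  infos.add(ReplicaMetadataResponseInfo.readFrom(stream, findTokenFactory, clusterMap, version));
}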