Use of org.apache.hadoop.hdfs.ExtendedBlockId in project hadoop by apache.
Class FsDatasetCache, method cacheBlock.
/**
 * Attempt to begin caching a block.
 */
synchronized void cacheBlock(long blockId, String bpid, String blockFileName,
    long length, long genstamp, Executor volumeExecutor) {
  ExtendedBlockId key = new ExtendedBlockId(blockId, bpid);
  Value prevValue = mappableBlockMap.get(key);
  if (prevValue != null) {
    LOG.debug("Block with id {}, pool {} already exists in the "
        + "FsDatasetCache with state {}", blockId, bpid, prevValue.state);
    numBlocksFailedToCache.incrementAndGet();
    return;
  }
  mappableBlockMap.put(key, new Value(null, State.CACHING));
  volumeExecutor.execute(
      new CachingTask(key, blockFileName, length, genstamp));
  LOG.debug("Initiating caching for Block with id {}, pool {}", blockId, bpid);
}
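The pattern above hinges on ExtendedBlockId acting as a composite key (block ID plus block pool ID) for mappableBlockMap. Below is a minimal sketch, not taken from Hadoop, that shows the same key usage with a plain HashMap; BlockKeySketch and the string state values are illustrative only, and the sketch assumes equals() and hashCode() cover both fields, which is what the map lookup above relies on.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.ExtendedBlockId;

// Illustrative sketch: ExtendedBlockId pairs a block ID with its block pool
// ID, so it can serve as a map key the way mappableBlockMap uses it above.
public class BlockKeySketch {
  public static void main(String[] args) {
    Map<ExtendedBlockId, String> state = new HashMap<>();
    ExtendedBlockId key = new ExtendedBlockId(123L, "BP-1");

    state.put(key, "CACHING");
    // A second instance with the same block ID and pool ID finds the entry,
    // assuming equals()/hashCode() compare both fields.
    System.out.println(state.get(new ExtendedBlockId(123L, "BP-1"))); // CACHING
  }
}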
Use of org.apache.hadoop.hdfs.ExtendedBlockId in project hadoop by apache.
Class FsDatasetCache, method uncacheBlock.
synchronized void uncacheBlock(String bpid, long blockId) {
  ExtendedBlockId key = new ExtendedBlockId(blockId, bpid);
  Value prevValue = mappableBlockMap.get(key);
  boolean deferred = false;
  if (!dataset.datanode.getShortCircuitRegistry()
      .processBlockMunlockRequest(key)) {
    deferred = true;
  }
  if (prevValue == null) {
    LOG.debug("Block with id {}, pool {} does not need to be uncached, "
        + "because it is not currently in the mappableBlockMap.",
        blockId, bpid);
    numBlocksFailedToUncache.incrementAndGet();
    return;
  }
  switch (prevValue.state) {
  case CACHING:
    LOG.debug("Cancelling caching for block with id {}, pool {}.",
        blockId, bpid);
    mappableBlockMap.put(key,
        new Value(prevValue.mappableBlock, State.CACHING_CANCELLED));
    break;
  case CACHED:
    mappableBlockMap.put(key,
        new Value(prevValue.mappableBlock, State.UNCACHING));
    if (deferred) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("{} is anchored, and can't be uncached now. Scheduling it "
            + "for uncaching in {} ", key,
            DurationFormatUtils.formatDurationHMS(revocationPollingMs));
      }
      deferredUncachingExecutor.schedule(new UncachingTask(key, revocationMs),
          revocationPollingMs, TimeUnit.MILLISECONDS);
    } else {
      LOG.debug("{} has been scheduled for immediate uncaching.", key);
      uncachingExecutor.execute(new UncachingTask(key, 0));
    }
    break;
  default:
    LOG.debug("Block with id {}, pool {} does not need to be uncached, "
        + "because it is in state {}.", blockId, bpid, prevValue.state);
    numBlocksFailedToUncache.incrementAndGet();
    break;
  }
}
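When processBlockMunlockRequest reports that a short-circuit reader still has the block anchored, uncacheBlock defers the UncachingTask by revocationPollingMs instead of running it immediately. Here is a rough sketch of that deferral pattern using a ScheduledExecutorService; DeferredUncacheSketch, pollingMs, and stillAnchored are hypothetical names standing in for the real FsDatasetCache fields and registry check.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the deferral pattern: if the block is still anchored, re-schedule
// the uncache attempt after a polling interval rather than running it now.
public class DeferredUncacheSketch {
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private final long pollingMs = 1000L; // stands in for revocationPollingMs

  void uncache(String key, boolean anchored) {
    if (anchored) {
      // Try again later; the real code also passes a revocation deadline along.
      scheduler.schedule(() -> uncache(key, stillAnchored(key)),
          pollingMs, TimeUnit.MILLISECONDS);
    } else {
      System.out.println("uncaching " + key + " now");
    }
  }

  private boolean stillAnchored(String key) {
    // Placeholder: the real check asks the short-circuit registry.
    return false;
  }
}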
Use of org.apache.hadoop.hdfs.ExtendedBlockId in project hadoop by apache.
Class FsDatasetImpl, method invalidate.
/**
 * Invalidates a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid  the block pool ID.
 * @param block the block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
  // If a DFSClient has the replica in its cache of short-circuit file
  // descriptors (and the client is using ShortCircuitShm), invalidate it.
  datanode.getShortCircuitRegistry().processBlockInvalidation(
      new ExtendedBlockId(block.getBlockId(), bpid));
  // If the block is cached, start uncaching it.
  cacheManager.uncacheBlock(bpid, block.getBlockId());
  datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
      block.getStorageUuid());
}
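Both invalidate() variants build the ExtendedBlockId by hand from a block ID and a block pool ID. The short sketch below shows the two ways the key is typically derived; KeyConstructionSketch is a hypothetical wrapper, and the factory method is assumed to be available as in current Hadoop sources.

import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

// Sketch of deriving the cache/registry key, either from raw fields (as the
// invalidate() methods above do) or from an ExtendedBlock.
public class KeyConstructionSketch {
  static ExtendedBlockId keyFor(String bpid, long blockId) {
    return new ExtendedBlockId(blockId, bpid);
  }

  static ExtendedBlockId keyFor(ExtendedBlock block) {
    // Assumed equivalent to
    // new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()).
    return ExtendedBlockId.fromExtendedBlock(block);
  }
}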
Use of org.apache.hadoop.hdfs.ExtendedBlockId in project hadoop by apache.
Class FsDatasetImpl, method invalidate.
/**
 * We're informed that a block is no longer valid. Delete it.
 */
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
  final List<String> errors = new ArrayList<String>();
  for (int i = 0; i < invalidBlks.length; i++) {
    final ReplicaInfo removing;
    final FsVolumeImpl v;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        ReplicaInfo infoByBlockId =
            volumeMap.get(bpid, invalidBlks[i].getBlockId());
        if (infoByBlockId == null) {
          // It is okay if the block is not found -- it
          // may be deleted earlier.
          LOG.info("Failed to delete replica " + invalidBlks[i]
              + ": ReplicaInfo not found.");
        } else {
          errors.add("Failed to delete replica " + invalidBlks[i]
              + ": GenerationStamp not matched, existing replica is "
              + Block.toString(infoByBlockId));
        }
        continue;
      }
      v = (FsVolumeImpl) info.getVolume();
      if (v == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            + ". No volume for replica " + info);
        continue;
      }
      try {
        File blockFile = new File(info.getBlockURI());
        if (blockFile != null && blockFile.getParentFile() == null) {
          errors.add("Failed to delete replica " + invalidBlks[i]
              + ". Parent not found for block file: " + blockFile);
          continue;
        }
      } catch (IllegalArgumentException e) {
        LOG.warn("Parent directory check failed; replica " + info
            + " is not backed by a local file");
      }
      removing = volumeMap.remove(bpid, invalidBlks[i]);
      addDeletingBlock(bpid, removing.getBlockId());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Block file " + removing.getBlockURI()
            + " is to be deleted");
      }
      if (removing instanceof ReplicaInPipeline) {
        ((ReplicaInPipeline) removing).releaseAllBytesReserved();
      }
    }
    if (v.isTransientStorage()) {
      RamDiskReplica replicaInfo =
          ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
      if (replicaInfo != null) {
        if (!replicaInfo.getIsPersisted()) {
          datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
        }
        ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
            replicaInfo.getBlockId(), true);
      }
    }
    // If a DFSClient has the replica in its cache of short-circuit file
    // descriptors (and the client is using ShortCircuitShm), invalidate it.
    datanode.getShortCircuitRegistry().processBlockInvalidation(
        new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
    // It's ok to unlink the block file before the uncache operation
    // finishes.
    try {
      asyncDiskService.deleteAsync(v.obtainReference(), removing,
          new ExtendedBlock(bpid, invalidBlks[i]),
          dataStorage.getTrashDirectoryForReplica(bpid, removing));
    } catch (ClosedChannelException e) {
      LOG.warn("Volume " + v + " is closed, ignore the deletion task for "
          + "block " + invalidBlks[i]);
    }
  }
  if (!errors.isEmpty()) {
    StringBuilder b = new StringBuilder("Failed to delete ")
        .append(errors.size()).append(" (out of ").append(invalidBlks.length)
        .append(") replica(s):");
    for (int i = 0; i < errors.size(); i++) {
      b.append("\n").append(i).append(") ").append(errors.get(i));
    }
    throw new IOException(b.toString());
  }
}
Use of org.apache.hadoop.hdfs.ExtendedBlockId in project hadoop by apache.
Class TestShortCircuitShm, method testAllocateSlots.
@Test(timeout = 60000)
public void testAllocateSlots() throws Exception {
  File path = new File(TEST_BASE, "testAllocateSlots");
  path.mkdirs();
  SharedFileDescriptorFactory factory = SharedFileDescriptorFactory.create(
      "shm_", new String[] { path.getAbsolutePath() });
  FileInputStream stream =
      factory.createDescriptor("testAllocateSlots", 4096);
  ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
  int numSlots = 0;
  ArrayList<Slot> slots = new ArrayList<Slot>();
  while (!shm.isFull()) {
    Slot slot = shm.allocAndRegisterSlot(
        new ExtendedBlockId(123L, "test_bp1"));
    slots.add(slot);
    numSlots++;
  }
  LOG.info("allocated " + numSlots + " slots before running out.");
  int slotIdx = 0;
  for (Iterator<Slot> iter = shm.slotIterator(); iter.hasNext(); ) {
    Assert.assertTrue(slots.contains(iter.next()));
  }
  for (Slot slot : slots) {
    Assert.assertFalse(slot.addAnchor());
    Assert.assertEquals(slotIdx++, slot.getSlotIdx());
  }
  for (Slot slot : slots) {
    slot.makeAnchorable();
  }
  for (Slot slot : slots) {
    Assert.assertTrue(slot.addAnchor());
  }
  for (Slot slot : slots) {
    slot.removeAnchor();
  }
  for (Slot slot : slots) {
    shm.unregisterSlot(slot.getSlotIdx());
    slot.makeInvalid();
  }
  shm.free();
  stream.close();
  FileUtil.fullyDelete(path);
}
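The test registers every slot under the same ExtendedBlockId(123L, "test_bp1"). Building on that, the small sketch below counts the slots registered for a given block by walking slotIterator(); SlotCountSketch is a hypothetical helper, and it assumes Slot.getBlockId() is available, as in current Hadoop sources.

import java.util.Iterator;

import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;

// Sketch: count the slots in a shared-memory segment that belong to one block,
// relying on each Slot carrying the ExtendedBlockId it was registered with.
public class SlotCountSketch {
  static int countSlotsFor(ShortCircuitShm shm, ExtendedBlockId blockId) {
    int count = 0;
    for (Iterator<Slot> iter = shm.slotIterator(); iter.hasNext(); ) {
      if (blockId.equals(iter.next().getBlockId())) {
        count++;
      }
    }
    return count;
  }
}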