Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by Apache.
The class TestDirectoryScanner, method duplicateBlock.
/**
 * Duplicate the given block on all volumes that do not already hold a copy.
 * @param blockId id of the block to duplicate
 * @throws IOException if the block or meta file cannot be copied
 */
private void duplicateBlock(long blockId) throws IOException {
  try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
    ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
    try (FsDatasetSpi.FsVolumeReferences volumes =
        fds.getFsVolumeReferences()) {
      for (FsVolumeSpi v : volumes) {
        if (v.getStorageID().equals(b.getVolume().getStorageID())) {
          continue;
        }
        // Volume without a copy of the block. Make a copy now.
        File sourceBlock = new File(b.getBlockURI());
        File sourceMeta = new File(b.getMetadataURI());
        URI sourceRoot = b.getVolume().getStorageLocation().getUri();
        URI destRoot = v.getStorageLocation().getUri();
        String relativeBlockPath =
            sourceRoot.relativize(sourceBlock.toURI()).getPath();
        String relativeMetaPath =
            sourceRoot.relativize(sourceMeta.toURI()).getPath();
        File destBlock = new File(new File(destRoot).toString(),
            relativeBlockPath);
        File destMeta = new File(new File(destRoot).toString(),
            relativeMetaPath);
        destBlock.getParentFile().mkdirs();
        FileUtils.copyFile(sourceBlock, destBlock);
        FileUtils.copyFile(sourceMeta, destMeta);
        if (destBlock.exists() && destMeta.exists()) {
          LOG.info("Copied " + sourceBlock + " ==> " + destBlock);
          LOG.info("Copied " + sourceMeta + " ==> " + destMeta);
        }
      }
    }
  }
}
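The try-with-resources pattern above works because AutoCloseableLock.acquire() takes the underlying lock and returns the lock object itself, so close() at the end of the try block releases it on every exit path, including exceptions. A minimal sketch of the same usage, with a hypothetical GuardedCounter class standing in for the dataset:

import org.apache.hadoop.util.AutoCloseableLock;

class GuardedCounter {
  // hypothetical example class, not part of Hadoop
  private final AutoCloseableLock lock = new AutoCloseableLock();
  private long value;

  long increment() {
    // acquire() locks and returns the lock itself; close() unlocks it
    try (AutoCloseableLock l = lock.acquire()) {
      return ++value;
    } // lock released here, even if the body throws
  }
}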
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by Apache.
The class TestInterDatanodeProtocol, method testInitReplicaRecovery.
/** Test
* {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}
*/
@Test
public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }

  {
    // normal case
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);

    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);

    final ReplicaUnderRecovery updatedInfo =
        (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());

    // recover one more time
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);

    final ReplicaUnderRecovery updatedInfo2 =
        (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());

    // case RecoveryInProgressException
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }

  {
    // BlockRecoveryFI_01: replica not found
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
        recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.", r);
  }

  {
    // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }

  {
    // BlockRecoveryFI_03: replica's gs is less than the block's gs
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's "
          + "gs is less than the block's gs");
    } catch (IOException e) {
      Assert.assertTrue(e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
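Note how the test hands a fresh AutoCloseableLock to the ReplicaMap constructor: the map synchronizes its operations on a lock supplied by its owner, so the dataset and the map can share one lock instance and compound check-then-act sequences stay atomic. A hedged sketch of that design (GuardedMap is a hypothetical stand-in, not Hadoop API):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.util.AutoCloseableLock;

class GuardedMap<K, V> {
  private final AutoCloseableLock lock; // supplied and shared by the owner
  private final Map<K, V> map = new HashMap<>();

  GuardedMap(AutoCloseableLock lock) {
    this.lock = lock;
  }

  V get(K key) {
    try (AutoCloseableLock l = lock.acquire()) {
      return map.get(key);
    }
  }

  V put(K key, V value) {
    try (AutoCloseableLock l = lock.acquire()) {
      return map.put(key, value);
    }
  }
}

Because the owner holds a reference to the same lock, it can wrap a read-check-update sequence over several map calls in one critical section, which is what initReplicaRecovery relies on.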
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by Apache.
The class DataNode, method transferReplicaForPipelineRecovery.
/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer. The corresponding replica must be an RBW
 *     or a finalized replica. Its GS and numBytes will be set to the stored
 *     GS and the visible length.
 * @param targets targets to transfer the block to
 * @param targetStorageTypes storage types of the targets
 * @param client client name
 * @throws IOException if the replica is not found or is in an unexpected state
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  // get replica information
  try (AutoCloseableLock lock = data.acquireDatasetLock()) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  // set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
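The method reads the mutable replica state (storedGS, visible, stage) into final locals while holding the dataset lock, then releases the lock before running the actual transfer, so slow network I/O never happens inside the critical section. A rough sketch of that snapshot-then-act shape, with hypothetical names:

import org.apache.hadoop.util.AutoCloseableLock;

class SnapshotThenAct {
  // hypothetical example class, not part of Hadoop
  private final AutoCloseableLock lock = new AutoCloseableLock();
  private long generationStamp;
  private long visibleLength;

  void transfer() {
    final long gs;
    final long visible;
    try (AutoCloseableLock l = lock.acquire()) {
      gs = generationStamp;    // snapshot while locked
      visible = visibleLength;
    }
    // lock released: the slow work runs outside the critical section
    send(gs, visible);
  }

  private void send(long gs, long visible) {
    // stand-in for DataTransfer#run(); details omitted
  }
}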
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by Apache.
The class FsDatasetImpl, method getTmpInputStreams.
/**
 * Returns handles to the block file and its metadata file.
 */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkOffset, long metaOffset) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo info = getReplicaInfo(b);
    FsVolumeReference ref = info.getVolume().obtainReference();
    try {
      InputStream blockInStream = info.getDataInputStream(blkOffset);
      try {
        InputStream metaInStream = info.getMetadataInputStream(metaOffset);
        return new ReplicaInputStreams(blockInStream, metaInStream, ref,
            datanode.getFileIoProvider());
      } catch (IOException e) {
        IOUtils.cleanup(null, blockInStream);
        throw e;
      }
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
  }
}
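The nested try/catch blocks implement cleanup on partial failure: if opening the metadata stream fails, the already opened block stream is closed; if anything after obtainReference() fails, the volume reference is released before the exception propagates. A hedged, generic sketch of the same idiom (Opener and openBoth are illustrative, not Hadoop API):

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

class OpenBoth {
  interface Opener {
    Closeable open() throws IOException;
  }

  static Closeable[] openBoth(Opener first, Opener second) throws IOException {
    Closeable a = first.open();
    try {
      Closeable b = second.open();
      return new Closeable[] { a, b };
    } catch (IOException e) {
      // release what was already acquired before rethrowing
      IOUtils.cleanup(null, a);
      throw e;
    }
  }
}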
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by Apache.
The class FsDatasetImpl, method convertTemporaryToRbw.
@Override // FsDatasetSpi
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    final long blockId = b.getBlockId();
    final long expectedGs = b.getGenerationStamp();
    final long visible = b.getNumBytes();
    LOG.info("Convert " + b + " from Temporary to RBW, visible length="
        + visible);

    final ReplicaInfo temp;
    {
      // get replica
      final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
      if (r == null) {
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
      }
      // check the replica's state
      if (r.getState() != ReplicaState.TEMPORARY) {
        throw new ReplicaAlreadyExistsException(
            "r.getState() != ReplicaState.TEMPORARY, r=" + r);
      }
      temp = r;
    }

    // check generation stamp
    if (temp.getGenerationStamp() != expectedGs) {
      throw new ReplicaAlreadyExistsException(
          "temp.getGenerationStamp() != expectedGs = " + expectedGs
              + ", temp=" + temp);
    }

    // TODO: check writer?
    // set writer to the current thread
    // temp.setWriter(Thread.currentThread());

    // check length
    final long numBytes = temp.getNumBytes();
    if (numBytes < visible) {
      throw new IOException(numBytes + " = numBytes < visible = " + visible
          + ", temp=" + temp);
    }

    // check volume
    final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
    if (v == null) {
      throw new IOException("r.getVolume() = null, temp=" + temp);
    }

    final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
    if (rbw.getState() != ReplicaState.RBW) {
      throw new IOException("Expected replica state: " + ReplicaState.RBW
          + " obtained " + rbw.getState() + " for converting block "
          + b.getBlockId());
    }
    // overwrite the RBW in the volume map
    volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
    return rbw;
  }
}
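All of the validation here (existence, TEMPORARY state, generation stamp, length, volume) and the final volumeMap.add() happen under a single datasetLock.acquire(), so no concurrent thread can observe or race a half-converted replica. A minimal sketch of that validate-then-transition shape, using a hypothetical state map rather than real replica objects:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.util.AutoCloseableLock;

class ReplicaStates {
  enum State { TEMPORARY, RBW }

  private final AutoCloseableLock lock = new AutoCloseableLock();
  private final Map<Long, State> states = new HashMap<>();

  void convertTemporaryToRbw(long id) {
    try (AutoCloseableLock l = lock.acquire()) {
      final State s = states.get(id);
      if (s == null) {
        throw new IllegalStateException("no replica " + id);
      }
      if (s != State.TEMPORARY) {
        throw new IllegalStateException("not TEMPORARY, state=" + s);
      }
      // publish the new state under the same lock that validated it
      states.put(id, State.RBW);
    }
  }
}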