Example 21 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method contains().

@Override // FsDatasetSpi
public boolean contains(final ExtendedBlock block) {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = block.getLocalBlock().getBlockId();
        final String bpid = block.getBlockPoolId();
        final ReplicaInfo r = volumeMap.get(bpid, blockId);
        return (r != null && r.blockDataExists());
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
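
The snippet shows the core AutoCloseableLock idiom: acquire() takes the underlying lock and returns the AutoCloseableLock itself, so try-with-resources releases it on every exit path, including the early return. A minimal, self-contained sketch of the same idiom follows; the CounterSketch class and its count field are hypothetical stand-ins, not Hadoop code.

import org.apache.hadoop.util.AutoCloseableLock;

public class CounterSketch {
    // The no-arg constructor wraps a plain ReentrantLock.
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();
    // Hypothetical shared state guarded by the lock.
    private long count;

    public long incrementAndGet() {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            // Held here; released automatically by close() even though
            // we return from inside the try block.
            return ++count;
        }
    }
}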

Example 22 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method recoverRbw().

@Override // FsDatasetSpi
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    LOG.info("Recover RBW replica " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
                // check the replica's state
                if (replicaInfo.getState() != ReplicaState.RBW) {
                    throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
                }
                ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
                if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
                    throw new MustStopExistingWriter(rbw);
                }
                LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
                return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
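
recoverRbw() layers a second pattern on top of the lock: replica state is validated while the lock is held, but stopping a competing writer happens outside the lock, after which the loop re-acquires it and re-validates from scratch. The following is a sketch of that stop-and-retry shape under assumed names (MustStop, the writer field, and recover() are hypothetical stand-ins for the Hadoop types, not the real implementation).

import org.apache.hadoop.util.AutoCloseableLock;

class StopAndRetrySketch {
    // Hypothetical control-flow exception, mirroring MustStopExistingWriter.
    static class MustStop extends Exception {
        final Thread oldWriter;
        MustStop(Thread t) { oldWriter = t; }
    }

    private final AutoCloseableLock datasetLock = new AutoCloseableLock();
    private volatile Thread writer;

    String recover() throws InterruptedException {
        while (true) {
            try {
                try (AutoCloseableLock lock = datasetLock.acquire()) {
                    Thread current = writer;
                    if (current != null && current != Thread.currentThread()) {
                        // The old writer cannot be stopped while we hold the
                        // lock it may need, so escape the lock scope first.
                        throw new MustStop(current);
                    }
                    writer = Thread.currentThread();
                    return "recovered";
                }
            } catch (MustStop e) {
                // Stop the competing writer outside the lock, then loop;
                // nothing observed earlier is trusted on the next iteration.
                e.oldWriter.interrupt();
                e.oldWriter.join(1000);
            }
        }
    }
}

Throwing to leave the lock scope, rather than calling stopWriter() inline, is what avoids deadlocking against a writer that itself needs the dataset lock to finish.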

Example 23 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method createTemporary().

@Override // FsDatasetSpi
public ReplicaHandler createTemporary(StorageType storageType, ExtendedBlock b) throws IOException {
    long startTimeMs = Time.monotonicNow();
    long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
    ReplicaInfo lastFoundReplicaInfo = null;
    do {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            ReplicaInfo currentReplicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
            if (currentReplicaInfo == lastFoundReplicaInfo) {
                if (lastFoundReplicaInfo != null) {
                    invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
                }
                FsVolumeReference ref = volumes.getNextVolume(storageType, b.getNumBytes());
                FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
                ReplicaInPipeline newReplicaInfo;
                try {
                    newReplicaInfo = v.createTemporary(b);
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
                return new ReplicaHandler(newReplicaInfo, ref);
            } else {
                if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp()
                        && (currentReplicaInfo.getState() == ReplicaState.TEMPORARY
                            || currentReplicaInfo.getState() == ReplicaState.RBW))) {
                    throw new ReplicaAlreadyExistsException("Block " + b
                        + " already exists in state " + currentReplicaInfo.getState()
                        + " and thus cannot be created.");
                }
                lastFoundReplicaInfo = currentReplicaInfo;
            }
        }
        // Hang too long, just bail out. This is not supposed to happen.
        long writerStopMs = Time.monotonicNow() - startTimeMs;
        if (writerStopMs > writerStopTimeoutMs) {
            LOG.warn("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
            throw new IOException("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
        }
        // Stop the previous writer
        ((ReplicaInPipeline) lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
    } while (true);
}
Also used: FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
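
createTemporary() adds a guard to the same retry loop: elapsed time is measured with a monotonic clock, and the loop throws rather than spinning forever if the old writer cannot be stopped. A stripped-down sketch of just that guard, with a hypothetical attemptOnce() standing in for the lock-guarded body and TIMEOUT_MS for the configured Xceiver stop timeout:

import java.io.IOException;

import org.apache.hadoop.util.Time;

class BoundedRetrySketch {
    // Hypothetical bound; the real loop uses the Xceiver stop timeout.
    private static final long TIMEOUT_MS = 60_000;

    // Placeholder for one lock-guarded attempt; true means success.
    private boolean attemptOnce() {
        return false;
    }

    void run() throws IOException {
        // monotonicNow() is immune to wall-clock adjustments, which is why
        // it, and not System.currentTimeMillis(), is used for timeouts.
        long startMs = Time.monotonicNow();
        do {
            if (attemptOnce()) {
                return;
            }
            long elapsedMs = Time.monotonicNow() - startMs;
            if (elapsedMs > TIMEOUT_MS) {
                // Not supposed to happen; fail loudly instead of hanging.
                throw new IOException("Gave up after " + elapsedMs + " ms");
            }
        } while (true);
    }
}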

Example 24 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class TestWriteToReplica, method testReplicaMapAfterDatanodeRestart().

/**
   * This is a test to check that the replica map is preserved across a
   * quick datanode restart (one that takes less than 5 minutes).
   * @throws Exception
   */
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
    try {
        cluster.waitActive();
        NameNode nn1 = cluster.getNameNode(0);
        NameNode nn2 = cluster.getNameNode(1);
        assertNotNull("cannot create nn1", nn1);
        assertNotNull("cannot create nn2", nn2);
        // check number of volumes in fsdataset
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
        List<FsVolumeSpi> volumes = null;
        try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
            // number of volumes should be 2 - [data1, data2]
            assertEquals("number of volumes is wrong", 2, referredVols.size());
            volumes = new ArrayList<>(referredVols.size());
            for (FsVolumeSpi vol : referredVols) {
                volumes.add(vol);
            }
        }
        ArrayList<String> bpList = new ArrayList<>(Arrays.asList(cluster.getNamesystem(0).getBlockPoolId(), cluster.getNamesystem(1).getBlockPoolId()));
        Assert.assertTrue("Cluster should have 2 block pools", bpList.size() == 2);
        createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
        ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
        oldReplicaMap.addAll(dataSet.volumeMap);
        cluster.restartDataNode(0);
        cluster.waitActive();
        dn = cluster.getDataNodes().get(0);
        dataSet = (FsDatasetImpl) dn.getFSDataset();
        testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), ArrayList (java.util.ArrayList), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), Test (org.junit.Test)
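
The test itself follows the standard MiniDFSCluster lifecycle: build the cluster, waitActive() until the NameNodes and DataNodes are up, and shut down in a finally block no matter how the assertions end. Reduced to that skeleton (the test body is elided and the class name is invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSkeleton {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            // Block until the NameNode(s) and DataNodes report in.
            cluster.waitActive();
            // ... exercise the cluster here ...
        } finally {
            // Always release ports, threads, and temporary storage.
            cluster.shutdown();
        }
    }
}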

Example 25 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class TestDirectoryScanner, method truncateBlockFile().

/** Truncate a block file */
private long truncateBlockFile() throws IOException {
    try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
        for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
            File f = new File(b.getBlockURI());
            File mf = new File(b.getMetadataURI());
            // Truncate a block file that has a corresponding metadata file
            if (f.exists() && f.length() != 0 && mf.exists()) {
                FileOutputStream s = null;
                FileChannel channel = null;
                try {
                    s = new FileOutputStream(f);
                    channel = s.getChannel();
                    channel.truncate(0);
                    LOG.info("Truncated block file " + f.getAbsolutePath());
                    return b.getBlockId();
                } finally {
                    IOUtils.cleanup(LOG, channel, s);
                }
            }
        }
    }
    return 0;
}
Also used: FileChannel (java.nio.channels.FileChannel), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), FileOutputStream (java.io.FileOutputStream), File (java.io.File)
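
The try/finally with IOUtils.cleanup() predates try-with-resources; the same truncation can be written more compactly today. A sketch of the equivalent (note that opening a FileOutputStream without the append flag already truncates the file to zero length, so the explicit truncate(0) mirrors the original code rather than being strictly necessary):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

class TruncateSketch {
    static void truncate(File f) throws IOException {
        // Resources are closed in reverse declaration order: channel, then stream.
        try (FileOutputStream s = new FileOutputStream(f);
             FileChannel channel = s.getChannel()) {
            channel.truncate(0);
        }
    }
}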

Aggregations

AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 44 usages
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 27 usages
IOException (java.io.IOException): 23 usages
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 17 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 10 usages
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 9 usages
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 8 usages
File (java.io.File): 7 usages
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5 usages
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 5 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4 usages
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 4 usages
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 4 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 4 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 3 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 3 usages
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 3 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 3 usages