Search in sources:

Example 41 with FileNotFoundException

use of java.io.FileNotFoundException in project hadoop by apache.

Source: class AbstractContractGetFileStatusTest, method testListFilesNoDir.

/**
 * Verify that {@code listFiles} on a nonexistent path fails with
 * {@link FileNotFoundException}, for both the non-recursive and the
 * recursive listing modes.
 *
 * @throws Throwable on any unexpected failure
 */
@Test
public void testListFilesNoDir() throws Throwable {
    describe("test the listFiles calls on a path which is not present");
    Path path = path("missing");
    // Both listing modes must fail the same way on a missing path.
    expectListFilesFailure(path, false);
    expectListFilesFailure(path, true);
}

/**
 * Expect {@code getFileSystem().listFiles(path, recursive)} to throw
 * {@link FileNotFoundException}; fail the test if an iterator is returned.
 *
 * @param path      path expected to be absent
 * @param recursive whether to request a recursive listing
 * @throws Throwable on any unexpected failure
 */
private void expectListFilesFailure(Path path, boolean recursive) throws Throwable {
    try {
        RemoteIterator<LocatedFileStatus> iterator =
            getFileSystem().listFiles(path, recursive);
        fail("Expected an exception, got an iterator: " + iterator);
    } catch (FileNotFoundException expected) {
        // expected
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileNotFoundException(java.io.FileNotFoundException) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Test(org.junit.Test)

Example 42 with FileNotFoundException

use of java.io.FileNotFoundException in project hadoop by apache.

Source: class VolumeScanner, method scanBlock.

/**
   * Scan a single block: resolve its generation stamp via the dataset, then
   * read it through a {@link BlockSender} at the given throttled rate,
   * reporting the outcome to {@code resultHandler}.
   *
   * @param cblock               The block to scan.
   * @param bytesPerSec          The bytes per second to scan at.
   *
   * @return                     The length of the block that was scanned, or
   *                               -1 if the block could not be scanned.
   */
private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
    // 'cblock' has a valid blockId and block pool id, but we don't yet know the
    // genstamp the block is supposed to have.  Ask the FsDatasetImpl for this
    // information.
    ExtendedBlock block = null;
    try {
        Block b = volume.getDataset().getStoredBlock(cblock.getBlockPoolId(), cblock.getBlockId());
        if (b == null) {
            LOG.info("Replica {} was not found in the VolumeMap for volume {}", cblock, volume);
        } else {
            block = new ExtendedBlock(cblock.getBlockPoolId(), b);
        }
    } catch (FileNotFoundException e) {
        // Replica disappeared between enumeration and lookup; benign race.
        LOG.info("FileNotFoundException while finding block {} on volume {}", cblock, volume);
    } catch (IOException e) {
        // Pass the exception as the final argument so SLF4J records the stack
        // trace; previously the cause was silently dropped from the log.
        LOG.warn("I/O error while finding block {} on volume {}", cblock, volume, e);
    }
    if (block == null) {
        // block not found.
        return -1;
    }
    LOG.debug("start scanning block {}", block);
    BlockSender blockSender = null;
    try {
        blockSender = new BlockSender(block, 0, -1, false, true, true, datanode, null, CachingStrategy.newDropBehind());
        throttler.setBandwidth(bytesPerSec);
        long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
        // null error => scan succeeded.
        resultHandler.handle(block, null);
        return bytesRead;
    } catch (IOException e) {
        // Delegate classification of the failure to the result handler.
        resultHandler.handle(block, e);
    } finally {
        IOUtils.cleanup(null, blockSender);
    }
    return -1;
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FileNotFoundException(java.io.FileNotFoundException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException)

Example 43 with FileNotFoundException

use of java.io.FileNotFoundException in project hadoop by apache.

Source: class VolumeScanner, method enableBlockPoolId.

/**
   * Allow the scanner to scan the given block pool: load the persisted block
   * iterator for it if one exists, otherwise start a fresh one, then wake the
   * scanner thread.
   *
   * @param bpid       The block pool id.
   */
public synchronized void enableBlockPoolId(String bpid) {
    for (BlockIterator iter : blockIters) {
        if (iter.getBlockPoolId().equals(bpid)) {
            LOG.warn("{}: already enabled scanning on block pool {}", this, bpid);
            return;
        }
    }
    BlockIterator iter = null;
    try {
        // Load a block iterator for the next block pool on the volume.
        iter = volume.loadBlockIterator(bpid, BLOCK_ITERATOR_NAME);
        LOG.trace("{}: loaded block iterator for {}.", this, bpid);
    } catch (FileNotFoundException e) {
        // No saved iterator state yet; fall through and create a new one.
        // Use a second placeholder rather than string concatenation so the
        // message is only built when DEBUG logging is enabled.
        LOG.debug("{}: failed to load block iterator: {}", this, e.getMessage());
    } catch (IOException e) {
        LOG.warn("{}: failed to load block iterator.", this, e);
    }
    if (iter == null) {
        iter = volume.newBlockIterator(bpid, BLOCK_ITERATOR_NAME);
        LOG.trace("{}: created new block iterator for {}.", this, bpid);
    }
    iter.setMaxStalenessMs(conf.maxStalenessMs);
    blockIters.add(iter);
    // Wake the scanner thread waiting on this object's monitor.
    notify();
}
Also used : BlockIterator(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)

Example 44 with FileNotFoundException

use of java.io.FileNotFoundException in project hadoop by apache.

Source: class BlockPoolSlice, method addReplicaToReplicasMap.

/**
 * Build a {@link ReplicaInfo} for {@code block} and register it in
 * {@code volumeMap}, resolving any conflict with a previously-mapped replica
 * of the same block id. Non-finalized replicas are restored as RBW when an
 * unexpired ".restart" meta file is present, otherwise as RWR.
 *
 * @param block               on-disk block to register
 * @param volumeMap           map of block pool id to replicas, updated here
 * @param lazyWriteReplicaMap tracker updated for transient-storage volumes
 * @param isFinalized         whether the replica was found in finalized state
 * @throws IOException if length validation or replica construction fails
 */
private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap, final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized) throws IOException {
    ReplicaInfo newReplica = null;
    long blockId = block.getBlockId();
    long genStamp = block.getGenerationStamp();
    if (isFinalized) {
        newReplica = new ReplicaBuilder(ReplicaState.FINALIZED).setBlockId(blockId).setLength(block.getNumBytes()).setGenerationStamp(genStamp).setFsVolume(volume).setDirectoryToUse(DatanodeUtil.idToBlockDir(finalizedDir, blockId)).build();
    } else {
        File file = new File(rbwDir, block.getBlockName());
        boolean loadRwr = true;
        // NOTE(review): File.pathSeparator (':' / ';') looks suspect here for
        // joining path components — File.separator would be conventional.
        // Presumably the code that writes the restart meta file uses the same
        // convention, so confirm both sides before changing either.
        File restartMeta = new File(file.getParent() + File.pathSeparator + "." + file.getName() + ".restart");
        Scanner sc = null;
        try {
            sc = new Scanner(restartMeta, "UTF-8");
            // The restart meta file exists
            if (sc.hasNextLong() && (sc.nextLong() > timer.now())) {
                // It didn't expire. Load the replica as a RBW.
                // We don't know the expected block length, so just use 0
                // and don't reserve any more space for writes.
                newReplica = new ReplicaBuilder(ReplicaState.RBW).setBlockId(blockId).setLength(validateIntegrityAndSetLength(file, genStamp)).setGenerationStamp(genStamp).setFsVolume(volume).setDirectoryToUse(file.getParentFile()).setWriterThread(null).setBytesToReserve(0).build();
                loadRwr = false;
            }
            sc.close();
            if (!fileIoProvider.delete(volume, restartMeta)) {
                FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
            }
        } catch (FileNotFoundException fnfe) {
        // Restart meta file does not exist: no restart window to honor,
        // so fall through and load the replica as RWR below.
        } finally {
            // May close sc a second time on the success path; Scanner.close()
            // is idempotent, so this is harmless.
            if (sc != null) {
                sc.close();
            }
        }
        // Restart meta doesn't exist or expired.
        if (loadRwr) {
            ReplicaBuilder builder = new ReplicaBuilder(ReplicaState.RWR).setBlockId(blockId).setLength(validateIntegrityAndSetLength(file, genStamp)).setGenerationStamp(genStamp).setFsVolume(volume).setDirectoryToUse(file.getParentFile());
            newReplica = builder.build();
        }
    }
    ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
    if (oldReplica == null) {
        volumeMap.add(bpid, newReplica);
    } else {
        // We have multiple replicas of the same block so decide which one
        // to keep.
        newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
    }
    // eventually.
    if (newReplica.getVolume().isTransientStorage()) {
        lazyWriteReplicaMap.addReplica(bpid, blockId, (FsVolumeImpl) newReplica.getVolume(), 0);
    } else {
        lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
    }
    if (oldReplica == null) {
        incrNumBlocks();
    }
}
Also used : Scanner(java.util.Scanner) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaBuilder(org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder) FileNotFoundException(java.io.FileNotFoundException) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)

Example 45 with FileNotFoundException

use of java.io.FileNotFoundException in project hadoop by apache.

Source: class FileIoProvider, method getFileOutputStream.

/**
   * Open a wrapped {@link FileOutputStream} on {@code f} via
   * {@link FileOutputStream#FileOutputStream(File, boolean)}, recording
   * profiling metadata-op timings around the open and routing any failure
   * through {@code onFailure}.
   *
   * @param volume  target volume. null if unavailable.
   * @param f  File object.
   * @param append  if true, then bytes will be written to the end of the
   *                file rather than the beginning.
   * @return  FileOutputStream to the given file object.
   * @throws FileNotFoundException
   */
public FileOutputStream getFileOutputStream(@Nullable FsVolumeSpi volume, File f, boolean append) throws FileNotFoundException {
    final long startTime = profilingEventHook.beforeMetadataOp(volume, OPEN);
    FileOutputStream stream = null;
    try {
        faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
        stream = new WrappedFileOutputStream(volume, f, append);
        profilingEventHook.afterMetadataOp(volume, OPEN, startTime);
        return stream;
    } catch (Exception e) {
        // Release the partially-opened stream, record the failure, and
        // propagate via precise rethrow (only FileNotFoundException or
        // unchecked exceptions can escape the try body).
        org.apache.commons.io.IOUtils.closeQuietly(stream);
        onFailure(volume, startTime);
        throw e;
    }
}
Also used : FileOutputStream(java.io.FileOutputStream) NativeIOException(org.apache.hadoop.io.nativeio.NativeIOException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException)

Aggregations

FileNotFoundException (java.io.FileNotFoundException)3218 IOException (java.io.IOException)1836 File (java.io.File)1277 FileInputStream (java.io.FileInputStream)814 FileOutputStream (java.io.FileOutputStream)492 InputStream (java.io.InputStream)466 BufferedReader (java.io.BufferedReader)262 FileReader (java.io.FileReader)230 ArrayList (java.util.ArrayList)205 Path (org.apache.hadoop.fs.Path)202 XmlPullParserException (org.xmlpull.v1.XmlPullParserException)189 InputStreamReader (java.io.InputStreamReader)171 Test (org.junit.Test)171 XmlPullParser (org.xmlpull.v1.XmlPullParser)166 BufferedInputStream (java.io.BufferedInputStream)138 ParcelFileDescriptor (android.os.ParcelFileDescriptor)131 Properties (java.util.Properties)120 URL (java.net.URL)119 FileStatus (org.apache.hadoop.fs.FileStatus)119 RandomAccessFile (java.io.RandomAccessFile)101