Use of java.io.FileNotFoundException in project hadoop by apache.
The class AbstractContractGetFileStatusTest, method testListFilesNoDir.
@Test
public void testListFilesNoDir() throws Throwable {
  describe("test the listFiles calls on a path which is not present");
  Path path = path("missing");
  try {
    RemoteIterator<LocatedFileStatus> iterator =
        getFileSystem().listFiles(path, false);
    fail("Expected an exception, got an iterator: " + iterator);
  } catch (FileNotFoundException expected) {
    // expected
  }
  try {
    RemoteIterator<LocatedFileStatus> iterator =
        getFileSystem().listFiles(path, true);
    fail("Expected an exception, got an iterator: " + iterator);
  } catch (FileNotFoundException expected) {
    // expected
  }
}
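A caller that wants a missing path to behave like an empty listing can catch the exception this contract test asserts. The following is a minimal sketch, not Hadoop code: the class ListFilesSafely and the method listOrSkip are illustrative names, and only the listFiles(Path, boolean) behavior verified above is assumed.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesSafely {
  /** Lists files under 'dir', treating a missing path as an empty result. */
  public static void listOrSkip(FileSystem fs, Path dir) throws IOException {
    final RemoteIterator<LocatedFileStatus> it;
    try {
      it = fs.listFiles(dir, true);
    } catch (FileNotFoundException e) {
      // listFiles throws FileNotFoundException for a missing path,
      // as the contract test above asserts; treat it as "nothing to list".
      return;
    }
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " len=" + status.getLen());
    }
  }
}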
Use of java.io.FileNotFoundException in project hadoop by apache.
The class VolumeScanner, method scanBlock.
/**
 * Scan a block.
 *
 * @param cblock The block to scan.
 * @param bytesPerSec The bytes per second to scan at.
 *
 * @return The length of the block that was scanned, or
 *         -1 if the block could not be scanned.
 */
private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
  // 'cblock' has a valid blockId and block pool id, but we don't yet know
  // the genstamp the block is supposed to have. Ask the FsDatasetImpl for
  // this information.
  ExtendedBlock block = null;
  try {
    Block b = volume.getDataset().getStoredBlock(
        cblock.getBlockPoolId(), cblock.getBlockId());
    if (b == null) {
      LOG.info("Replica {} was not found in the VolumeMap for volume {}",
          cblock, volume);
    } else {
      block = new ExtendedBlock(cblock.getBlockPoolId(), b);
    }
  } catch (FileNotFoundException e) {
    LOG.info("FileNotFoundException while finding block {} on volume {}",
        cblock, volume);
  } catch (IOException e) {
    LOG.warn("I/O error while finding block {} on volume {}",
        cblock, volume);
  }
  if (block == null) {
    // block not found.
    return -1;
  }
  LOG.debug("start scanning block {}", block);
  BlockSender blockSender = null;
  try {
    blockSender = new BlockSender(block, 0, -1, false, true, true,
        datanode, null, CachingStrategy.newDropBehind());
    throttler.setBandwidth(bytesPerSec);
    long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
    resultHandler.handle(block, null);
    return bytesRead;
  } catch (IOException e) {
    resultHandler.handle(block, e);
  } finally {
    IOUtils.cleanup(null, blockSender);
  }
  return -1;
}
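The bytesPerSec argument feeds throttler.setBandwidth, which caps the disk read rate so scanning does not starve regular datanode traffic. Below is a minimal sketch of that style of per-second bandwidth throttling; SimpleThrottler is an illustrative name, and it mirrors only the idea behind Hadoop's throttler, not its actual code.

// Sketch of one-second-window bandwidth throttling (assumed design,
// not Hadoop's DataTransferThrottler implementation).
class SimpleThrottler {
  private final long bytesPerSec;
  private long bytesThisWindow;
  private long windowStartMs = System.currentTimeMillis();

  SimpleThrottler(long bytesPerSec) {
    this.bytesPerSec = bytesPerSec;
  }

  /** Call after reading numBytes; sleeps if the per-second budget is spent. */
  synchronized void throttle(long numBytes) throws InterruptedException {
    bytesThisWindow += numBytes;
    long elapsed = System.currentTimeMillis() - windowStartMs;
    if (elapsed >= 1000) {
      // A new one-second window has started: reset the budget.
      windowStartMs = System.currentTimeMillis();
      bytesThisWindow = numBytes;
    } else if (bytesThisWindow > bytesPerSec) {
      // Budget exhausted: sleep out the rest of the window.
      Thread.sleep(1000 - elapsed);
      windowStartMs = System.currentTimeMillis();
      bytesThisWindow = 0;
    }
  }
}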
Use of java.io.FileNotFoundException in project hadoop by apache.
The class VolumeScanner, method enableBlockPoolId.
/**
 * Allow the scanner to scan the given block pool.
 *
 * @param bpid The block pool id.
 */
public synchronized void enableBlockPoolId(String bpid) {
  for (BlockIterator iter : blockIters) {
    if (iter.getBlockPoolId().equals(bpid)) {
      LOG.warn("{}: already enabled scanning on block pool {}", this, bpid);
      return;
    }
  }
  BlockIterator iter = null;
  try {
    // Load a block iterator for the next block pool on the volume.
    iter = volume.loadBlockIterator(bpid, BLOCK_ITERATOR_NAME);
    LOG.trace("{}: loaded block iterator for {}.", this, bpid);
  } catch (FileNotFoundException e) {
    LOG.debug("{}: failed to load block iterator: " + e.getMessage(), this);
  } catch (IOException e) {
    LOG.warn("{}: failed to load block iterator.", this, e);
  }
  if (iter == null) {
    iter = volume.newBlockIterator(bpid, BLOCK_ITERATOR_NAME);
    LOG.trace("{}: created new block iterator for {}.", this, bpid);
  }
  iter.setMaxStalenessMs(conf.maxStalenessMs);
  blockIters.add(iter);
  notify();
}
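The method demonstrates a load-or-create pattern: try to restore persisted iterator state, treat FileNotFoundException (no saved state yet) as routine, and fall back to a fresh iterator. A self-contained sketch of the pattern, with illustrative names (LoadOrCreate, Loader, Factory) that are not part of Hadoop:

import java.io.FileNotFoundException;
import java.io.IOException;

public final class LoadOrCreate {
  interface Loader<T> { T load() throws IOException; }
  interface Factory<T> { T create(); }

  /** Restores persisted state, falling back to a fresh instance. */
  public static <T> T loadOrCreate(Loader<T> loader, Factory<T> factory) {
    try {
      T state = loader.load();   // e.g. a saved block iterator position
      if (state != null) {
        return state;
      }
    } catch (FileNotFoundException e) {
      // No saved state yet: expected on first run, fall through.
    } catch (IOException e) {
      // Saved state unreadable: discard it and fall through.
    }
    return factory.create();     // start from the beginning
  }
}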
Use of java.io.FileNotFoundException in project hadoop by apache.
The class BlockPoolSlice, method addReplicaToReplicasMap.
private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap,
    final RamDiskReplicaTracker lazyWriteReplicaMap,
    boolean isFinalized) throws IOException {
  ReplicaInfo newReplica = null;
  long blockId = block.getBlockId();
  long genStamp = block.getGenerationStamp();
  if (isFinalized) {
    newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
        .setBlockId(blockId)
        .setLength(block.getNumBytes())
        .setGenerationStamp(genStamp)
        .setFsVolume(volume)
        .setDirectoryToUse(DatanodeUtil.idToBlockDir(finalizedDir, blockId))
        .build();
  } else {
    File file = new File(rbwDir, block.getBlockName());
    boolean loadRwr = true;
    File restartMeta = new File(file.getParent() +
        File.pathSeparator + "." + file.getName() + ".restart");
    Scanner sc = null;
    try {
      sc = new Scanner(restartMeta, "UTF-8");
      // The restart meta file exists
      if (sc.hasNextLong() && (sc.nextLong() > timer.now())) {
        // It didn't expire. Load the replica as an RBW.
        // We don't know the expected block length, so just use 0
        // and don't reserve any more space for writes.
        newReplica = new ReplicaBuilder(ReplicaState.RBW)
            .setBlockId(blockId)
            .setLength(validateIntegrityAndSetLength(file, genStamp))
            .setGenerationStamp(genStamp)
            .setFsVolume(volume)
            .setDirectoryToUse(file.getParentFile())
            .setWriterThread(null)
            .setBytesToReserve(0)
            .build();
        loadRwr = false;
      }
      sc.close();
      if (!fileIoProvider.delete(volume, restartMeta)) {
        FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " +
            restartMeta.getPath());
      }
    } catch (FileNotFoundException fnfe) {
      // nothing to do here
    } finally {
      if (sc != null) {
        sc.close();
      }
    }
    // Restart meta doesn't exist or expired.
    if (loadRwr) {
      ReplicaBuilder builder = new ReplicaBuilder(ReplicaState.RWR)
          .setBlockId(blockId)
          .setLength(validateIntegrityAndSetLength(file, genStamp))
          .setGenerationStamp(genStamp)
          .setFsVolume(volume)
          .setDirectoryToUse(file.getParentFile());
      newReplica = builder.build();
    }
  }
  ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
  if (oldReplica == null) {
    volumeMap.add(bpid, newReplica);
  } else {
    // We have multiple replicas of the same block so decide which one
    // to keep.
    newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
  }
  // Track replicas on transient storage in the lazy-write map so they
  // can be persisted to disk eventually.
  if (newReplica.getVolume().isTransientStorage()) {
    lazyWriteReplicaMap.addReplica(bpid, blockId,
        (FsVolumeImpl) newReplica.getVolume(), 0);
  } else {
    lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
  }
  if (oldReplica == null) {
    incrNumBlocks();
  }
}
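The restart-meta handling above reads a single expiry timestamp from a sidecar file and treats a missing file as "no restart in progress". The same pattern in isolation, as a sketch: RestartMeta and readRestartDeadline are illustrative names, not part of Hadoop.

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

public final class RestartMeta {
  /** Returns the deadline stored in the file, or -1 if absent or empty. */
  public static long readRestartDeadline(File restartMeta) {
    // try-with-resources closes the Scanner on every path.
    try (Scanner sc = new Scanner(restartMeta, "UTF-8")) {
      return sc.hasNextLong() ? sc.nextLong() : -1L;
    } catch (FileNotFoundException e) {
      return -1L;   // no restart meta file: nothing to honor
    }
  }
}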
Use of java.io.FileNotFoundException in project hadoop by apache.
The class FileIoProvider, method getFileOutputStream.
/**
 * Create a FileOutputStream using
 * {@link FileOutputStream#FileOutputStream(File, boolean)}.
 *
 * Wraps the created output stream to intercept write calls
 * before delegating to the wrapped stream.
 *
 * @param volume target volume. null if unavailable.
 * @param f File object.
 * @param append if true, then bytes will be written to the end of the
 *               file rather than the beginning.
 * @return FileOutputStream to the given file object.
 * @throws FileNotFoundException if the file cannot be opened for writing.
 */
public FileOutputStream getFileOutputStream(
    @Nullable FsVolumeSpi volume, File f,
    boolean append) throws FileNotFoundException {
  final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
  FileOutputStream fos = null;
  try {
    faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
    fos = new WrappedFileOutputStream(volume, f, append);
    profilingEventHook.afterMetadataOp(volume, OPEN, begin);
    return fos;
  } catch (Exception e) {
    org.apache.commons.io.IOUtils.closeQuietly(fos);
    onFailure(volume, begin);
    throw e;
  }
}
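A hedged usage sketch: AppendExample is an illustrative class, and the provider and volume are assumed to come from the datanode's context; only the getFileOutputStream(volume, f, append) signature shown above is taken from the source.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class AppendExample {
  static void appendBytes(FileIoProvider provider, FsVolumeSpi volume,
      File f, byte[] data) throws IOException {
    // try-with-resources closes the instrumented stream; writes pass
    // through the wrapped stream's interception hooks.
    try (FileOutputStream fos =
        provider.getFileOutputStream(volume, f, true)) {
      fos.write(data);
    }
  }
}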