Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestDU, method createFile.
private void createFile(File newFile, int size) throws IOException {
  // write random data so that filesystems with compression enabled (e.g., ZFS)
  // can't compress the file
  Random random = new Random();
  byte[] data = new byte[size];
  random.nextBytes(data);

  newFile.createNewFile();
  RandomAccessFile file = new RandomAccessFile(newFile, "rws");
  file.write(data);
  file.getFD().sync();
  file.close();
}
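A hypothetical usage sketch follows (the directory, file name, and 32 KB size are illustrative assumptions, not taken from TestDU): because the data is random, the space a du-style check reports for the file should track the requested size even on filesystems that compress transparently.

// Hypothetical caller; the names and the 32 KB size are illustrative only.
@Test
public void testCreateIncompressibleFile() throws IOException {
  File dir = new File(System.getProperty("test.build.data", "/tmp"), "dutmp");
  assertTrue(dir.mkdirs() || dir.isDirectory());
  File data = new File(dir, "data");
  createFile(data, 32 * 1024);
  assertEquals(32 * 1024, data.length());
}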
Use of java.io.RandomAccessFile in project hadoop by apache.
The class BlockPoolSlice, method validateIntegrityAndSetLength.
/**
* Find out the number of bytes in the block that match its crc.
*
* This algorithm assumes that data corruption caused by unexpected
* datanode shutdown occurs only in the last crc chunk. So it checks
* only the last chunk.
*
* @param blockFile the block file
* @param genStamp generation stamp of the block
* @return the number of valid bytes
*/
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    try (DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(
            fileIoProvider.getFileInputStream(volume, metaFile),
            ioFileBufferSize))) {
      // read and handle the common header here. For now just a version
      final DataChecksum checksum =
          BlockMetadataHeader.readDataChecksum(checksumIn, metaFile);
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      long numChunks = Math.min(
          (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
          (metaFileLen - crcHeaderLen) / checksumSize);
      if (numChunks == 0) {
        return 0;
      }
      try (InputStream blockIn =
               fileIoProvider.getFileInputStream(volume, blockFile);
           ReplicaInputStreams ris = new ReplicaInputStreams(
               blockIn, checksumIn, volume.obtainReference(), fileIoProvider)) {
        ris.skipChecksumFully((numChunks - 1) * checksumSize);
        long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
        ris.skipDataFully(lastChunkStartPos);
        int lastChunkSize = (int) Math.min(
            bytesPerChecksum, blockFileLen - lastChunkStartPos);
        byte[] buf = new byte[lastChunkSize + checksumSize];
        ris.readChecksumFully(buf, lastChunkSize, checksumSize);
        ris.readDataFully(buf, 0, lastChunkSize);
        checksum.update(buf, 0, lastChunkSize);
        long validFileLength;
        if (checksum.compare(buf, lastChunkSize)) {
          // last chunk matches crc
          validFileLength = lastChunkStartPos + lastChunkSize;
        } else {
          // last chunk is corrupt
          validFileLength = lastChunkStartPos;
        }
        // truncate if extra bytes are present without CRC
        if (blockFile.length() > validFileLength) {
          try (RandomAccessFile blockRAF = fileIoProvider.getRandomAccessFile(
              volume, blockFile, "rw")) {
            // truncate blockFile
            blockRAF.setLength(validFileLength);
          }
        }
        return validFileLength;
      }
    }
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  }
}
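To see the arithmetic above in isolation, here is a self-contained sketch with made-up lengths (512-byte chunks and 4-byte checksums are common Hadoop defaults, but every concrete number, including the 5-byte header, is an assumption for illustration): the chunk count is bounded both by the data actually on disk and by the checksums actually stored in the meta file, and only the final, possibly partial, chunk is re-verified.

// Illustrative values only; not read from any real block or meta file.
long blockFileLen = 1300;                // data bytes in the block file
int crcHeaderLen = 5;                    // assumed checksum header length
int bytesPerChecksum = 512;
int checksumSize = 4;
long metaFileLen = crcHeaderLen + 3 * checksumSize;  // header + 3 stored CRCs

long chunksByData = (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum; // 3
long chunksByCrc = (metaFileLen - crcHeaderLen) / checksumSize;               // 3
long numChunks = Math.min(chunksByData, chunksByCrc);                         // 3

long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;                  // 1024
int lastChunkSize = (int) Math.min(
    bytesPerChecksum, blockFileLen - lastChunkStartPos);                      // 276
// Only bytes [1024, 1300) are read back and compared with the last stored CRC;
// if they do not match, the block is treated as valid only up to byte 1024.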
Use of java.io.RandomAccessFile in project hadoop by apache.
The class FileIoProvider, method getRandomAccessFile.
/**
* Create a RandomAccessFile using
* {@link RandomAccessFile#RandomAccessFile(File, String)}.
*
* Wraps the created RandomAccessFile to intercept IO calls
* before delegating to the wrapped RandomAccessFile.
*
* @param volume target volume. null if unavailable.
* @param f File object.
* @param mode See {@link RandomAccessFile} for a description
* of the mode string.
* @return RandomAccessFile representing the given file.
* @throws FileNotFoundException
*/
public RandomAccessFile getRandomAccessFile(
    @Nullable FsVolumeSpi volume, File f, String mode)
    throws FileNotFoundException {
  final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
  RandomAccessFile raf = null;
  try {
    faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
    raf = new WrappedRandomAccessFile(volume, f, mode);
    profilingEventHook.afterMetadataOp(volume, OPEN, begin);
    return raf;
  } catch (Exception e) {
    org.apache.commons.io.IOUtils.closeQuietly(raf);
    onFailure(volume, begin);
    throw e;
  }
}
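The interception idea itself can be shown with a hypothetical, self-contained wrapper (this is not Hadoop's WrappedRandomAccessFile, which routes calls through the profiling and fault-injection hooks shown above): subclass RandomAccessFile, let super perform the real I/O, and run the side effect around the delegated call.

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical wrapper that counts bytes written; illustrative only.
class CountingRandomAccessFile extends RandomAccessFile {
  private final AtomicLong bytesWritten;

  CountingRandomAccessFile(File f, String mode, AtomicLong bytesWritten)
      throws FileNotFoundException {
    super(f, mode);
    this.bytesWritten = bytesWritten;
  }

  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    super.write(b, off, len);      // delegate the actual I/O
    bytesWritten.addAndGet(len);   // side effect observed around the call
  }
}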
Use of java.io.RandomAccessFile in project hadoop by apache.
The class FsDatasetTestUtil, method assertFileLockReleased.
/**
* Asserts that the storage lock file in the given directory has been
* released. This method works by trying to acquire the lock file itself. If
* locking fails here, then the main code must have failed to release it.
*
* @param dir the storage directory to check
* @throws IOException if there is an unexpected I/O error
*/
public static void assertFileLockReleased(String dir) throws IOException {
  StorageLocation sl = StorageLocation.parse(dir);
  File lockFile = new File(new File(sl.getUri()), Storage.STORAGE_FILE_LOCK);
  try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
       FileChannel channel = raf.getChannel()) {
    FileLock lock = channel.tryLock();
    assertNotNull(String.format(
        "Lock file at %s appears to be held by a different process.",
        lockFile.getAbsolutePath()), lock);
    if (lock != null) {
      try {
        lock.release();
      } catch (IOException e) {
        FsDatasetImpl.LOG.warn(String.format(
            "I/O error releasing file lock %s.",
            lockFile.getAbsolutePath()), e);
        throw e;
      }
    }
  } catch (OverlappingFileLockException e) {
    fail(String.format("Must release lock file at %s.",
        lockFile.getAbsolutePath()));
  }
}
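For context, the two outcomes the assertion distinguishes can be reproduced with a hedged, self-contained sketch (the helper method is made up; lockFile is any writable file used only for locking): FileChannel.tryLock() returns null when another process already holds the lock, and throws OverlappingFileLockException when a lock on the same region is already held within the current JVM.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

// Hypothetical helper, not part of FsDatasetTestUtil.
static void demonstrateTryLock(File lockFile) throws IOException {
  try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
       FileChannel channel = raf.getChannel()) {
    FileLock held = channel.tryLock();   // null if another process holds it
    if (held == null) {
      return;
    }
    try {
      channel.tryLock();                 // same JVM, overlapping region
    } catch (OverlappingFileLockException expected) {
      // a second process would instead see tryLock() return null
    } finally {
      held.release();
    }
  }
}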
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestOfflineImageViewer, method testFileDistributionCalculator.
@Test
public void testFileDistributionCalculator() throws IOException {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  PrintStream o = new PrintStream(output);
  new FileDistributionCalculator(new Configuration(), 0, 0, false, o)
      .visit(new RandomAccessFile(originalFsimage, "r"));
  o.close();

  String outputString = output.toString();
  Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher = p.matcher(outputString);
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles = Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR + filesECCount + 1, totalFiles);

  p = Pattern.compile("totalDirectories = (\\d+)\n");
  matcher = p.matcher(outputString);
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs = Integer.parseInt(matcher.group(1));
  // totalDirs includes root directory
  assertEquals(dirCount + 1, totalDirs);

  FileStatus maxFile = Collections.max(writtenFiles.values(),
      new Comparator<FileStatus>() {
        @Override
        public int compare(FileStatus first, FileStatus second) {
          return first.getLen() < second.getLen() ? -1
              : ((first.getLen() == second.getLen()) ? 0 : 1);
        }
      });
  p = Pattern.compile("maxFileSize = (\\d+)\n");
  matcher = p.matcher(output.toString("UTF-8"));
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
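The extraction idiom used above can also be shown in isolation (the report string and its numbers are made up; the real text comes from FileDistributionCalculator): compile the pattern with a single capturing group, find the first match, and parse the captured digits.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative report text; the numbers are invented for the sketch.
String report = "totalFiles = 42\ntotalDirectories = 7\nmaxFileSize = 1048576\n";
Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
Matcher m = p.matcher(report);
if (m.find()) {
  int totalFiles = Integer.parseInt(m.group(1));  // 42
}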