Example 6 with RandomAccessFile

use of java.io.RandomAccessFile in project hadoop by apache.

the class TestDU method createFile.

private void createFile(File newFile, int size) throws IOException {
    // write random data so that filesystems with compression enabled (e.g., ZFS)
    // can't compress the file
    Random random = new Random();
    byte[] data = new byte[size];
    random.nextBytes(data);
    newFile.createNewFile();
    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
    file.write(data);
    file.getFD().sync();
    file.close();
}
Also used : Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile)
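
For context, a minimal, self-contained sketch of the same pattern: fill a file with incompressible random bytes, force them to disk with "rws" plus getFD().sync(), and confirm the on-disk length. The temporary-file location and the printed check are illustrative assumptions, not part of TestDU.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;

public class IncompressibleFileDemo {
    public static void main(String[] args) throws IOException {
        File f = File.createTempFile("incompressible", ".dat");
        int size = 128 * 1024;
        byte[] data = new byte[size];
        // Random bytes defeat filesystem-level compression (e.g., ZFS).
        new Random().nextBytes(data);
        try (RandomAccessFile raf = new RandomAccessFile(f, "rws")) {
            raf.write(data);
            // Flush file data and metadata to the storage device.
            raf.getFD().sync();
        }
        System.out.println(f + " length=" + f.length()); // expected: 131072
        f.delete();
    }
}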

Example 7 with RandomAccessFile

use of java.io.RandomAccessFile in project hadoop by apache.

the class BlockPoolSlice method validateIntegrityAndSetLength.

/**
   * Find out the number of bytes in the block that match its crc.
   *
   * This algorithm assumes that data corruption caused by unexpected
   * datanode shutdown occurs only in the last crc chunk. So it checks
   * only the last chunk.
   *
   * @param blockFile the block file
   * @param genStamp generation stamp of the block
   * @return the number of valid bytes
   */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
    try {
        final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
        long blockFileLen = blockFile.length();
        long metaFileLen = metaFile.length();
        int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
        if (!blockFile.exists() || blockFileLen == 0 || !metaFile.exists() || metaFileLen < crcHeaderLen) {
            return 0;
        }
        try (DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(fileIoProvider.getFileInputStream(volume, metaFile), ioFileBufferSize))) {
            // read and handle the common header here. For now just a version
            final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(checksumIn, metaFile);
            int bytesPerChecksum = checksum.getBytesPerChecksum();
            int checksumSize = checksum.getChecksumSize();
            long numChunks = Math.min((blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum, (metaFileLen - crcHeaderLen) / checksumSize);
            if (numChunks == 0) {
                return 0;
            }
            try (InputStream blockIn = fileIoProvider.getFileInputStream(volume, blockFile);
                ReplicaInputStreams ris = new ReplicaInputStreams(blockIn, checksumIn, volume.obtainReference(), fileIoProvider)) {
                ris.skipChecksumFully((numChunks - 1) * checksumSize);
                long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
                ris.skipDataFully(lastChunkStartPos);
                int lastChunkSize = (int) Math.min(bytesPerChecksum, blockFileLen - lastChunkStartPos);
                byte[] buf = new byte[lastChunkSize + checksumSize];
                ris.readChecksumFully(buf, lastChunkSize, checksumSize);
                ris.readDataFully(buf, 0, lastChunkSize);
                checksum.update(buf, 0, lastChunkSize);
                long validFileLength;
                if (checksum.compare(buf, lastChunkSize)) {
                    // last chunk matches crc
                    validFileLength = lastChunkStartPos + lastChunkSize;
                } else {
                    // last chunk is corrupt
                    validFileLength = lastChunkStartPos;
                }
                // truncate if extra bytes are present without CRC
                if (blockFile.length() > validFileLength) {
                    try (RandomAccessFile blockRAF = fileIoProvider.getRandomAccessFile(volume, blockFile, "rw")) {
                        // truncate blockFile
                        blockRAF.setLength(validFileLength);
                    }
                }
                return validFileLength;
            }
        }
    } catch (IOException e) {
        FsDatasetImpl.LOG.warn(e);
        return 0;
    }
}
Also used : ReplicaInputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams) RandomAccessFile(java.io.RandomAccessFile) BufferedInputStream(java.io.BufferedInputStream) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) IOException(java.io.IOException) File(java.io.File) DataChecksum(org.apache.hadoop.util.DataChecksum)
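
A simplified sketch of the same "check only the last chunk" idea, assuming a toy metadata layout of one 8-byte CRC32 value per fixed-size data chunk with no header. HDFS's real meta-file format (and DataChecksum) differ; this only illustrates the seek, compare, and truncate logic.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.zip.CRC32;

public class LastChunkValidator {
    static final int BYTES_PER_CHUNK = 512; // assumed chunk size
    static final int CRC_SIZE = 8;          // CRC32 value stored as a long

    static long validateAndTruncate(String blockPath, String metaPath) throws IOException {
        try (RandomAccessFile block = new RandomAccessFile(blockPath, "rw");
             RandomAccessFile meta = new RandomAccessFile(metaPath, "r")) {
            long blockLen = block.length();
            long numChunks = Math.min((blockLen + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK,
                                      meta.length() / CRC_SIZE);
            if (numChunks == 0) {
                return 0;
            }
            // Only the last chunk is suspect, so seek straight to it.
            long lastChunkStart = (numChunks - 1) * BYTES_PER_CHUNK;
            int lastChunkSize = (int) Math.min(BYTES_PER_CHUNK, blockLen - lastChunkStart);
            byte[] buf = new byte[lastChunkSize];
            block.seek(lastChunkStart);
            block.readFully(buf);
            meta.seek((numChunks - 1) * CRC_SIZE);
            long storedCrc = meta.readLong();

            CRC32 crc = new CRC32();
            crc.update(buf, 0, lastChunkSize);
            long validLen = (crc.getValue() == storedCrc)
                ? lastChunkStart + lastChunkSize // last chunk matches its crc
                : lastChunkStart;                // last chunk is corrupt, drop it
            if (blockLen > validLen) {
                block.setLength(validLen);       // truncate bytes without a verified crc
            }
            return validLen;
        }
    }
}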

Example 8 with RandomAccessFile

use of java.io.RandomAccessFile in project hadoop by apache.

the class FileIoProvider method getRandomAccessFile.

/**
   * Create a RandomAccessFile using
   * {@link RandomAccessFile#RandomAccessFile(File, String)}.
   *
   * Wraps the created RandomAccessFile to intercept IO calls
   * before delegating to the underlying RandomAccessFile.
   *
   * @param volume  target volume. null if unavailable.
   * @param f  File object.
   * @param mode  See {@link RandomAccessFile} for a description
   *              of the mode string.
   * @return RandomAccessFile representing the given file.
   * @throws FileNotFoundException
   */
public RandomAccessFile getRandomAccessFile(@Nullable FsVolumeSpi volume, File f, String mode) throws FileNotFoundException {
    final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
    RandomAccessFile raf = null;
    try {
        faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
        raf = new WrappedRandomAccessFile(volume, f, mode);
        profilingEventHook.afterMetadataOp(volume, OPEN, begin);
        return raf;
    } catch (Exception e) {
        org.apache.commons.io.IOUtils.closeQuietly(raf);
        onFailure(volume, begin);
        throw e;
    }
}
Also used : RandomAccessFile(java.io.RandomAccessFile) NativeIOException(org.apache.hadoop.io.nativeio.NativeIOException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException)
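
WrappedRandomAccessFile is internal to FileIoProvider, so as a rough stand-in, the sketch below shows the general wrapping shape: a RandomAccessFile subclass that times the bulk read and write calls it overrides before delegating to the parent class. The timing output is an illustrative assumption; Hadoop's real wrapper hooks into the provider's profiling and fault-injection machinery instead.

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;

public class TimedRandomAccessFile extends RandomAccessFile {
    public TimedRandomAccessFile(File f, String mode) throws FileNotFoundException {
        super(f, mode);
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        long begin = System.nanoTime();
        int n = super.read(b, off, len); // delegate to the real file
        System.out.printf("read %d bytes in %d ns%n", n, System.nanoTime() - begin);
        return n;
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        long begin = System.nanoTime();
        super.write(b, off, len); // delegate to the real file
        System.out.printf("wrote %d bytes in %d ns%n", len, System.nanoTime() - begin);
    }
}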

Example 9 with RandomAccessFile

use of java.io.RandomAccessFile in project hadoop by apache.

the class FsDatasetTestUtil method assertFileLockReleased.

/**
   * Asserts that the storage lock file in the given directory has been
   * released.  This method works by trying to acquire the lock file itself.  If
   * locking fails here, then the main code must have failed to release it.
   *
   * @param dir the storage directory to check
   * @throws IOException if there is an unexpected I/O error
   */
public static void assertFileLockReleased(String dir) throws IOException {
    StorageLocation sl = StorageLocation.parse(dir);
    File lockFile = new File(new File(sl.getUri()), Storage.STORAGE_FILE_LOCK);
    try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
        FileChannel channel = raf.getChannel()) {
        FileLock lock = channel.tryLock();
        assertNotNull(String.format("Lock file at %s appears to be held by a different process.", lockFile.getAbsolutePath()), lock);
        if (lock != null) {
            try {
                lock.release();
            } catch (IOException e) {
                FsDatasetImpl.LOG.warn(String.format("I/O error releasing file lock %s.", lockFile.getAbsolutePath()), e);
                throw e;
            }
        }
    } catch (OverlappingFileLockException e) {
        fail(String.format("Must release lock file at %s.", lockFile.getAbsolutePath()));
    }
}
Also used : RandomAccessFile(java.io.RandomAccessFile) FileChannel(java.nio.channels.FileChannel) FileLock(java.nio.channels.FileLock) IOException(java.io.IOException) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) OverlappingFileLockException(java.nio.channels.OverlappingFileLockException)
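
The same lock-probing technique works outside of Hadoop; a minimal sketch follows, assuming an illustrative lock-file name in place of Storage.STORAGE_FILE_LOCK. Acquiring the exclusive lock means the previous holder released it; a null lock or an OverlappingFileLockException means it is still held.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

public class LockProbe {
    /** Returns true if the lock file could be acquired, i.e. it has been released. */
    static boolean isLockReleased(File lockFile) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
             FileChannel channel = raf.getChannel()) {
            FileLock lock = channel.tryLock(); // null if another process holds it
            if (lock == null) {
                return false;
            }
            lock.release(); // give the lock back immediately
            return true;
        } catch (OverlappingFileLockException e) {
            return false;   // already held elsewhere in this same JVM
        }
    }

    public static void main(String[] args) throws IOException {
        File lockFile = new File("in_use.lock"); // assumed name for illustration
        System.out.println("released? " + isLockReleased(lockFile));
    }
}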

Example 10 with RandomAccessFile

use of java.io.RandomAccessFile in project hadoop by apache.

the class TestOfflineImageViewer method testFileDistributionCalculator.

@Test
public void testFileDistributionCalculator() throws IOException {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    PrintStream o = new PrintStream(output);
    new FileDistributionCalculator(new Configuration(), 0, 0, false, o).visit(new RandomAccessFile(originalFsimage, "r"));
    o.close();
    String outputString = output.toString();
    Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
    Matcher matcher = p.matcher(outputString);
    assertTrue(matcher.find() && matcher.groupCount() == 1);
    int totalFiles = Integer.parseInt(matcher.group(1));
    assertEquals(NUM_DIRS * FILES_PER_DIR + filesECCount + 1, totalFiles);
    p = Pattern.compile("totalDirectories = (\\d+)\n");
    matcher = p.matcher(outputString);
    assertTrue(matcher.find() && matcher.groupCount() == 1);
    int totalDirs = Integer.parseInt(matcher.group(1));
    // totalDirs includes root directory
    assertEquals(dirCount + 1, totalDirs);
    FileStatus maxFile = Collections.max(writtenFiles.values(), new Comparator<FileStatus>() {

        @Override
        public int compare(FileStatus first, FileStatus second) {
            return first.getLen() < second.getLen() ? -1 : ((first.getLen() == second.getLen()) ? 0 : 1);
        }
    });
    p = Pattern.compile("maxFileSize = (\\d+)\n");
    matcher = p.matcher(output.toString("UTF-8"));
    assertTrue(matcher.find() && matcher.groupCount() == 1);
    assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
Also used : PrintStream(java.io.PrintStream) Pattern(java.util.regex.Pattern) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) RandomAccessFile(java.io.RandomAccessFile) Matcher(java.util.regex.Matcher) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)
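
The test's output scraping boils down to matching "name = value" lines with Pattern/Matcher; a small self-contained sketch of that step is below. The sample text is made up; the real test captures FileDistributionCalculator's report from a ByteArrayOutputStream.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class StatScrapeDemo {
    static long extract(String output, String name) {
        Matcher m = Pattern.compile(name + " = (\\d+)\n").matcher(output);
        if (!m.find()) {
            throw new IllegalStateException(name + " not found in output");
        }
        return Long.parseLong(m.group(1));
    }

    public static void main(String[] args) {
        String sample = "totalFiles = 42\ntotalDirectories = 7\nmaxFileSize = 1048576\n";
        System.out.println(extract(sample, "totalFiles"));       // 42
        System.out.println(extract(sample, "totalDirectories")); // 7
        System.out.println(extract(sample, "maxFileSize"));      // 1048576
    }
}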

Aggregations

RandomAccessFile (java.io.RandomAccessFile): 866
IOException (java.io.IOException): 425
File (java.io.File): 349
FileChannel (java.nio.channels.FileChannel): 133
FileNotFoundException (java.io.FileNotFoundException): 84
ByteBuffer (java.nio.ByteBuffer): 78
Test (org.junit.Test): 78
FileLock (java.nio.channels.FileLock): 64
EOFException (java.io.EOFException): 50
FileOutputStream (java.io.FileOutputStream): 47
FileInputStream (java.io.FileInputStream): 40
InputStream (java.io.InputStream): 36
MappedByteBuffer (java.nio.MappedByteBuffer): 33
Random (java.util.Random): 26
ByteArrayInputStream (java.io.ByteArrayInputStream): 24
BufferedInputStream (java.io.BufferedInputStream): 21
DataInputStream (java.io.DataInputStream): 19
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 17
Configuration (org.apache.hadoop.conf.Configuration): 16
AtomicFile (android.util.AtomicFile): 12