Example 11 with RandomAccessFile

Use of java.io.RandomAccessFile in project hadoop by apache.

The class TestOfflineImageViewer, method testTruncatedFSImage.

@Test(expected = IOException.class)
public void testTruncatedFSImage() throws IOException {
    File truncatedFile = new File(tempDir, "truncatedFsImage");
    PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
    copyPartOfFile(originalFsimage, truncatedFile);
    new FileDistributionCalculator(new Configuration(), 0, 0, false, output).visit(new RandomAccessFile(truncatedFile, "r"));
}
Also used: PrintStream(java.io.PrintStream) Configuration(org.apache.hadoop.conf.Configuration) RandomAccessFile(java.io.RandomAccessFile) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) File(java.io.File) Test(org.junit.Test)
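
The helper copyPartOfFile is a private method of the test class and is not shown here. A minimal sketch of what it might look like, assuming it copies only a leading slice of the fsimage (the exact cut-off point is an assumption, not Hadoop's actual value):

// Hypothetical sketch of the copyPartOfFile helper referenced above.
// Copying only a leading slice of the fsimage leaves the destination
// truncated mid-stream, which is what makes the visitor throw IOException.
private void copyPartOfFile(File src, File dest) throws IOException {
    try (FileInputStream in = new FileInputStream(src);
         FileOutputStream out = new FileOutputStream(dest)) {
        // Copy roughly the first half of the file (assumed cut-off).
        in.getChannel().transferTo(0, src.length() / 2, out.getChannel());
    }
}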

Example 12 with RandomAccessFile

Use of java.io.RandomAccessFile in project hadoop by apache.

The class TestOfflineImageViewerForAcl, method testPBDelimitedWriterForAcl.

@Test
public void testPBDelimitedWriterForAcl() throws Exception {
    final String DELIMITER = "\t";
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    try (PrintStream o = new PrintStream(output)) {
        // Run the writer entirely in memory.
        PBImageDelimitedTextWriter v = new PBImageDelimitedTextWriter(o, DELIMITER, "");
        v.visit(new RandomAccessFile(originalFsimage, "r"));
    }
    try (ByteArrayInputStream input = new ByteArrayInputStream(output.toByteArray());
        BufferedReader reader = new BufferedReader(new InputStreamReader(input))) {
        String line;
        boolean header = true;
        while ((line = reader.readLine()) != null) {
            String[] fields = line.split(DELIMITER);
            if (!header) {
                String filePath = fields[0];
                String permission = fields[9];
                if (!filePath.equals("/")) {
                    boolean hasAcl = !filePath.toLowerCase().contains("noacl");
                    assertEquals(hasAcl, permission.endsWith("+"));
                }
            }
            header = false;
        }
    }
}
Also used: PrintStream(java.io.PrintStream) RandomAccessFile(java.io.RandomAccessFile) InputStreamReader(java.io.InputStreamReader) ByteArrayInputStream(java.io.ByteArrayInputStream) BufferedReader(java.io.BufferedReader) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)
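
The read-back loop above uses a boolean flag to skip the header row. The same idiom, extracted as a small standalone helper; the helper name and signature are illustrative, not part of Hadoop:

// Illustrative helper, not part of the Hadoop API. Requires java.io.*,
// java.nio.charset.StandardCharsets, and java.util.*.
static List<String[]> parseDelimitedDump(byte[] dump, String delimiter) throws IOException {
    List<String[]> rows = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            new ByteArrayInputStream(dump), StandardCharsets.UTF_8))) {
        String line;
        boolean header = true;
        while ((line = reader.readLine()) != null) {
            if (!header) {
                // The -1 limit keeps trailing empty fields, so every row
                // has the same width as the header.
                rows.add(line.split(delimiter, -1));
            }
            header = false;
        }
    }
    return rows;
}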

Example 13 with RandomAccessFile

Use of java.io.RandomAccessFile in project hadoop by apache.

The class FsVolumeImpl, method loadLastPartialChunkChecksum.

@Override
public byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile) throws IOException {
    // The try-with-resources block below closes the FileInputStream once
    // the metadata header has been read.
    DataChecksum dcs;
    try (FileInputStream fis = fileIoProvider.getFileInputStream(this, metaFile)) {
        dcs = BlockMetadataHeader.readHeader(fis).getChecksum();
    }
    final int checksumSize = dcs.getChecksumSize();
    final long onDiskLen = blockFile.length();
    final int bytesPerChecksum = dcs.getBytesPerChecksum();
    if (onDiskLen % bytesPerChecksum == 0) {
        // The last chunk is a complete one; its checksum does not need to
        // be preserved because it will not be modified.
        return null;
    }
    long offsetInChecksum = BlockMetadataHeader.getHeaderSize() + (onDiskLen / bytesPerChecksum) * checksumSize;
    byte[] lastChecksum = new byte[checksumSize];
    try (RandomAccessFile raf = fileIoProvider.getRandomAccessFile(this, metaFile, "r")) {
        raf.seek(offsetInChecksum);
        int readBytes = raf.read(lastChecksum, 0, checksumSize);
        if (readBytes == -1) {
            throw new IOException("Expected to read " + checksumSize + " bytes from offset " + offsetInChecksum + " but reached end of file.");
        } else if (readBytes != checksumSize) {
            throw new IOException("Expected to read " + checksumSize + " bytes from offset " + offsetInChecksum + " but read " + readBytes + " bytes.");
        }
    }
    return lastChecksum;
}
Also used: RandomAccessFile(java.io.RandomAccessFile) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) DataChecksum(org.apache.hadoop.util.DataChecksum)
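
To make the offset arithmetic concrete, here is a worked example with illustrative numbers (the 7-byte header size matches the usual HDFS block metadata header, but treat all the figures as assumptions for the example):

// Illustrative numbers, not taken from a real block:
//   bytesPerChecksum = 512, checksumSize = 4, header size = 7 bytes.
// A block file with onDiskLen = 1300 bytes holds two complete 512-byte
// chunks plus a partial 276-byte chunk, so the partial chunk's checksum
// is the third 4-byte entry in the meta file:
//   offsetInChecksum = 7 + (1300 / 512) * 4 = 7 + 2 * 4 = 15
// Seeking to offset 15 and reading 4 bytes yields exactly the checksum a
// writer appending to the partial chunk must preserve.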

Example 14 with RandomAccessFile

Use of java.io.RandomAccessFile in project hadoop by apache.

The class TestFsck, method testCorruptBlock.

@Test
public void testCorruptBlock() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, factor);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // corrupt replicas
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
        // Overwrite a few bytes at a random offset in the first half of the
        // replica so its on-disk checksum no longer matches the data.
        try (RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw")) {
            FileChannel channel = raFile.getChannel();
            String badString = "BADBAD";
            int rand = random.nextInt((int) channel.size() / 2);
            raFile.seek(rand);
            raFile.write(badString.getBytes());
        }
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        assertTrue(ie instanceof ChecksumException);
    }
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != factor) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());
    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("testCorruptBlock"));
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileChannel(java.nio.channels.FileChannel) ChecksumException(org.apache.hadoop.fs.ChecksumException) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) File(java.io.File) Test(org.junit.Test)
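
The heart of the test is the handful of lines that overwrite bytes inside an on-disk replica. The same idiom as a standalone sketch; the helper name and the "BADBAD" payload are illustrative:

// Illustrative sketch of the corruption idiom used in the test above.
static void corruptReplica(File blockFile) throws IOException {
    Random random = new Random();
    try (RandomAccessFile raf = new RandomAccessFile(blockFile, "rw")) {
        // Stay in the first half of the file so the 6-byte write lands
        // well inside the replica and does not change its length.
        long offset = random.nextInt((int) (raf.length() / 2));
        raf.seek(offset);
        raf.write("BADBAD".getBytes(StandardCharsets.UTF_8));
    }
}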

Example 15 with RandomAccessFile

Use of java.io.RandomAccessFile in project hadoop by apache.

The class TestIOUtils, method testWriteFully.

@Test
public void testWriteFully() throws IOException {
    final int INPUT_BUFFER_LEN = 10000;
    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
    byte[] input = new byte[INPUT_BUFFER_LEN];
    for (int i = 0; i < input.length; i++) {
        input[i] = (byte) (i & 0xff);
    }
    byte[] output = new byte[input.length];
    try (RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw")) {
        FileChannel fc = raf.getChannel();
        ByteBuffer buf = ByteBuffer.wrap(input);
        IOUtils.writeFully(fc, buf);
        raf.seek(0);
        // readFully guarantees the whole buffer is filled; a plain read()
        // may return fewer bytes than requested.
        raf.readFully(output);
        for (int i = 0; i < input.length; i++) {
            assertEquals(input[i], output[i]);
        }
        // Write the same data again, this time starting at offset HALFWAY.
        buf.rewind();
        IOUtils.writeFully(fc, buf, HALFWAY);
        for (int i = 0; i < HALFWAY; i++) {
            assertEquals(input[i], output[i]);
        }
        raf.seek(0);
        raf.readFully(output);
        for (int i = HALFWAY; i < input.length; i++) {
            assertEquals(input[i - HALFWAY], output[i]);
        }
    } finally {
        File f = new File(TEST_FILE_NAME);
        if (f.exists()) {
            f.delete();
        }
    }
}
Also used: RandomAccessFile(java.io.RandomAccessFile) FileChannel(java.nio.channels.FileChannel) ByteBuffer(java.nio.ByteBuffer) File(java.io.File) Test(org.junit.Test)
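
IOUtils.writeFully exists because a single FileChannel.write call may write fewer bytes than the buffer holds. A minimal sketch of what the positional variant has to do, mirroring the contract of org.apache.hadoop.io.IOUtils.writeFully(FileChannel, ByteBuffer, long) rather than its exact source:

// Sketch of a positional write-fully loop; not Hadoop's actual source.
static void writeFully(FileChannel fc, ByteBuffer buf, long offset) throws IOException {
    while (buf.hasRemaining()) {
        // The positional write advances the buffer but leaves the
        // channel's own position untouched.
        offset += fc.write(buf, offset);
    }
}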

Aggregations

RandomAccessFile (java.io.RandomAccessFile): 866
IOException (java.io.IOException): 425
File (java.io.File): 349
FileChannel (java.nio.channels.FileChannel): 133
FileNotFoundException (java.io.FileNotFoundException): 84
ByteBuffer (java.nio.ByteBuffer): 78
Test (org.junit.Test): 78
FileLock (java.nio.channels.FileLock): 64
EOFException (java.io.EOFException): 50
FileOutputStream (java.io.FileOutputStream): 47
FileInputStream (java.io.FileInputStream): 40
InputStream (java.io.InputStream): 36
MappedByteBuffer (java.nio.MappedByteBuffer): 33
Random (java.util.Random): 26
ByteArrayInputStream (java.io.ByteArrayInputStream): 24
BufferedInputStream (java.io.BufferedInputStream): 21
DataInputStream (java.io.DataInputStream): 19
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 17
Configuration (org.apache.hadoop.conf.Configuration): 16
AtomicFile (android.util.AtomicFile): 12