Example 66 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

The class TestDatanodeDeath, method checkFile.

//
// Verify that the data written is sane.
//
private static void checkFile(FileSystem fileSys, Path name, int repl, int numblocks, int filesize, long seed) throws IOException {
    boolean done = false;
    int attempt = 0;
    long len = fileSys.getFileStatus(name).getLen();
    assertTrue(name + " should be of size " + filesize + " but found to be of size " + len, len == filesize);
    // wait till all full blocks are confirmed by the datanodes.
    while (!done) {
        attempt++;
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
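            // An interrupted sleep is harmless here; the loop simply retries sooner.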
        }
        done = true;
        BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, filesize);
        if (locations.length < numblocks) {
            if (attempt > 100) {
                System.out.println("File " + name + " has only " + locations.length + " blocks, " + " but is expected to have " + numblocks + " blocks.");
            }
            done = false;
            continue;
        }
        for (int idx = 0; idx < locations.length; idx++) {
            if (locations[idx].getHosts().length < repl) {
                if (attempt > 100) {
                    System.out.println("File " + name + " has " + locations.length + " blocks: " + " The " + idx + " block has only " + locations[idx].getHosts().length + " replicas but is expected to have " + repl + " replicas.");
                }
                done = false;
                break;
            }
        }
    }
    FSDataInputStream stm = fileSys.open(name);
    final byte[] expected = AppendTestUtil.randomBytes(seed, filesize);
    // do a sanity check. Read the file
    byte[] actual = new byte[filesize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 1");
}
Also used: FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), BlockLocation (org.apache.hadoop.fs.BlockLocation)
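For comparison, here is a minimal sketch of the same sanity check with try-with-resources, so the stream is closed even if an assertion fails (the method above never closes stm); it assumes the same fileSys, name, filesize, and seed as checkFile:

try (FSDataInputStream in = fileSys.open(name)) {
    final byte[] expected = AppendTestUtil.randomBytes(seed, filesize);
    byte[] actual = new byte[filesize];
    // readFully(position, buffer) is a positioned read: it fills the whole
    // buffer from the given absolute offset and throws EOFException if the
    // file is shorter, so no short-read handling is needed.
    in.readFully(0, actual);
    checkData(actual, 0, expected, "Read 1");
}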

Example 67 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

The class TestExternalBlockReader, method testMisconfiguredExternalBlockReader.

@Test
public void testMisconfiguredExternalBlockReader() throws Exception {
    Configuration conf = new Configuration();
    conf.set(HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY, "org.apache.hadoop.hdfs.NonExistentReplicaAccessorBuilderClass");
    conf.setLong(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final int TEST_LENGTH = 2048;
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        DFSTestUtil.createFile(dfs, new Path("/a"), TEST_LENGTH, (short) 1, SEED);
        FSDataInputStream stream = dfs.open(new Path("/a"));
        byte[] buf = new byte[TEST_LENGTH];
        IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
        byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_LENGTH);
        Assert.assertArrayEquals(expected, buf);
        stream.close();
    } finally {
        dfs.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Test (org.junit.Test)
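Note that stream.close() above is skipped if the assertion fails. A hedged sketch of the read portion with try-with-resources, reusing the names from the test:

try (FSDataInputStream stream = dfs.open(new Path("/a"))) {
    byte[] buf = new byte[TEST_LENGTH];
    // IOUtils.readFully loops until the buffer is filled, so a short read
    // from the underlying stream cannot cause a spurious mismatch.
    IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_LENGTH);
    Assert.assertArrayEquals(expected, buf);
}
// The stream is closed here even if assertArrayEquals throws.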

Example 68 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

The class TestBatchIbr, method verifyFile.

static boolean verifyFile(Path f, DistributedFileSystem dfs) {
    final long seed;
    final int numBlocks;
    {
        final String name = f.getName();
        final int i = name.indexOf('_');
        seed = Long.parseLong(name.substring(0, i));
        numBlocks = Integer.parseInt(name.substring(i + 1));
    }
    final byte[] computed = IO_BUF.get();
    final byte[] expected = VERIFY_BUF.get();
    try (FSDataInputStream in = dfs.open(f)) {
        for (int i = 0; i < numBlocks; i++) {
            in.read(computed);
            nextBytes(i, seed, expected);
            Assert.assertArrayEquals(expected, computed);
        }
        return true;
    } catch (Exception e) {
        LOG.error("Failed to verify file " + f);
        return false;
    }
}
Also used: FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), IOException (java.io.IOException)
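One caveat in the loop above: InputStream.read(byte[]) may legitimately return fewer bytes than the buffer length. A stricter variant, sketched below with the same names, uses DataInputStream.readFully, which FSDataInputStream inherits:

try (FSDataInputStream in = dfs.open(f)) {
    for (int i = 0; i < numBlocks; i++) {
        // readFully blocks until the buffer is completely filled (or throws
        // EOFException), ruling out short reads at block boundaries.
        in.readFully(computed);
        nextBytes(i, seed, expected);
        Assert.assertArrayEquals(expected, computed);
    }
    return true;
} catch (Exception e) {
    LOG.error("Failed to verify file " + f, e);
    return false;
}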

Example 69 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

The class MRAppMaster, method readJustAMInfos.

private List<AMInfo> readJustAMInfos() {
    List<AMInfo> amInfos = new ArrayList<AMInfo>();
    FSDataInputStream inputStream = null;
    try {
        inputStream = getPreviousJobHistoryStream(getConfig(), appAttemptID);
        EventReader jobHistoryEventReader = new EventReader(inputStream);
        // All AMInfos are contiguous. Track when the first AMStartedEvent
        // appears.
        boolean amStartedEventsBegan = false;
        HistoryEvent event;
        while ((event = jobHistoryEventReader.getNextEvent()) != null) {
            if (event.getEventType() == EventType.AM_STARTED) {
                if (!amStartedEventsBegan) {
                    // First AMStartedEvent.
                    amStartedEventsBegan = true;
                }
                AMStartedEvent amStartedEvent = (AMStartedEvent) event;
                amInfos.add(MRBuilderUtils.newAMInfo(amStartedEvent.getAppAttemptId(), amStartedEvent.getStartTime(), amStartedEvent.getContainerId(), StringInterner.weakIntern(amStartedEvent.getNodeManagerHost()), amStartedEvent.getNodeManagerPort(), amStartedEvent.getNodeManagerHttpPort()));
            } else if (amStartedEventsBegan) {
                // No need to continue reading all the other events.
                break;
            }
        }
    } catch (IOException e) {
        LOG.warn("Could not parse the old history file. " + "Will not have old AMinfos ", e);
    } finally {
        if (inputStream != null) {
            IOUtils.closeQuietly(inputStream);
        }
    }
    return amInfos;
}
Also used: AMInfo (org.apache.hadoop.mapreduce.v2.api.records.AMInfo), AMStartedEvent (org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent), EventReader (org.apache.hadoop.mapreduce.jobhistory.EventReader), ArrayList (java.util.ArrayList), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), IOException (java.io.IOException), JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent), HistoryEvent (org.apache.hadoop.mapreduce.jobhistory.HistoryEvent)
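Assuming EventReader implements Closeable and that closing it also closes the wrapped stream (true in recent Hadoop releases, but worth verifying for your version), the null check and closeQuietly above fold into try-with-resources. A sketch with equivalent logic, replacing the amStartedEventsBegan flag with a check on the collected list:

private List<AMInfo> readJustAMInfos() {
    List<AMInfo> amInfos = new ArrayList<AMInfo>();
    try (EventReader reader =
            new EventReader(getPreviousJobHistoryStream(getConfig(), appAttemptID))) {
        HistoryEvent event;
        while ((event = reader.getNextEvent()) != null) {
            if (event.getEventType() == EventType.AM_STARTED) {
                AMStartedEvent started = (AMStartedEvent) event;
                amInfos.add(MRBuilderUtils.newAMInfo(started.getAppAttemptId(),
                    started.getStartTime(), started.getContainerId(),
                    StringInterner.weakIntern(started.getNodeManagerHost()),
                    started.getNodeManagerPort(), started.getNodeManagerHttpPort()));
            } else if (!amInfos.isEmpty()) {
                // AMStartedEvents are contiguous; stop once the run ends.
                break;
            }
        }
    } catch (IOException e) {
        LOG.warn("Could not parse the old history file. Will not have old AMInfos.", e);
    }
    return amInfos;
}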

Example 70 with FSDataInputStream

Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.

The class TestTracing, method readTestFile.

private void readTestFile(String testFileName) throws Exception {
    Path filePath = new Path(testFileName);
    FSDataInputStream istream = dfs.open(filePath, 10240);
    ByteBuffer buf = ByteBuffer.allocate(10240);
    int count = 0;
    try {
        while (istream.read(buf) > 0) {
            count += 1;
            buf.clear();
            istream.seek(istream.getPos() + 5);
        }
    } catch (IOException ioe) {
        // Ignored: this is most likely a seek past the end of the file.
    } finally {
        istream.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer)
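The seek-after-each-read in this test is deliberate: it exercises both the read and seek paths so the tracing assertions see varied spans. For plain data access, the positioned-read API gives the same skip pattern without moving the stream position; a hedged sketch, reusing dfs and filePath from above:

try (FSDataInputStream in = dfs.open(filePath, 10240)) {
    long fileLen = dfs.getFileStatus(filePath).getLen();
    byte[] chunk = new byte[10240];
    // Positioned reads take an absolute offset, so skipping 5 bytes between
    // chunks needs no explicit seek and leaves the stream position untouched.
    for (long pos = 0; pos < fileLen; pos += chunk.length + 5) {
        int toRead = (int) Math.min(chunk.length, fileLen - pos);
        in.readFully(pos, chunk, 0, toRead);
    }
}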

Aggregations

FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 431 uses
Path (org.apache.hadoop.fs.Path): 271 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 143 uses
Test (org.junit.Test): 135 uses
IOException (java.io.IOException): 125 uses
Configuration (org.apache.hadoop.conf.Configuration): 94 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 93 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 62 uses
InputStreamReader (java.io.InputStreamReader): 37 uses
BufferedReader (java.io.BufferedReader): 36 uses
FileNotFoundException (java.io.FileNotFoundException): 26 uses
IgfsPath (org.apache.ignite.igfs.IgfsPath): 26 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 21 uses
ArrayList (java.util.ArrayList): 20 uses
Random (java.util.Random): 19 uses
EOFException (java.io.EOFException): 18 uses
HashMap (java.util.HashMap): 16 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 15 uses
URI (java.net.URI): 14 uses
File (java.io.File): 13 uses