Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache: the class TestDatanodeDeath, method checkFile.
//
// verify that the data written are sane
//
private static void checkFile(FileSystem fileSys, Path name, int repl,
    int numblocks, int filesize, long seed) throws IOException {
  boolean done = false;
  int attempt = 0;
  long len = fileSys.getFileStatus(name).getLen();
  assertTrue(name + " should be of size " + filesize +
      " but found to be of size " + len, len == filesize);
  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    attempt++;
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, filesize);
    if (locations.length < numblocks) {
      if (attempt > 100) {
        System.out.println("File " + name + " has only " + locations.length +
            " blocks, " + " but is expected to have " + numblocks + " blocks.");
      }
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        if (attempt > 100) {
          System.out.println("File " + name + " has " + locations.length +
              " blocks: " + " The " + idx + " block has only " +
              locations[idx].getHosts().length +
              " replicas but is expected to have " + repl + " replicas.");
        }
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected = AppendTestUtil.randomBytes(seed, filesize);
  // do a sanity check. Read the file
  byte[] actual = new byte[filesize];
  stm.readFully(0, actual);
  checkData(actual, 0, expected, "Read 1");
}
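The sanity check above uses the positional form of readFully, which reads from an absolute offset without moving the stream's sequential cursor. A minimal, self-contained sketch of that behavior follows; the path /tmp/sample, the 16-byte buffer, and the default FileSystem are illustrative assumptions, not part of the test above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumption: this path exists on the default FileSystem and holds at least 16 bytes.
    Path file = new Path("/tmp/sample");
    FileSystem fs = file.getFileSystem(conf);
    byte[] buf = new byte[16];
    try (FSDataInputStream in = fs.open(file)) {
      // Positional read: fills buf from offset 0 without moving the stream's cursor.
      in.readFully(0, buf);
      // The sequential cursor was never advanced, so getPos() still reports 0 here.
      System.out.println("pos after positional read = " + in.getPos());
    }
  }
}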
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache: the class TestExternalBlockReader, method testMisconfiguredExternalBlockReader.
@Test
public void testMisconfiguredExternalBlockReader() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY,
      "org.apache.hadoop.hdfs.NonExistentReplicaAccessorBuilderClass");
  conf.setLong(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  final int TEST_LENGTH = 2048;
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(dfs, new Path("/a"), TEST_LENGTH, (short) 1, SEED);
    FSDataInputStream stream = dfs.open(new Path("/a"));
    byte[] buf = new byte[TEST_LENGTH];
    IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_LENGTH);
    Assert.assertArrayEquals(expected, buf);
    stream.close();
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
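The test closes the stream explicitly inside the try block, so an assertion failure would leave it open until the cluster shuts down. A hedged alternative sketch, assuming only a caller-supplied FileSystem and Path, reads a fixed-length prefix with the stream managed by try-with-resources; the class and method names here are made up for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class ReadWholeFileSketch {
  // Reads exactly `length` bytes from the start of `file`, closing the stream
  // even if the read throws. The caller supplies the FileSystem and Path.
  static byte[] readPrefix(FileSystem fs, Path file, int length) throws IOException {
    byte[] buf = new byte[length];
    try (FSDataInputStream in = fs.open(file)) {
      // IOUtils.readFully loops until `length` bytes are read, throwing on premature EOF.
      IOUtils.readFully(in, buf, 0, length);
    }
    return buf;
  }
}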
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache: the class TestBatchIbr, method verifyFile.
static boolean verifyFile(Path f, DistributedFileSystem dfs) {
  final long seed;
  final int numBlocks;
  {
    final String name = f.getName();
    final int i = name.indexOf('_');
    seed = Long.parseLong(name.substring(0, i));
    numBlocks = Integer.parseInt(name.substring(i + 1));
  }
  final byte[] computed = IO_BUF.get();
  final byte[] expected = VERIFY_BUF.get();
  try (FSDataInputStream in = dfs.open(f)) {
    for (int i = 0; i < numBlocks; i++) {
      in.read(computed);
      nextBytes(i, seed, expected);
      Assert.assertArrayEquals(expected, computed);
    }
    return true;
  } catch (Exception e) {
    LOG.error("Failed to verify file " + f);
    return false;
  }
}
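Note that in.read(computed) may return fewer bytes than the buffer size; the test can tolerate that because each block is written in full, but a stricter variant can use readFully(byte[]), which FSDataInputStream inherits from DataInputStream and which blocks until the buffer is filled or throws EOFException. A minimal sketch under that assumption follows; the helper name, the expectedBlocks array, and blockSize are illustrative, not part of the test above.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockVerifySketch {
  // Compares each fixed-size block of `f` against the caller-supplied expected contents.
  static boolean verifyBlocks(FileSystem fs, Path f, byte[][] expectedBlocks, int blockSize) {
    byte[] actual = new byte[blockSize];
    try (FSDataInputStream in = fs.open(f)) {
      for (byte[] expected : expectedBlocks) {
        in.readFully(actual);                 // fills the whole buffer or throws EOFException
        if (!Arrays.equals(expected, actual)) {
          return false;
        }
      }
      return true;
    } catch (IOException e) {
      return false;
    }
  }
}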
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache: the class MRAppMaster, method readJustAMInfos.
private List<AMInfo> readJustAMInfos() {
  List<AMInfo> amInfos = new ArrayList<AMInfo>();
  FSDataInputStream inputStream = null;
  try {
    inputStream = getPreviousJobHistoryStream(getConfig(), appAttemptID);
    EventReader jobHistoryEventReader = new EventReader(inputStream);
    // All AMInfos are contiguous. Track when the first AMStartedEvent
    // appears.
    boolean amStartedEventsBegan = false;
    HistoryEvent event;
    while ((event = jobHistoryEventReader.getNextEvent()) != null) {
      if (event.getEventType() == EventType.AM_STARTED) {
        if (!amStartedEventsBegan) {
          // First AMStartedEvent.
          amStartedEventsBegan = true;
        }
        AMStartedEvent amStartedEvent = (AMStartedEvent) event;
        amInfos.add(MRBuilderUtils.newAMInfo(
            amStartedEvent.getAppAttemptId(), amStartedEvent.getStartTime(),
            amStartedEvent.getContainerId(),
            StringInterner.weakIntern(amStartedEvent.getNodeManagerHost()),
            amStartedEvent.getNodeManagerPort(),
            amStartedEvent.getNodeManagerHttpPort()));
      } else if (amStartedEventsBegan) {
        // No need to continue reading all the other events.
        break;
      }
    }
  } catch (IOException e) {
    LOG.warn("Could not parse the old history file. "
        + "Will not have old AMinfos ", e);
  } finally {
    if (inputStream != null) {
      IOUtils.closeQuietly(inputStream);
    }
  }
  return amInfos;
}
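The same open-and-scan pattern can be sketched with try-with-resources so the FSDataInputStream is closed even if parsing fails. The printEventTypes helper and the idea that the caller supplies a Configuration and the history-file Path are assumptions for illustration; only EventReader and getNextEvent come from the snippet above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.EventReader;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;

public class HistoryScanSketch {
  // Prints the type of every event in a job history file supplied by the caller.
  static void printEventTypes(Configuration conf, Path historyFile) throws IOException {
    FileSystem fs = historyFile.getFileSystem(conf);
    try (FSDataInputStream in = fs.open(historyFile)) {
      EventReader reader = new EventReader(in);
      HistoryEvent event;
      while ((event = reader.getNextEvent()) != null) {
        System.out.println(event.getEventType());
      }
      // The underlying stream is closed by try-with-resources when this block exits.
    }
  }
}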
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache: the class TestTracing, method readTestFile.
private void readTestFile(String testFileName) throws Exception {
  Path filePath = new Path(testFileName);
  FSDataInputStream istream = dfs.open(filePath, 10240);
  ByteBuffer buf = ByteBuffer.allocate(10240);
  int count = 0;
  try {
    while (istream.read(buf) > 0) {
      count += 1;
      buf.clear();
      istream.seek(istream.getPos() + 5);
    }
  } catch (IOException ioe) {
    // Ignore this it's probably a seek after eof.
  } finally {
    istream.close();
  }
}
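The loop above stops as soon as read(ByteBuffer) no longer returns a positive count and relies on the catch block to swallow the exception from seeking past the end of the file. A plainer draining loop, sketched below, tests for the -1 end-of-stream return instead; it assumes the underlying stream supports ByteBuffer reads (HDFS's DFSInputStream does), and the countBytes helper is illustrative.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ByteBufferReadSketch {
  // Counts how many bytes a file holds by draining it through a ByteBuffer.
  static long countBytes(FileSystem fs, Path file) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate(10240);
    long total = 0;
    try (FSDataInputStream in = fs.open(file)) {
      int n;
      // read(ByteBuffer) returns the number of bytes read, or -1 at end of stream.
      while ((n = in.read(buf)) != -1) {
        total += n;
        buf.clear();              // make the buffer reusable for the next read
      }
    }
    return total;
  }
}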