Use of java.io.RandomAccessFile in the Apache Hadoop project.
From the class TestOfflineImageViewer, method testTruncatedFSImage.
@Test(expected = IOException.class)
public void testTruncatedFSImage() throws IOException {
  File truncatedFile = new File(tempDir, "truncatedFsImage");
  PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
  copyPartOfFile(originalFsimage, truncatedFile);
  new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
      .visit(new RandomAccessFile(truncatedFile, "r"));
}
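The visit call fails because copyPartOfFile (a test helper not shown here) produces a deliberately truncated copy of the fsimage. As a rough illustration of what such a helper could look like, here is a hedged sketch that copies only the first half of a file with RandomAccessFile and FileChannel; the name copyFilePrefix and the half-file cutoff are assumptions for the example, not the actual Hadoop helper.

// Hedged sketch only: copyFilePrefix and the half-file cutoff are assumptions
// for illustration; the real copyPartOfFile helper in TestOfflineImageViewer
// may behave differently.
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

public class TruncatedCopySketch {
  public static void copyFilePrefix(File src, File dest) throws IOException {
    try (RandomAccessFile in = new RandomAccessFile(src, "r");
         RandomAccessFile out = new RandomAccessFile(dest, "rw")) {
      FileChannel inChannel = in.getChannel();
      FileChannel outChannel = out.getChannel();
      long remaining = inChannel.size() / 2;  // keep only a prefix of the file
      long pos = 0;
      while (remaining > 0) {
        long n = inChannel.transferTo(pos, remaining, outChannel);
        if (n <= 0) {
          break;  // defensive: transferTo made no progress
        }
        pos += n;
        remaining -= n;
      }
    }
  }
}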
Use of java.io.RandomAccessFile in the Apache Hadoop project.
From the class TestOfflineImageViewerForAcl, method testPBDelimitedWriterForAcl.
@Test
public void testPBDelimitedWriterForAcl() throws Exception {
  final String DELIMITER = "\t";
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  try (PrintStream o = new PrintStream(output)) {
    PBImageDelimitedTextWriter v =
        new PBImageDelimitedTextWriter(o, DELIMITER, "");  // run in memory.
    v.visit(new RandomAccessFile(originalFsimage, "r"));
  }
  try (ByteArrayInputStream input =
           new ByteArrayInputStream(output.toByteArray());
       BufferedReader reader =
           new BufferedReader(new InputStreamReader(input))) {
    String line;
    boolean header = true;
    while ((line = reader.readLine()) != null) {
      String[] fields = line.split(DELIMITER);
      if (!header) {
        String filePath = fields[0];
        String permission = fields[9];
        if (!filePath.equals("/")) {
          boolean hasAcl = !filePath.toLowerCase().contains("noacl");
          assertEquals(hasAcl, permission.endsWith("+"));
        }
      }
      header = false;
    }
  }
}
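The assertion hinges on a POSIX-style convention: a permission string ending in "+" marks an inode that carries an ACL. A minimal standalone sketch of that per-line check follows; the sample row is made up for illustration and only carries the two columns the check needs (in the real delimited output the permission sits in column 9, as indexed in the test above).

// Standalone sketch of the per-line ACL check; the sample row is invented
// for illustration.
public class AclFlagCheckSketch {
  public static void main(String[] args) {
    String delimiter = "\t";
    String line = "/dir/withAcl" + delimiter + "-rw-r--r--+";  // hypothetical row
    String[] fields = line.split(delimiter);
    String path = fields[0];
    String permission = fields[fields.length - 1];  // column 9 in the real output
    boolean hasAcl = permission.endsWith("+");      // trailing '+' means an ACL is set
    System.out.println(path + " hasAcl=" + hasAcl);
  }
}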
Use of java.io.RandomAccessFile in the Apache Hadoop project.
From the class FsVolumeImpl, method loadLastPartialChunkChecksum.
@Override
public byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile) throws IOException {
  // readHeader closes the temporary FileInputStream.
  DataChecksum dcs;
  try (FileInputStream fis = fileIoProvider.getFileInputStream(this, metaFile)) {
    dcs = BlockMetadataHeader.readHeader(fis).getChecksum();
  }
  final int checksumSize = dcs.getChecksumSize();
  final long onDiskLen = blockFile.length();
  final int bytesPerChecksum = dcs.getBytesPerChecksum();
  if (onDiskLen % bytesPerChecksum == 0) {
    // The last chunk is a complete chunk; no need to preserve its checksum
    // because it will not be modified.
    return null;
  }
  long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
      (onDiskLen / bytesPerChecksum) * checksumSize;
  byte[] lastChecksum = new byte[checksumSize];
  try (RandomAccessFile raf = fileIoProvider.getRandomAccessFile(this, metaFile, "r")) {
    raf.seek(offsetInChecksum);
    int readBytes = raf.read(lastChecksum, 0, checksumSize);
    if (readBytes == -1) {
      throw new IOException("Expected to read " + checksumSize +
          " bytes from offset " + offsetInChecksum + " but reached end of file.");
    } else if (readBytes != checksumSize) {
      throw new IOException("Expected to read " + checksumSize +
          " bytes from offset " + offsetInChecksum + " but read " + readBytes + " bytes.");
    }
  }
  return lastChecksum;
}
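The interesting part is the offset arithmetic: skip the block metadata header, then one stored checksum for every complete chunk, which lands exactly on the checksum of the trailing partial chunk. Below is a hedged standalone sketch of the same arithmetic using only java.io; the constants are illustrative assumptions, since the real header size comes from BlockMetadataHeader and the chunk and checksum widths from DataChecksum.

// Standalone sketch of the same offset arithmetic with illustrative constants;
// the class name and the constant values are assumptions for this example.
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class LastPartialChecksumSketch {
  static final int HEADER_SIZE = 7;           // assumed metadata header length
  static final int BYTES_PER_CHECKSUM = 512;  // assumed chunk size
  static final int CHECKSUM_SIZE = 4;         // assumed CRC width

  static byte[] readLastPartialChecksum(File blockFile, File metaFile) throws IOException {
    long onDiskLen = blockFile.length();
    if (onDiskLen % BYTES_PER_CHECKSUM == 0) {
      return null;  // no partial chunk, nothing to preserve
    }
    // One checksum per *complete* chunk precedes the one we want.
    long offset = HEADER_SIZE + (onDiskLen / BYTES_PER_CHECKSUM) * CHECKSUM_SIZE;
    byte[] checksum = new byte[CHECKSUM_SIZE];
    try (RandomAccessFile raf = new RandomAccessFile(metaFile, "r")) {
      raf.seek(offset);
      raf.readFully(checksum);  // throws EOFException if the meta file is short
    }
    return checksum;
  }
}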
Use of java.io.RandomAccessFile in the Apache Hadoop project.
From the class TestFsck, method testCorruptBlock.
@Test
public void testCorruptBlock() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  Random random = new Random();
  String outStr = null;
  short factor = 1;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  Path file1 = new Path("/testCorruptBlock");
  DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
  // Wait until file replication has completed
  DFSTestUtil.waitReplication(fs, file1, factor);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
  // Make sure filesystem is in healthy state
  outStr = runFsck(conf, 0, true, "/");
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  // corrupt replicas
  File blockFile = cluster.getBlockFile(0, block);
  if (blockFile != null && blockFile.exists()) {
    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
    FileChannel channel = raFile.getChannel();
    String badString = "BADBAD";
    int rand = random.nextInt((int) channel.size() / 2);
    raFile.seek(rand);
    raFile.write(badString.getBytes());
    raFile.close();
  }
  // Read the file to trigger reportBadBlocks
  try {
    IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
  } catch (IOException ie) {
    assertTrue(ie instanceof ChecksumException);
  }
  dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  replicaCount = blocks.get(0).getLocations().length;
  while (replicaCount != factor) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ignore) {
    }
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
  }
  assertTrue(blocks.get(0).isCorrupt());
  // Check if fsck reports the same
  outStr = runFsck(conf, 1, true, "/");
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  assertTrue(outStr.contains("testCorruptBlock"));
}
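The corruption step itself is plain RandomAccessFile usage: open the replica's block file in "rw" mode, seek to a random offset in the first half, and overwrite a few bytes so the stored checksum no longer matches. Here is a hedged sketch of the same idea as a small helper, using try-with-resources so the file is closed even if the write throws; the class and method names are invented for the example, and the "BADBAD" marker and half-file bound simply mirror the test above.

// Sketch of the corruption step as a reusable helper; BlockCorrupter and
// corruptFile are invented names for illustration.
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;

public class BlockCorrupter {
  public static void corruptFile(File blockFile) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(blockFile, "rw")) {
      int offset = new Random().nextInt((int) (raf.length() / 2));
      raf.seek(offset);
      raf.write("BADBAD".getBytes());  // overwrite a few bytes in place
    }
  }
}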
Use of java.io.RandomAccessFile in the Apache Hadoop project.
From the class TestIOUtils, method testWriteFully.
@Test
public void testWriteFully() throws IOException {
  final int INPUT_BUFFER_LEN = 10000;
  final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
  byte[] input = new byte[INPUT_BUFFER_LEN];
  for (int i = 0; i < input.length; i++) {
    input[i] = (byte) (i & 0xff);
  }
  byte[] output = new byte[input.length];
  try {
    RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
    FileChannel fc = raf.getChannel();
    ByteBuffer buf = ByteBuffer.wrap(input);
    IOUtils.writeFully(fc, buf);
    raf.seek(0);
    raf.read(output);
    for (int i = 0; i < input.length; i++) {
      assertEquals(input[i], output[i]);
    }
    buf.rewind();
    IOUtils.writeFully(fc, buf, HALFWAY);
    for (int i = 0; i < HALFWAY; i++) {
      assertEquals(input[i], output[i]);
    }
    raf.seek(0);
    raf.read(output);
    for (int i = HALFWAY; i < input.length; i++) {
      assertEquals(input[i - HALFWAY], output[i]);
    }
    raf.close();
  } finally {
    File f = new File(TEST_FILE_NAME);
    if (f.exists()) {
      f.delete();
    }
  }
}
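IOUtils.writeFully exists because a single FileChannel.write call may drain only part of the buffer. The sketch below shows the kind of write-until-empty loop such a helper needs, in both the non-positional and positional forms exercised by the test; it is an illustration built on java.nio, not the Hadoop IOUtils implementation itself.

// Sketch of a writeFully-style loop using only java.nio; WriteFullySketch is
// an invented class name, and this is not the Hadoop implementation.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class WriteFullySketch {
  // Write the whole remaining buffer at the channel's current position.
  static void writeFully(FileChannel fc, ByteBuffer buf) throws IOException {
    while (buf.hasRemaining()) {
      fc.write(buf);  // may write only part of the buffer per call
    }
  }

  // Write the whole remaining buffer starting at the given file offset,
  // without moving the channel's position (mirrors the two-argument call
  // exercised by the test above).
  static void writeFully(FileChannel fc, ByteBuffer buf, long offset) throws IOException {
    while (buf.hasRemaining()) {
      offset += fc.write(buf, offset);
    }
  }
}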