Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestFSInputChecker, method checkFileCorruption.
private void checkFileCorruption(LocalFileSystem fileSys, Path file, Path fileToCorrupt) throws IOException {
  // corrupt the file
  RandomAccessFile out = new RandomAccessFile(new File(fileToCorrupt.toString()), "rw");
  byte[] buf = new byte[(int) fileSys.getFileStatus(file).getLen()];
  int corruptFileLen = (int) fileSys.getFileStatus(fileToCorrupt).getLen();
  assertTrue(buf.length >= corruptFileLen);
  rand.nextBytes(buf);
  out.seek(corruptFileLen / 2);
  out.write(buf, 0, corruptFileLen / 4);
  out.close();
  // reading the file back through the checksummed file system must now fail
  boolean gotException = false;
  InputStream in = fileSys.open(file);
  try {
    IOUtils.readFully(in, buf, 0, buf.length);
  } catch (ChecksumException e) {
    gotException = true;
  }
  assertTrue(gotException);
  in.close();
}
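The RandomAccessFile pattern here does not depend on HDFS: open the file with mode "rw", seek into the middle, and overwrite a chunk so the stored checksums no longer match the data. Below is a minimal standalone sketch of just that corruption step; the class and method names are illustrative, not part of the Hadoop test, and it uses try-with-resources instead of the explicit close() above.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;

public class CorruptMiddleOfFile {

  // Overwrite a quarter of the file, starting at its midpoint, with random bytes.
  static void corruptMiddle(File file) throws IOException {
    byte[] junk = new byte[(int) (file.length() / 4)];
    new Random().nextBytes(junk);
    try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
      raf.seek(file.length() / 2);
      raf.write(junk);
    }
  }

  public static void main(String[] args) throws IOException {
    corruptMiddle(new File(args[0]));
  }
}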
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestEditLog, method testReadActivelyUpdatedLog.
/**
 * Test that reading an in-progress edit log does not read past the last
 * synced transaction.
 *
 * @throws Exception
 */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
  final TestAppender appender = new TestAppender();
  LogManager.getRootLogger().addAppender(appender);
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Set single handler thread, so all transactions hit same thread-local ops.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
    final DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
    fileSys.mkdirs(new Path("/test"));
    fileSys.mkdirs(new Path("/test/dir1"));
    fileSys.delete(new Path("/test/dir1"), true);
    fsimage.getEditLog().logSync();
    fileSys.mkdirs(new Path("/test/dir2"));
    final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
    assertTrue(inProgressEdit.exists());
    EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
    FSEditLogOp op;
    long pos = 0;
    // advance to the position just past the last valid op in the in-progress log
    while (true) {
      op = elis.readOp();
      if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
        pos = elis.getPosition();
      } else {
        break;
      }
    }
    elis.close();
    assertTrue(pos > 0);
    RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
    rwf.seek(pos);
    assertEquals(rwf.readByte(), (byte) -1);
    rwf.seek(pos + 1);
    rwf.writeByte(2);
    rwf.close();
    events.poll();
    String pattern = "Caught exception after reading (.*) ops";
    Pattern r = Pattern.compile(pattern);
    final List<LoggingEvent> log = appender.getLog();
    for (LoggingEvent event : log) {
      Matcher m = r.matcher(event.getRenderedMessage());
      if (m.find()) {
        fail("Should not try to read past latest synced edit log op");
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    LogManager.getRootLogger().removeAppender(appender);
  }
}
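The corruption step in this test relies on RandomAccessFile allowing byte-precise positional writes: the test seeks to the position reported after the last valid op, confirms the next byte is the OP_INVALID marker (-1), and then scribbles a byte one position further on, so only data past the last synced op is touched. Below is a standalone sketch of that positional write, assuming the last good offset is already known; the class and method names are hypothetical.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class ScribblePastSyncPoint {

  // Write an arbitrary byte just past 'lastGoodPos', leaving everything up to and
  // including that offset untouched, to mimic a log whose tail is still being written.
  static void scribblePast(File file, long lastGoodPos) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
      raf.seek(lastGoodPos + 1);
      raf.writeByte(2);
    }
  }
}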
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestEditLog, method testEditLogFailOverFromCorrupt.
/**
 * Test edit log failover from a corrupt edit log
 */
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {

    @Override
    public boolean accept(File dir, String name) {
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  long fileLen = files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
  RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
  // seek to checksum bytes
  rwf.seek(fileLen - 4);
  int b = rwf.readInt();
  rwf.seek(fileLen - 4);
  rwf.writeInt(b + 1);
  rwf.close();
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection<EditLogInputStream> streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
  }
}
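The corruption here targets the last 4 bytes of the finalized segment, which the test treats as the checksum bytes of the final record: reading the int, incrementing it, and writing it back at the same offset guarantees a mismatch without disturbing the record payload. The same flip extracted into a small self-contained helper (the class name is made up for illustration):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class FlipTrailingChecksum {

  // Increment the 4-byte big-endian int stored at the very end of the file.
  // If those bytes hold a CRC, verification of the final record will now fail.
  static void flip(File file) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
      long checksumOffset = raf.length() - 4;
      raf.seek(checksumOffset);
      int stored = raf.readInt();
      raf.seek(checksumOffset);
      raf.writeInt(stored + 1);
    }
  }
}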
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestEditLogFileInputStream, method testScanCorruptEditLog.
/**
 * Regression test for HDFS-8965 which verifies that
 * FSEditLogFileInputStream#scanOp verifies Op checksums.
 */
@Test(timeout = 60000)
public void testScanCorruptEditLog() throws Exception {
  Configuration conf = new Configuration();
  File editLog = new File(GenericTestUtils.getTempPath("testCorruptEditLog"));
  LOG.debug("Creating test edit log file: " + editLog);
  EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, editLog.getAbsoluteFile(), 8192);
  elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
  FSEditLogOp.MkdirOp mkdirOp = FSEditLogOp.MkdirOp.getInstance(cache);
  mkdirOp.reset();
  mkdirOp.setRpcCallId(123);
  mkdirOp.setTransactionId(1);
  mkdirOp.setInodeId(789L);
  mkdirOp.setPath("/mydir");
  PermissionStatus perms = PermissionStatus.createImmutable("myuser", "mygroup", FsPermission.createImmutable((short) 0777));
  mkdirOp.setPermissionStatus(perms);
  elos.write(mkdirOp);
  mkdirOp.reset();
  mkdirOp.setRpcCallId(456);
  mkdirOp.setTransactionId(2);
  mkdirOp.setInodeId(123L);
  mkdirOp.setPath("/mydir2");
  perms = PermissionStatus.createImmutable("myuser", "mygroup", FsPermission.createImmutable((short) 0666));
  mkdirOp.setPermissionStatus(perms);
  elos.write(mkdirOp);
  elos.setReadyToFlush();
  elos.flushAndSync(false);
  elos.close();
  long fileLen = editLog.length();
  LOG.debug("Corrupting last 4 bytes of edit log file " + editLog + ", whose length is " + fileLen);
  RandomAccessFile rwf = new RandomAccessFile(editLog, "rw");
  rwf.seek(fileLen - 4);
  int b = rwf.readInt();
  rwf.seek(fileLen - 4);
  rwf.writeInt(b + 1);
  rwf.close();
  EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
  Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, elis.getVersion(true));
  Assert.assertEquals(1, elis.scanNextOp());
  LOG.debug("Read transaction 1 from " + editLog);
  try {
    elis.scanNextOp();
    Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
  } catch (IOException e) {
    LOG.debug("Caught expected checksum error when reading corrupt transaction 2", e);
    GenericTestUtils.assertExceptionContains("Transaction is corrupt.", e);
  }
  elis.close();
}
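Beyond the RandomAccessFile corruption itself, both checksum tests share the same verification idiom: perform the read, fail() if it returns normally, and inspect the exception message in the catch block. Below is a generic sketch of that pattern using only JUnit 4 assertions; the interface and method names are illustrative, and the Hadoop tests use GenericTestUtils.assertExceptionContains for the message check instead.

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

public class ExpectedCorruptionPattern {

  /** Anything that reads the possibly corrupted data and may throw. */
  interface Read {
    void run() throws Exception;
  }

  // The read must throw, and its message must point at the corruption.
  static void expectCorruption(Read read, String expectedFragment) {
    try {
      read.run();
      fail("Expected the read of corrupted data to fail");
    } catch (Exception e) {
      String msg = String.valueOf(e.getMessage());
      assertTrue("Unexpected error message: " + msg, msg.contains(expectedFragment));
    }
  }
}

For instance, the tail of the test above could be expressed as expectCorruption(() -> elis.scanNextOp(), "Transaction is corrupt.").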
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestFSEditLogLoader, method truncateFile.
/**
 * Truncate the given file to the given length
 */
private void truncateFile(File logFile, long newLength) throws IOException {
  RandomAccessFile raf = new RandomAccessFile(logFile, "rw");
  raf.setLength(newLength);
  raf.close();
}
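setLength() is the one RandomAccessFile call in this section that changes the file size directly rather than overwriting bytes: if the new length is smaller the file is truncated, and if it is larger the file grows, with the contents of the extended region left unspecified by the API. A small runnable sketch demonstrating both directions (file name and sizes are arbitrary):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class SetLengthDemo {

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("setlength-demo", ".bin");
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
      raf.write(new byte[100]);           // file is now 100 bytes long
      raf.setLength(40);                  // truncate: length becomes 40
      System.out.println(raf.length());   // prints 40
      raf.setLength(80);                  // grow: length becomes 80, new bytes unspecified
      System.out.println(raf.length());   // prints 80
    } finally {
      f.delete();
    }
  }
}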