Use of org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes in project hadoop by apache.
From the class TestOfflineEditsViewer, method hasAllOpCodes.
/**
 * Checks that the edits file exercises all opCodes.
 *
 * @param inFilename edits file to analyze
 * @return true if edits (inFilename) has all non-skipped opCodes
 * @throws IOException if the stats output file cannot be created or written
 */
private boolean hasAllOpCodes(String inFilename) throws IOException {
  String outFilename = inFilename + ".stats";
  // try-with-resources: the original leaked the FileOutputStream on every
  // path, including the early "return false" below.
  try (FileOutputStream fout = new FileOutputStream(outFilename)) {
    StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(fout);
    OfflineEditsViewer oev = new OfflineEditsViewer();
    if (oev.go(inFilename, outFilename, "stats", new Flags(), visitor) != 0) {
      return false;
    }
    LOG.info("Statistics for " + inFilename + "\n"
        + visitor.getStatisticsString());
    boolean hasAllOpCodes = true;
    for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
      // don't need to test obsolete opCodes
      if (skippedOps.contains(opCode)) {
        continue;
      }
      Long count = visitor.getStatistics().get(opCode);
      if ((count == null) || (count == 0)) {
        hasAllOpCodes = false;
        LOG.info("Opcode " + opCode + " not tested in " + inFilename);
      }
    }
    return hasAllOpCodes;
  }
}
Use of org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes in project hadoop by apache.
From the class TestFileAppendRestart, method testAppendRestart.
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries, verifies the expected edit
 * log op counts, and checks the files survive a NameNode restart.
 */
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    // In-progress edit log segment for the active NN, starting at txid 1.
    File editLog = new File(
        FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
        NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;

    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE / 2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of
    //   last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    // in addition to the ones above
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    // Verify both files are readable with the expected lengths after a
    // NameNode restart (i.e. the edit log replays correctly).
    cluster.restartNameNode();
    AppendTestUtil.check(fs, p1, 2 * BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3 * BLOCK_SIZE / 2);
  } finally {
    // Removed dead code: a local "stream" was declared but never assigned,
    // so IOUtils.closeStream(stream) was a guaranteed no-op on null.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes in project hadoop by apache.
From the class StatisticsEditsVisitor, method getStatisticsString.
/**
 * Get the statistics in string format, suitable for printing.
 *
 * @return statistics in string format, suitable for printing
 */
public String getStatisticsString() {
  StringBuilder buf = new StringBuilder();
  // Header line with the edit log layout version.
  buf.append(String.format(" %-30.30s : %d%n", "VERSION", version));
  // One line per opCode; opCodes never seen are reported with a zero count.
  for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
    Long tally = opCodeCount.get(opCode);
    Long printed = (tally == null) ? Long.valueOf(0L) : tally;
    buf.append(String.format(" %-30.30s (%3d): %d%n",
        opCode.toString(), opCode.getOpCode(), printed));
  }
  return buf.toString();
}
Use of org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes in project hadoop by apache.
From the class TestOfflineEditsViewer, method testStatisticsStrWithNullOpCodeCount.
/**
 * Verifies that getStatisticsString() emits a zero-count line for every
 * opCode that never appeared in the edits file (i.e. whose count is null).
 *
 * @throws IOException if generating the edits or writing stats fails
 */
@Test
public void testStatisticsStrWithNullOpCodeCount() throws IOException {
  String editFilename = nnHelper.generateEdits();
  String outFilename = editFilename + ".stats";
  String statisticsStr = null;
  StatisticsEditsVisitor visitor;
  // try-with-resources: the original never closed the FileOutputStream.
  try (FileOutputStream fout = new FileOutputStream(outFilename)) {
    visitor = new StatisticsEditsVisitor(fout);
    OfflineEditsViewer oev = new OfflineEditsViewer();
    if (oev.go(editFilename, outFilename, "stats", new Flags(), visitor) == 0) {
      statisticsStr = visitor.getStatisticsString();
    }
  }
  Assert.assertNotNull(statisticsStr);
  Map<FSEditLogOpCodes, Long> opCodeCount = visitor.getStatistics();
  for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
    Long count = opCodeCount.get(opCode);
    // Verify the formatted line when the opCode's count is null
    if (count == null) {
      String str = String.format(" %-30.30s (%3d): %d%n",
          opCode.toString(), opCode.getOpCode(), Long.valueOf(0L));
      assertTrue(statisticsStr.contains(str));
    }
  }
}
Aggregations