Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.
From the class TestDFSInotifyEventInputStream, method testBasic.
/**
* Tests all FsEditLogOps that are converted to inotify events.
*/
@Test(timeout = 120000)
@SuppressWarnings("deprecation")
public void testBasic() throws IOException, URISyntaxException, InterruptedException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// so that we can get an atime change
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(2);
MiniQJMHACluster cluster = builder.build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client = new DFSClient(cluster.getDfsCluster().getNameNode(0).getNameNodeAddress(), conf);
FileSystem fs = cluster.getDfsCluster().getFileSystem(0);
DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
DFSInotifyEventInputStream eis = client.getInotifyEventStream();
// RenameOp -> RenameEvent
client.rename("/file", "/file4", null);
// RenameOldOp -> RenameEvent
client.rename("/file4", "/file2");
// DeleteOp, AddOp -> UnlinkEvent, CreateEvent
OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
os.write(new byte[BLOCK_SIZE]);
// CloseOp -> CloseEvent
os.close();
// AddOp -> AppendEvent
os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
os.write(new byte[BLOCK_SIZE]);
// CloseOp -> CloseEvent
os.close();
// so that the atime will get updated on the next line
Thread.sleep(10);
// TimesOp -> MetadataUpdateEvent
client.open("/file2").read(new byte[1]);
// SetReplicationOp -> MetadataUpdateEvent
client.setReplication("/file2", (short) 1);
// ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
client.concat("/file2", new String[] { "/file3" });
// DeleteOp -> UnlinkEvent
client.delete("/file2", false);
// MkdirOp -> CreateEvent
client.mkdirs("/dir", null, false);
// SetPermissionsOp -> MetadataUpdateEvent
client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
// SetOwnerOp -> MetadataUpdateEvent
client.setOwner("/dir", "username", "groupname");
// SymlinkOp -> CreateEvent
client.createSymlink("/dir", "/dir2", false);
client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(// SetXAttrOp -> MetadataUpdateEvent
XAttrSetFlag.CREATE));
// RemoveXAttrOp -> MetadataUpdateEvent
client.removeXAttr("/file5", "user.field");
// SetAclOp -> MetadataUpdateEvent
client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
// SetAclOp -> MetadataUpdateEvent
client.removeAcl("/file5");
// RenameOldOp -> RenameEvent
client.rename("/file5", "/dir");
// TruncateOp -> TruncateEvent
client.truncate("/truncate_file", BLOCK_SIZE);
EventBatch batch = null;
// RenameOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
long txid = batch.getTxid();
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertEquals("/file4", re.getDstPath());
Assert.assertEquals("/file", re.getSrcPath());
Assert.assertTrue(re.getTimestamp() > 0);
LOG.info(re.toString());
Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
long eventsBehind = eis.getTxidsBehindEstimate();
// RenameOldOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertTrue(re2.getDstPath().equals("/file2"));
Assert.assertTrue(re2.getSrcPath().equals("/file4"));
Assert.assertTrue(re2.getTimestamp() > 0);
LOG.info(re2.toString());
// AddOp with overwrite
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
Assert.assertTrue(ce.getPath().equals("/file2"));
Assert.assertTrue(ce.getCtime() > 0);
Assert.assertTrue(ce.getReplication() > 0);
Assert.assertTrue(ce.getSymlinkTarget() == null);
Assert.assertTrue(ce.getOverwrite());
Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
LOG.info(ce.toString());
Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
// CloseOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
Assert.assertTrue(ce2.getPath().equals("/file2"));
Assert.assertTrue(ce2.getFileSize() > 0);
Assert.assertTrue(ce2.getTimestamp() > 0);
LOG.info(ce2.toString());
Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
// AppendOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
Event.AppendEvent append2 = (Event.AppendEvent) batch.getEvents()[0];
Assert.assertEquals("/file2", append2.getPath());
Assert.assertFalse(append2.toNewBlock());
LOG.info(append2.toString());
Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
// CloseOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
// TimesOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue.getPath().equals("/file2"));
Assert.assertTrue(mue.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.TIMES);
LOG.info(mue.toString());
Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
// SetReplicationOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue2.getPath().equals("/file2"));
Assert.assertTrue(mue2.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.REPLICATION);
Assert.assertTrue(mue2.getReplication() == 1);
LOG.info(mue2.toString());
// ConcatDeleteOp
batch = waitForNextEvents(eis);
Assert.assertEquals(3, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
Assert.assertTrue(ue2.getPath().equals("/file3"));
Assert.assertTrue(ue2.getTimestamp() > 0);
LOG.info(ue2.toString());
Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
Assert.assertTrue(ce3.getPath().equals("/file2"));
Assert.assertTrue(ce3.getTimestamp() > 0);
// DeleteOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
Assert.assertTrue(ue.getPath().equals("/file2"));
Assert.assertTrue(ue.getTimestamp() > 0);
LOG.info(ue.toString());
// MkdirOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce4.getiNodeType() == Event.CreateEvent.INodeType.DIRECTORY);
Assert.assertTrue(ce4.getPath().equals("/dir"));
Assert.assertTrue(ce4.getCtime() > 0);
Assert.assertTrue(ce4.getReplication() == 0);
Assert.assertTrue(ce4.getSymlinkTarget() == null);
LOG.info(ce4.toString());
// SetPermissionsOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue3.getPath().equals("/dir"));
Assert.assertTrue(mue3.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.PERMS);
Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
LOG.info(mue3.toString());
// SetOwnerOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue4.getPath().equals("/dir"));
Assert.assertTrue(mue4.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.OWNER);
Assert.assertTrue(mue4.getOwnerName().equals("username"));
Assert.assertTrue(mue4.getGroupName().equals("groupname"));
LOG.info(mue4.toString());
// SymlinkOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce5.getiNodeType() == Event.CreateEvent.INodeType.SYMLINK);
Assert.assertTrue(ce5.getPath().equals("/dir2"));
Assert.assertTrue(ce5.getCtime() > 0);
Assert.assertTrue(ce5.getReplication() == 0);
Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
LOG.info(ce5.toString());
// SetXAttrOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue5.getPath().equals("/file5"));
Assert.assertTrue(mue5.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.XATTRS);
Assert.assertTrue(mue5.getxAttrs().size() == 1);
Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
Assert.assertTrue(!mue5.isxAttrsRemoved());
LOG.info(mue5.toString());
// RemoveXAttrOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue6.getPath().equals("/file5"));
Assert.assertTrue(mue6.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.XATTRS);
Assert.assertTrue(mue6.getxAttrs().size() == 1);
Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
Assert.assertTrue(mue6.isxAttrsRemoved());
LOG.info(mue6.toString());
// SetAclOp (1)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue7.getPath().equals("/file5"));
Assert.assertTrue(mue7.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.ACLS);
Assert.assertTrue(mue7.getAcls().contains(AclEntry.parseAclEntry("user::rwx", true)));
LOG.info(mue7.toString());
// SetAclOp (2)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue8.getPath().equals("/file5"));
Assert.assertTrue(mue8.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.ACLS);
Assert.assertTrue(mue8.getAcls() == null);
LOG.info(mue8.toString());
// RenameOp (2)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
Assert.assertTrue(re3.getSrcPath().equals("/file5"));
Assert.assertTrue(re3.getTimestamp() > 0);
LOG.info(re3.toString());
// TruncateOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.TRUNCATE);
Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
Assert.assertTrue(et.getPath().equals("/truncate_file"));
Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
Assert.assertTrue(et.getTimestamp() > 0);
LOG.info(et.toString());
Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
// Returns null when there are no further events
Assert.assertTrue(eis.poll() == null);
// make sure the estimate hasn't changed since the above assertion
// tells us that we are fully caught up to the current namesystem state
// and we should not have been behind at all when eventsBehind was set
// either, since there were few enough events that they should have all
// been read to the client during the first poll() call
Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
} finally {
cluster.shutdown();
}
}
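Each batch above is verified against the op that produced it. Outside of a test, the same stream is normally obtained through the public HdfsAdmin API rather than a raw DFSClient. The following is a minimal sketch of such a consumer loop; the InotifyTail class name and the URI argument are illustrative placeholders, but the HdfsAdmin, DFSInotifyEventInputStream, and Event types are the ones exercised above.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    // args[0] is the NameNode/nameservice URI, e.g. hdfs://mycluster (placeholder)
    HdfsAdmin admin = new HdfsAdmin(URI.create(args[0]), new Configuration());
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    while (true) {
      // take() blocks until at least one batch of events is available
      EventBatch batch = stream.take();
      for (Event event : batch.getEvents()) {
        switch (event.getEventType()) {
          case CREATE:
            System.out.println("create: " + ((Event.CreateEvent) event).getPath());
            break;
          case RENAME: {
            Event.RenameEvent re = (Event.RenameEvent) event;
            System.out.println("rename: " + re.getSrcPath() + " -> " + re.getDstPath());
            break;
          }
          case UNLINK:
            System.out.println("delete: " + ((Event.UnlinkEvent) event).getPath());
            break;
          default:
            // CLOSE, APPEND, METADATA, and TRUNCATE events are handled the same way
            break;
        }
      }
    }
  }
}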
Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.
From the class TestDFSInotifyEventInputStream, method testNNFailover.
@Test(timeout = 120000)
public void testNNFailover() throws IOException, URISyntaxException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client = ((DistributedFileSystem) HATestUtil.configureFailoverFs(cluster.getDfsCluster(), conf)).dfs;
DFSInotifyEventInputStream eis = client.getInotifyEventStream();
for (int i = 0; i < 10; i++) {
client.mkdirs("/dir" + i, null, false);
}
cluster.getDfsCluster().shutdownNameNode(0);
cluster.getDfsCluster().transitionToActive(1);
EventBatch batch = null;
// all of the edits logged before the failover should be readable from the new active
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" + i));
}
Assert.assertTrue(eis.poll() == null);
} finally {
cluster.shutdown();
}
}
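The failover test shows that a client configured for HA keeps receiving events after the standby takes over. A related pattern, not shown here, is resuming after the reader itself restarts: since every EventBatch carries the transaction id of its last event, a consumer can checkpoint that id and reopen the stream from it. A hedged sketch follows, assuming the HdfsAdmin overload that accepts a last-read transaction id; the load/save/process helpers are hypothetical placeholders.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class ResumableInotifyReader {
  public void run(URI nnUri, Configuration conf) throws Exception {
    long lastReadTxid = loadLastTxid();
    HdfsAdmin admin = new HdfsAdmin(nnUri, conf);
    // reopen the stream at the last acknowledged transaction id
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream(lastReadTxid);
    EventBatch batch;
    while ((batch = stream.poll()) != null) {
      process(batch);                  // application-specific handling
      saveLastTxid(batch.getTxid());   // checkpoint progress after each batch
    }
  }

  // Hypothetical persistence and processing hooks, not part of any HDFS API.
  private long loadLastTxid() { return 0L; }
  private void saveLastTxid(long txid) { }
  private void process(EventBatch batch) { }
}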
Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.
From the class TestDFSInotifyEventInputStream, method testTwoActiveNNs.
@Test(timeout = 120000)
public void testTwoActiveNNs() throws IOException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client0 = new DFSClient(cluster.getDfsCluster().getNameNode(0).getNameNodeAddress(), conf);
DFSClient client1 = new DFSClient(cluster.getDfsCluster().getNameNode(1).getNameNodeAddress(), conf);
DFSInotifyEventInputStream eis = client0.getInotifyEventStream();
for (int i = 0; i < 10; i++) {
client0.mkdirs("/dir" + i, null, false);
}
cluster.getDfsCluster().transitionToActive(1);
for (int i = 10; i < 20; i++) {
client1.mkdirs("/dir" + i, null, false);
}
// make sure that the old active can't read any further than the edits
// it logged itself (it has no idea whether the in-progress edits from
// the other writer have actually been committed)
EventBatch batch = null;
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" + i));
}
Assert.assertTrue(eis.poll() == null);
} finally {
try {
cluster.shutdown();
} catch (ExitUtil.ExitException e) {
// expected because the old active will be unable to flush the
// end-of-segment op since it is fenced
}
}
}
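All three inotify tests call a waitForNextEvents helper that is not reproduced on this page. Presumably it simply retries poll() until a non-null batch arrives, relying on the @Test timeout to bound the wait; a sketch of what such a helper might look like (the real implementation in TestDFSInotifyEventInputStream may differ) is shown below.
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

final class InotifyTestUtil {
  // Poll the stream until a non-null batch arrives; the @Test timeout on the
  // calling test bounds how long this loop can spin.
  static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
      throws IOException, MissingEventsException {
    EventBatch batch = null;
    while ((batch = eis.poll()) == null) {
      // keep polling; poll() returns null when no events are currently available
    }
    return batch;
  }

  private InotifyTestUtil() {
  }
}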
Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.
From the class TestRollingUpgradeRollback, method testRollbackWithHAQJM.
/**
* Test rollback scenarios where StandbyNameNode does checkpoints during
* rolling upgrade.
*/
@Test
public void testRollbackWithHAQJM() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let NN1 tail editlog every 1s
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
// start rolling upgrade
RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
// create new directory
dfs.mkdirs(bar);
dfs.close();
dfs = dfsCluster.getFileSystem(0);
TestRollingUpgrade.queryForPreparation(dfs);
// If the query returns true, both active and the standby NN should have
// rollback fsimage ready.
Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage().hasRollbackFSImage());
Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage().hasRollbackFSImage());
// rollback NN0
dfsCluster.restartNameNode(0, true, "-rollingUpgrade", "rollback");
// shutdown NN1
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
// make sure /foo is still there, but /bar is not
dfs = dfsCluster.getFileSystem(0);
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
// check the details of NNStorage
NNStorage storage = dfsCluster.getNamesystem(0).getFSImage().getStorage();
// segments: (startSegment, mkdir, start upgrade, endSegment),
// (startSegment, mkdir, endSegment)
checkNNStorage(storage, 4, 7);
// check storage in JNs
for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
File dir = cluster.getJournalCluster().getCurrentDir(0, MiniQJMHACluster.NAMESERVICE);
checkJNStorage(dir, 5, 7);
}
// restart NN0 again to make sure we can start using the new fsimage and
// the corresponding md5 checksum
dfsCluster.restartNameNode(0);
// start the rolling upgrade again to make sure we do not load upgrade
// status after the rollback
dfsCluster.transitionToActive(0);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
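TestRollingUpgrade.queryForPreparation, called above, is likewise not reproduced on this page. Conceptually it re-issues the QUERY action until the NameNode reports that the rollback fsimage has been created. Below is a hedged sketch of that polling logic, using only the public DistributedFileSystem.rollingUpgrade API; the retry count and sleep interval are illustrative choices, not taken from the test.
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

final class RollingUpgradeQueryUtil {
  // Re-issue the QUERY action until the rollback fsimage has been created,
  // or fail after a bounded number of attempts.
  static void queryForPreparation(DistributedFileSystem dfs) throws Exception {
    for (int retries = 0; retries < 10; retries++) {
      RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
      if (info.createdRollbackImages()) {
        return;
      }
      Thread.sleep(1000);
    }
    throw new AssertionError("rollback fsimage was not created in time");
  }

  private RollingUpgradeQueryUtil() {
  }
}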
Use of org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster in project hadoop by apache.
From the class TestRollingUpgrade, method testFinalize.
private void testFinalize(int nnCount) throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let the other NNs tail the editlog every 1s
for (int i = 1; i < nnCount; i++) {
dfsCluster.getConfiguration(i).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
}
dfsCluster.restartNameNodes();
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();
// start rolling upgrade
RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
queryForPreparation(dfs);
// The NN should have a copy of the fsimage in case of rollbacks.
Assert.assertTrue(fsimage.hasRollbackFSImage());
info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
Assert.assertTrue(info.isFinalized());
Assert.assertTrue(dfs.exists(foo));
// Once finalized, there should be no more fsimage for rollbacks.
Assert.assertFalse(fsimage.hasRollbackFSImage());
// Should have no problem in restart and replaying edits that include
// the FINALIZE op.
dfsCluster.restartNameNode(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
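All five snippets share the same setup and teardown skeleton. As a closing summary, here is that skeleton on its own, assembled only from the Builder and MiniDFSCluster calls used above; the class and method names are placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;

public class MiniQjmHaClusterSkeleton {
  public void runScenario() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = null;
    try {
      // two NameNodes backed by a quorum journal, plus two DataNodes
      MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2);
      builder.getDfsBuilder().numDataNodes(2);
      cluster = builder.build();
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();
      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
      // ... exercise the scenario under test against dfs ...
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}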