use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
the class TestJournalNodeMXBean method testJournalNodeMXBean.
@Test
public void testJournalNodeMXBean() throws Exception {
  // we have not formatted the journals yet, and the journal status in jmx
  // should be empty since journal objects are created lazily
  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
  ObjectName mxbeanName = new ObjectName(
      "Hadoop:service=JournalNode,name=JournalNodeInfo");

  // getJournalsStatus
  String journalStatus = (String) mbs.getAttribute(mxbeanName,
      "JournalsStatus");
  assertEquals(jn.getJournalsStatus(), journalStatus);
  assertFalse(journalStatus.contains(NAMESERVICE));

  // format the journal ns1
  final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster",
      "my-bp", 0L);
  jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);

  // check again after format
  // getJournalsStatus
  journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
  assertEquals(jn.getJournalsStatus(), journalStatus);
  Map<String, Map<String, String>> jMap = new HashMap<>();
  Map<String, String> infoMap = new HashMap<>();
  infoMap.put("Formatted", "true");
  jMap.put(NAMESERVICE, infoMap);
  Map<String, String> infoMap1 = new HashMap<>();
  infoMap1.put("Formatted", "false");
  jMap.put(MiniJournalCluster.CLUSTER_WAITACTIVE_URI, infoMap1);
  assertEquals(JSON.toString(jMap), journalStatus);

  // restart journal node without formatting
  jCluster = new MiniJournalCluster.Builder(new Configuration())
      .format(false).numJournalNodes(NUM_JN).build();
  jCluster.waitActive();
  jn = jCluster.getJournalNode(0);

  // re-check
  journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
  assertEquals(jn.getJournalsStatus(), journalStatus);
  assertEquals(JSON.toString(jMap), journalStatus);
}
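The test reads the JournalNodeInfo MBean through the in-process platform MBean server. A minimal sketch of reading the same JournalsStatus attribute from outside the JVM over a JMX connector; the endpoint (localhost:9999) is hypothetical and assumes the JournalNode JVM was started with remote JMX enabled:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JournalStatusReader {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; requires e.g.
    // -Dcom.sun.management.jmxremote.port=9999 (plus auth/ssl settings)
    // on the JournalNode JVM.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
      // As the test shows, JournalsStatus is a JSON map of
      // journal id -> {"Formatted": "true"|"false"}.
      String status = (String) mbs.getAttribute(name, "JournalsStatus");
      System.out.println(status);
    }
  }
}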
use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
the class TestPBHelper method testConvertNamespaceInfo.
@Test
public void testConvertNamespaceInfo() {
  NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300);
  NamespaceInfoProto proto = PBHelper.convert(info);
  NamespaceInfo info2 = PBHelper.convert(proto);
  // compare the StorageInfo fields
  compare(info, info2);
  assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
  assertEquals(info.getBuildVersion(), info2.getBuildVersion());
}
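This is the standard round-trip conversion test: convert to the protobuf form, convert back, and compare the fields that matter. A minimal, self-contained sketch of the same pattern generalized over any to-proto/from-proto pair; the helper name roundTrip is hypothetical:

import java.util.function.Function;
import static org.junit.Assert.assertEquals;

public class RoundTripCheck {
  // Convert a value to its proto form and back, then compare a derived key.
  static <T, P, K> void roundTrip(T original,
      Function<T, P> toProto, Function<P, T> fromProto,
      Function<T, K> key) {
    T copy = fromProto.apply(toProto.apply(original));
    assertEquals(key.apply(original), key.apply(copy));
  }
}

With PBHelper this could be invoked as roundTrip(info, PBHelper::convert, PBHelper::convert, NamespaceInfo::getBlockPoolID), assuming the convert overloads resolve under the Function target types.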
use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
the class TestBlockListAsLongs method testDatanodeDetect.
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
  final AtomicReference<BlockReportRequestProto> request =
      new AtomicReference<>();

  // just capture the outgoing PB
  DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
  doAnswer(new Answer<BlockReportResponseProto>() {
    @Override
    public BlockReportResponseProto answer(InvocationOnMock invocation) {
      Object[] args = invocation.getArguments();
      request.set((BlockReportRequestProto) args[1]);
      return BlockReportResponseProto.newBuilder().build();
    }
  }).when(mockProxy).blockReport(any(RpcController.class),
      any(BlockReportRequestProto.class));

  @SuppressWarnings("resource")
  DatanodeProtocolClientSideTranslatorPB nn =
      new DatanodeProtocolClientSideTranslatorPB(mockProxy);
  DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
  NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
  reg.setNamespaceInfo(nsInfo);

  Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
  BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
  DatanodeStorage storage = new DatanodeStorage("s1");
  StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };

  // check DN sends new-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
  nn.blockReport(reg, "pool", sbr,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  BlockReportRequestProto proto = request.get();
  assertNotNull(proto);
  assertTrue(proto.getReports(0).getBlocksList().isEmpty());
  assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());

  // back up to prior version and check DN sends old-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
  BlockListAsLongs blockList = getBlockList(r);
  StorageBlockReport[] obp = {
      new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
  nn.blockReport(reg, "pool", obp,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  proto = request.get();
  assertNotNull(proto);
  assertFalse(proto.getReports(0).getBlocksList().isEmpty());
  assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
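The switch between the two wire formats is driven by a capability bitmask carried in NamespaceInfo: the test flips it between STORAGE_BLOCK_REPORT_BUFFERS and UNKNOWN and asserts which proto fields get populated. A minimal sketch of that enum-mask pattern with hypothetical names, showing how getMask() and a bitwise test work together:

public class CapabilityDemo {
  // Hypothetical capability enum; each real capability owns one bit,
  // while UNKNOWN advertises no bits at all.
  enum Capability {
    UNKNOWN(0),
    STORAGE_BLOCK_REPORT_BUFFERS(1L << 0);

    private final long mask;
    Capability(long mask) { this.mask = mask; }
    public long getMask() { return mask; }
  }

  private long capabilities;

  void setCapabilities(long capabilities) {
    this.capabilities = capabilities;
  }

  boolean isCapabilitySupported(Capability c) {
    // A sender checks the peer's advertised bits before picking a format.
    return (capabilities & c.getMask()) != 0;
  }

  public static void main(String[] args) {
    CapabilityDemo info = new CapabilityDemo();
    info.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
    // true: the buffer-based block report format may be used
    System.out.println(
        info.isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS));
    info.setCapabilities(Capability.UNKNOWN.getMask());
    // false: fall back to the old list-of-longs format
    System.out.println(
        info.isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS));
  }
}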
use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
the class BackupNode method initialize.
// NameNode
@Override
protected void initialize(Configuration conf) throws IOException {
  // async edit logs are incompatible with backup node due to race
  // conditions resulting from laxer synchronization
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, false);

  // Trash is disabled in BackupNameNode,
  // but should be turned back on if it ever becomes active.
  conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
      CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
  NamespaceInfo nsInfo = handshake(conf);
  super.initialize(conf);
  namesystem.setBlockPoolId(nsInfo.getBlockPoolID());
  if (!namesystem.isInSafeMode()) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  // Backup node should never do lease recovery,
  // therefore lease hard limit should never expire.
  namesystem.leaseManager.setLeasePeriod(
      HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);

  // register with the active name-node
  registerWith(nsInfo);

  // Checkpoint daemon should start after the rpc server started
  runCheckpointDaemon(conf);
  InetSocketAddress addr = getHttpAddress();
  if (addr != null) {
    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
  }
}
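Note that initialize() calls handshake(conf) before the superclass starts, so the NamespaceInfo (and hence the block pool id) is known up front. A minimal sketch of what such a handshake loop can look like, polling until the active NameNode answers; the NameNodeEndpoint interface and the simplified NamespaceInfo carrier here are hypothetical stand-ins, not the real RPC proxy:

import java.io.IOException;

public class HandshakeSketch {
  // Hypothetical stand-in for the NameNode RPC proxy.
  interface NameNodeEndpoint {
    NamespaceInfo versionRequest() throws IOException;
  }

  // Hypothetical minimal NamespaceInfo carrier.
  static class NamespaceInfo {
    final String blockPoolId;
    NamespaceInfo(String blockPoolId) { this.blockPoolId = blockPoolId; }
  }

  static NamespaceInfo handshake(NameNodeEndpoint namenode)
      throws InterruptedException {
    // Poll until the active NameNode is reachable; each failure is
    // logged and retried after a short backoff.
    while (true) {
      try {
        return namenode.versionRequest();
      } catch (IOException e) {
        System.err.println("NameNode not reachable yet: " + e.getMessage());
        Thread.sleep(1000);
      }
    }
  }
}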
use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
the class JournalNodeSyncer method getMissingLogSegments.
private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,
    GetEditLogManifestResponseProto response,
    JournalNodeProxy remoteJNproxy) {
  List<RemoteEditLog> otherJournalEditLogs =
      PBHelper.convert(response.getManifest()).getLogs();
  if (otherJournalEditLogs == null || otherJournalEditLogs.isEmpty()) {
    LOG.warn("Journal at " + remoteJNproxy.jnAddr + " has no edit logs");
    return;
  }
  List<RemoteEditLog> missingLogs =
      getMissingLogList(thisJournalEditLogs, otherJournalEditLogs);
  if (!missingLogs.isEmpty()) {
    NamespaceInfo nsInfo = jnStorage.getNamespaceInfo();
    for (RemoteEditLog missingLog : missingLogs) {
      URL url = null;
      boolean success = false;
      try {
        if (remoteJNproxy.httpServerUrl == null) {
          if (response.hasFromURL()) {
            URI uri = URI.create(response.getFromURL());
            remoteJNproxy.httpServerUrl = getHttpServerURI(uri.getScheme(),
                uri.getHost(), uri.getPort());
          } else {
            remoteJNproxy.httpServerUrl = getHttpServerURI("http",
                remoteJNproxy.jnAddr.getHostName(), response.getHttpPort());
          }
        }
        String urlPath = GetJournalEditServlet.buildPath(jid,
            missingLog.getStartTxId(), nsInfo);
        url = new URL(remoteJNproxy.httpServerUrl, urlPath);
        success = downloadMissingLogSegment(url, missingLog);
      } catch (MalformedURLException e) {
        LOG.error("Malformed URL when downloading missing log segment", e);
      } catch (Exception e) {
        LOG.error("Exception in downloading missing log segment from url "
            + url, e);
      }
      if (!success) {
        LOG.error("Aborting current sync attempt.");
        break;
      }
    }
  }
}
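The getMissingLogList helper (not shown) compares the local edit log manifest against the remote one by transaction id. A minimal sketch of one simplified way to do that comparison, using a hypothetical EditLog descriptor in place of RemoteEditLog and keying segments by their start txid:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MissingLogSketch {
  // Simplified stand-in for RemoteEditLog: one finalized segment.
  static class EditLog {
    final long startTxId;
    final long endTxId;
    EditLog(long startTxId, long endTxId) {
      this.startTxId = startTxId;
      this.endTxId = endTxId;
    }
  }

  // Return the remote segments whose start txid is absent locally;
  // these are the segments the syncer would try to download.
  static List<EditLog> getMissingLogList(List<EditLog> local,
      List<EditLog> remote) {
    Set<Long> localStarts = new HashSet<>();
    for (EditLog log : local) {
      localStarts.add(log.startTxId);
    }
    List<EditLog> missing = new ArrayList<>();
    for (EditLog log : remote) {
      if (!localStarts.contains(log.startTxId)) {
        missing.add(log);
      }
    }
    return missing;
  }
}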