Example 6 with RemoteEditLogManifest

Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest in project hadoop by apache.

The class TestCheckpoint, method testNamespaceVerifiedOnFileTransfer.

/**
   * Test that the primary NN will not serve any files to a 2NN who doesn't
   * share its namespace ID, and also will not accept any files from one.
   */
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        URL fsName = DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(), conf, DFSUtil.getHttpClientScheme(conf)).toURL();
        // Make a finalized log on the server side. 
        nn.rollEditLog();
        RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
        RemoteEditLog log = manifest.getLogs().get(0);
        NNStorage dstImage = Mockito.mock(NNStorage.class);
        Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))).when(dstImage).getFiles(Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
        File mockImageFile = File.createTempFile("image", "");
        FileOutputStream imageFile = new FileOutputStream(mockImageFile);
        imageFile.write("data".getBytes());
        imageFile.close();
        Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
        Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString()).when(dstImage).toColonSeparatedString();
        try {
            TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false, false);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage, NameNodeFile.IMAGE, 0);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
    } finally {
        cleanup(cluster);
        cluster = null;
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) IOException(java.io.IOException) URL(java.net.URL) RemoteEditLogManifest(org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest) FileOutputStream(java.io.FileOutputStream) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) RemoteEditLog(org.apache.hadoop.hdfs.server.protocol.RemoteEditLog) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) File(java.io.File) Test(org.junit.Test)
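
The failures this test expects all come from comparing the serving NameNode's storage info string against the one the downloading side expects. Below is a minimal sketch of that comparison, not the actual TransferFsImage implementation: the verifyStorageInfo helper and its message text are hypothetical stand-ins, with only the "but the secondary expected" substring taken from the test's assertions, and the "1:1:1:X" value derived from the mocked StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE) via toColonSeparatedString() (field order assumed to be layoutVersion:namespaceID:cTime:clusterID).

import java.io.IOException;

public class StorageInfoCheckSketch {

    // Hypothetical helper mirroring the check the transfer code performs;
    // the real verification lives inside TransferFsImage and the image servlet.
    static void verifyStorageInfo(String actual, String expected) throws IOException {
        if (!actual.equals(expected)) {
            throw new IOException("Remote storage info '" + actual
                + "' was reported, but the secondary expected '" + expected + "'");
        }
    }

    public static void main(String[] args) {
        // Made-up value standing in for the primary NN's real storage string.
        String fromPrimary = "-63:129380586:0:CID-example";
        // What the mocked dstImage in the test above would answer with.
        String secondaryExpects = "1:1:1:X";
        try {
            verifyStorageInfo(fromPrimary, secondaryExpects);
        } catch (IOException ioe) {
            // Contains "but the secondary expected", the substring the test asserts.
            System.out.println(ioe.getMessage());
        }
    }
}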

Example 7 with RemoteEditLogManifest

Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest in project hadoop by apache.

The class Journal, method getEditLogManifest.

/**
   * @see QJournalProtocol#getEditLogManifest(String, long, boolean)
   */
public RemoteEditLogManifest getEditLogManifest(long sinceTxId, boolean inProgressOk) throws IOException {
    // No need to checkRequest() here - anyone may ask for the list
    // of segments.
    checkFormatted();
    List<RemoteEditLog> logs = fjm.getRemoteEditLogs(sinceTxId, inProgressOk);
    if (inProgressOk) {
        RemoteEditLog log = null;
        for (Iterator<RemoteEditLog> iter = logs.iterator(); iter.hasNext(); ) {
            log = iter.next();
            if (log.isInProgress()) {
                iter.remove();
                break;
            }
        }
        if (log != null && log.isInProgress()) {
            logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId(), true));
        }
    }
    return new RemoteEditLogManifest(logs, getCommittedTxnId());
}
Also used : RemoteEditLogManifest(org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest) RemoteEditLog(org.apache.hadoop.hdfs.server.protocol.RemoteEditLog)
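
The notable behavior here is how an in-progress segment is handled: it is removed from the list and re-added with its end txid pinned to the highest txid actually written, so readers never see a moving endpoint. A minimal sketch of that capping, assuming Hadoop's hdfs server classes on the classpath and the two-argument RemoteEditLogManifest constructor used above; the txid values and the stand-ins for getHighestWrittenTxId() and getCommittedTxnId() are made up for illustration.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

public class InProgressCapSketch {
    public static void main(String[] args) {
        long highestWrittenTxId = 157; // stand-in for getHighestWrittenTxId()
        long committedTxnId = 150;     // stand-in for getCommittedTxnId()

        List<RemoteEditLog> logs = new ArrayList<>();
        logs.add(new RemoteEditLog(1, 100));                      // finalized segment
        logs.add(new RemoteEditLog(101, -1 /* unknown */, true)); // in progress

        // Same pattern as the Journal code above: drop the first in-progress
        // entry, then re-add it with a fixed end txid.
        RemoteEditLog log = null;
        for (Iterator<RemoteEditLog> iter = logs.iterator(); iter.hasNext(); ) {
            log = iter.next();
            if (log.isInProgress()) {
                iter.remove();
                break;
            }
        }
        if (log != null && log.isInProgress()) {
            logs.add(new RemoteEditLog(log.getStartTxId(), highestWrittenTxId, true));
        }

        // Prints the finalized segment plus the capped in-progress one.
        System.out.println(new RemoteEditLogManifest(logs, committedTxnId));
    }
}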

Example 8 with RemoteEditLogManifest

Use of org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest in project hadoop by apache.

The class JournalSet, method getEditLogManifest.

/**
   * Return a manifest of what finalized edit logs are available. All available
   * edit logs are returned starting from the transaction id passed. If
   * 'fromTxId' falls in the middle of a log, that log is returned as well.
   * 
   * @param fromTxId Starting transaction id to read the logs.
   * @return RemoteEditLogManifest object.
   */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
    // Collect RemoteEditLogs available from each FileJournalManager
    List<RemoteEditLog> allLogs = Lists.newArrayList();
    for (JournalAndStream j : journals) {
        if (j.getManager() instanceof FileJournalManager) {
            FileJournalManager fjm = (FileJournalManager) j.getManager();
            try {
                allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, false));
            } catch (Throwable t) {
                LOG.warn("Cannot list edit logs in " + fjm, t);
            }
        }
    }
    // Group logs by their starting txid
    ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId = Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
    long curStartTxId = fromTxId;
    List<RemoteEditLog> logs = Lists.newArrayList();
    while (true) {
        ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
        if (logGroup.isEmpty()) {
            // we have a gap in logs - for example because we recovered some old
            // storage directory with ancient logs. Clear out any logs we've
            // accumulated so far, and then skip to the next segment of logs
            // after the gap.
            SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
            startTxIds = startTxIds.tailSet(curStartTxId);
            if (startTxIds.isEmpty()) {
                break;
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Found gap in logs at " + curStartTxId + ": " + "not returning previous logs in manifest.");
                }
                logs.clear();
                curStartTxId = startTxIds.first();
                continue;
            }
        }
        // Find the one that extends the farthest forward
        RemoteEditLog bestLog = Collections.max(logGroup);
        logs.add(bestLog);
        // And then start looking from after that point
        curStartTxId = bestLog.getEndTxId() + 1;
    }
    RemoteEditLogManifest ret = new RemoteEditLogManifest(logs, curStartTxId - 1);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Generated manifest for logs since " + fromTxId + ":" + ret);
    }
    return ret;
}
Also used : RemoteEditLogManifest(org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest) RemoteEditLog(org.apache.hadoop.hdfs.server.protocol.RemoteEditLog)
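
The interesting part of this method is the selection loop: for each start txid it keeps only the segment that extends farthest forward, and a gap invalidates everything accumulated so far, resuming from the first segment after the gap. Here is a self-contained sketch of that logic on plain (start, end) pairs with made-up txid ranges; it needs no Hadoop classes and is an illustration of the algorithm, not the production code.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class ManifestSelectionSketch {
    public static void main(String[] args) {
        // Segments from several storage dirs: two copies start at txid 1 but
        // one reaches farther, and txids 201-300 are missing (a gap).
        long[][] segments = { {1, 100}, {1, 80}, {101, 200}, {301, 400} };
        long fromTxId = 1;

        // Group by start txid, keeping only the farthest-reaching end,
        // mirroring Multimaps.index plus Collections.max above.
        TreeMap<Long, Long> bestEndByStart = new TreeMap<>();
        for (long[] s : segments) {
            bestEndByStart.merge(s[0], s[1], Math::max);
        }

        List<long[]> chosen = new ArrayList<>();
        long cur = fromTxId;
        while (true) {
            Long end = bestEndByStart.get(cur);
            if (end == null) {
                // Gap: discard what was accumulated and skip to the first
                // start txid after the gap, as the Hadoop code does.
                Long nextStart = bestEndByStart.ceilingKey(cur);
                if (nextStart == null) {
                    break;
                }
                chosen.clear();
                cur = nextStart;
                continue;
            }
            chosen.add(new long[] { cur, end });
            cur = end + 1;
        }

        // Because of the gap at 201-300, only "301-400" is printed.
        for (long[] s : chosen) {
            System.out.println(s[0] + "-" + s[1]);
        }
    }
}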

Aggregations

RemoteEditLogManifest (org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest) 8
RemoteEditLog (org.apache.hadoop.hdfs.server.protocol.RemoteEditLog) 7
IOException (java.io.IOException) 3
File (java.io.File) 2
URL (java.net.URL) 2
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) 2
Test (org.junit.Test) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
FileOutputStream (java.io.FileOutputStream) 1
RandomAccessFile (java.io.RandomAccessFile) 1
ArrayList (java.util.ArrayList) 1
Map (java.util.Map) 1
PriorityQueue (java.util.PriorityQueue) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 1
RemoteEditLogManifestProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto) 1
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo) 1
EditLogInputStream (org.apache.hadoop.hdfs.server.namenode.EditLogInputStream) 1
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) 1