
Example 1 with LogVerificationAppender

Use of org.apache.hadoop.hdfs.LogVerificationAppender in project hadoop by apache.

From the class TestReplicationPolicy, method testChooseTargetWithMoreThanAvailableNodes:

/**
   * In this test case we try to choose more targets than there are
   * available nodes, and verify the result.
   * @throws Exception
   */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
    // Make data nodes 0 and 1 unqualified for selection: not enough disk space
    for (int i = 0; i < 2; i++) {
        updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    // Try to choose one target per data node, which is more than the number
    // of nodes actually available.
    DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
    assertEquals(dataNodes.length - 2, targets.length);
    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
    // We are supposed to place a replica on every node, but two data nodes
    // are not available, so we expect to be short by 2.
    assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
    resetHeartbeatForStorages();
}
Also used : LoggingEvent(org.apache.log4j.spi.LoggingEvent) LogVerificationAppender(org.apache.hadoop.hdfs.LogVerificationAppender) Logger(org.apache.log4j.Logger) Test(org.junit.Test)
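
A note on cleanup, since all three examples attach the appender to the Log4j root logger and never detach it: when captured events from one test must not leak into the next, the appender can be removed in a finally block or teardown. The sketch below assumes only the Log4j 1.x Logger API and the getLog() method shown above; it illustrates the pattern and is not code from the Hadoop tests.

final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
try {
    // ... exercise the code under test here ...
    assertFalse(appender.getLog().isEmpty());
} finally {
    // Detach so later tests do not see this test's events.
    logger.removeAppender(appender);
}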

Example 2 with LogVerificationAppender

Use of org.apache.hadoop.hdfs.LogVerificationAppender in project hadoop by apache.

From the class TestStartup, method testImageChecksum:

private void testImageChecksum(boolean compress) throws Exception {
    MiniDFSCluster cluster = null;
    if (compress) {
        // Enable fsimage compression; DFS_IMAGE_COMPRESS_KEY is the boolean
        // switch, while the codec key expects a codec class name.
        config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    }
    try {
        LOG.info("\n===========================================\n" + "Starting empty cluster");
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).format(true).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/test"));
        LOG.info("Shutting down cluster #1");
        cluster.shutdown();
        cluster = null;
        // Corrupt the md5 files in all the namedirs
        corruptFSImageMD5(true);
        // Attach our own log appender so we can verify output
        final LogVerificationAppender appender = new LogVerificationAppender();
        final Logger logger = Logger.getRootLogger();
        logger.addAppender(appender);
        // Try to start a new cluster
        LOG.info("\n===========================================\n" + "Starting same cluster after simulated crash");
        try {
            cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).format(false).build();
            fail("Should not have successfully started with corrupt image");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Failed to load FSImage file", ioe);
            int md5failures = appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
            // Two namedirs, so should have seen two failures
            assertEquals(2, md5failures);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) LogVerificationAppender(org.apache.hadoop.hdfs.LogVerificationAppender) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) Logger(org.apache.log4j.Logger)
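
Example 2 leans on appender.countExceptionsWithMessage(String), whose implementation is not shown on this page. A plausible sketch, assuming the appender keeps the raw LoggingEvents and matches against the message of any attached throwable; the real method in org.apache.hadoop.hdfs.LogVerificationAppender may differ.

// Hypothetical sketch of a method inside the appender class; counts captured
// events whose throwable message contains the given text.
// (Uses org.apache.log4j.spi.ThrowableInformation.)
public int countExceptionsWithMessage(final String text) {
    int count = 0;
    for (LoggingEvent e : getLog()) {
        ThrowableInformation info = e.getThrowableInformation();
        if (info != null) {
            String msg = info.getThrowable().getMessage();
            if (msg != null && msg.contains(text)) {
                count++;
            }
        }
    }
    return count;
}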

Example 3 with LogVerificationAppender

Use of org.apache.hadoop.hdfs.LogVerificationAppender in project hadoop by apache.

From the class TestFsDatasetCache, method testFilesExceedMaxLockedMemory:

@Test(timeout = 600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
    LOG.info("beginning testFilesExceedMaxLockedMemory");
    // Create some test files that will exceed total cache capacity
    final int numFiles = 5;
    final long fileSize = CACHE_CAPACITY / (numFiles - 1);
    final Path[] testFiles = new Path[numFiles];
    final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
    final long[] fileSizes = new long[numFiles];
    for (int i = 0; i < numFiles; i++) {
        testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
        DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short) 1, 0xDFAL);
        fileLocs[i] = (HdfsBlockLocation[]) fs.getFileBlockLocations(testFiles[i], 0, fileSize);
        // Get the file size (sum of blocks)
        long[] sizes = getBlockSizes(fileLocs[i]);
        for (int j = 0; j < sizes.length; j++) {
            fileSizes[i] += sizes[j];
        }
    }
    // Cache the first n-1 files
    long total = 0;
    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
    for (int i = 0; i < numFiles - 1; i++) {
        setHeartbeatResponse(cacheBlocks(fileLocs[i]));
        total = DFSTestUtil.verifyExpectedCacheUsage(rounder.roundUp(total + fileSizes[i]), 4 * (i + 1), fsd);
    }
    // nth file should hit a capacity exception
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    setHeartbeatResponse(cacheBlocks(fileLocs[numFiles - 1]));
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            int lines = appender.countLinesWithMessage("more bytes in the cache: " + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
            return lines > 0;
        }
    }, 500, 30000);
    // Also check the metrics for the failure
    assertTrue("Expected more than 0 failed cache attempts", fsd.getNumBlocksFailedToCache() > 0);
    // Uncache the n-1 files
    // Each of the 4 cached files occupies 4 blocks (see 4 * (i + 1) above),
    // so 16 blocks are currently cached.
    int curCachedBlocks = 16;
    for (int i = 0; i < numFiles - 1; i++) {
        setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
        long uncachedBytes = rounder.roundUp(fileSizes[i]);
        total -= uncachedBytes;
        curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
        DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
    }
    LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Also used : Path(org.apache.hadoop.fs.Path) LogVerificationAppender(org.apache.hadoop.hdfs.LogVerificationAppender) HdfsBlockLocation(org.apache.hadoop.fs.HdfsBlockLocation) Logger(org.apache.log4j.Logger) Matchers.anyBoolean(org.mockito.Matchers.anyBoolean) Test(org.junit.Test)
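
Since GenericTestUtils.waitFor takes a single-method Supplier<Boolean>, the anonymous class in Example 3 can be written as a lambda on Java 8+. A sketch assuming the same waitFor(check, checkEveryMillis, waitForMillis) call used above:

// Poll every 500 ms, for up to 30 s, until the capacity warning shows up
// in the captured log.
GenericTestUtils.waitFor(
    () -> appender.countLinesWithMessage(
        "more bytes in the cache: "
            + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY) > 0,
    500, 30000);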

Aggregations

LogVerificationAppender (org.apache.hadoop.hdfs.LogVerificationAppender)3 Logger (org.apache.log4j.Logger)3 Path (org.apache.hadoop.fs.Path)2 Test (org.junit.Test)2 IOException (java.io.IOException)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 HdfsBlockLocation (org.apache.hadoop.fs.HdfsBlockLocation)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 LoggingEvent (org.apache.log4j.spi.LoggingEvent)1 Matchers.anyBoolean (org.mockito.Matchers.anyBoolean)1
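
For readers who want to reuse the pattern outside Hadoop, here is a minimal sketch of what an appender like this looks like on Log4j 1.x: it extends AppenderSkeleton, stores every event it receives, and exposes the captured list for assertions. Method names mirror the calls in the examples above, but the class name and internals are assumptions, not the actual org.apache.hadoop.hdfs.LogVerificationAppender source; countExceptionsWithMessage would follow the same shape as the sketch after Example 2.

import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

public class LogCapturingAppender extends AppenderSkeleton {
    // Every event logged while this appender is attached.
    private final List<LoggingEvent> log = new ArrayList<>();

    @Override
    protected void append(final LoggingEvent event) {
        log.add(event);
    }

    @Override
    public boolean requiresLayout() {
        // Structured events are kept as-is, so no layout is needed.
        return false;
    }

    @Override
    public void close() {
        // Nothing to release.
    }

    // Snapshot of the captured events, for assertions in tests.
    public List<LoggingEvent> getLog() {
        return new ArrayList<>(log);
    }

    // Counts captured events whose rendered message contains the given text.
    public int countLinesWithMessage(final String text) {
        int count = 0;
        for (LoggingEvent e : getLog()) {
            String msg = e.getRenderedMessage();
            if (msg != null && msg.contains(text)) {
                count++;
            }
        }
        return count;
    }
}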