
Example 11 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From the class TestDFSUpgradeFromImage, method testUpgradeFromCorruptRel22Image.

/**
   * Test upgrade from a 0.22 image with a corrupt md5; make sure the
   * upgrade fails.
   */
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
    // Overwrite the md5 stored in the VERSION files
    File[] nnDirs = MiniDFSCluster.getNameNodeDirectory(MiniDFSCluster.getBaseDirectory(), 0, 0);
    FSImageTestUtil.corruptVersionFile(new File(nnDirs[0], "current/VERSION"), "imageMD5Digest", "22222222222222222222222222222222");
    FSImageTestUtil.corruptVersionFile(new File(nnDirs[1], "current/VERSION"), "imageMD5Digest", "22222222222222222222222222222222");
    // Attach our own log appender so we can verify output
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    // Upgrade should now fail
    try {
        upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4), null);
        fail("Upgrade did not fail with bad MD5");
    } catch (IOException ioe) {
        String msg = StringUtils.stringifyException(ioe);
        if (!msg.contains("Failed to load FSImage file")) {
            throw ioe;
        }
        int md5failures = appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
        assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
    }
}
Also used: IOException (java.io.IOException), Logger (org.apache.log4j.Logger), File (java.io.File), Test (org.junit.Test)
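
The LogVerificationAppender attached above is a test-only helper from Hadoop's own sources (org.apache.hadoop.hdfs.LogVerificationAppender). As a rough illustration of what such an appender involves, here is a minimal, hypothetical re-implementation against the log4j 1.x AppenderSkeleton API; the class name CapturingAppender is made up, and only the countExceptionsWithMessage shape mirrors the call made in the test.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

// Hypothetical stand-in for Hadoop's LogVerificationAppender: it buffers
// every LoggingEvent so a test can inspect what was logged afterwards.
public class CapturingAppender extends AppenderSkeleton {

    private final List<LoggingEvent> events =
            Collections.synchronizedList(new ArrayList<LoggingEvent>());

    @Override
    protected void append(LoggingEvent event) {
        events.add(event);
    }

    @Override
    public void close() {
        // nothing to release
    }

    @Override
    public boolean requiresLayout() {
        return false;
    }

    // Count events whose attached throwable has a message containing `text`,
    // mirroring the countExceptionsWithMessage(...) call in the test above.
    public int countExceptionsWithMessage(String text) {
        int count = 0;
        synchronized (events) {
            for (LoggingEvent e : events) {
                if (e.getThrowableInformation() != null) {
                    Throwable t = e.getThrowableInformation().getThrowable();
                    if (t != null && t.getMessage() != null
                            && t.getMessage().contains(text)) {
                        count++;
                    }
                }
            }
        }
        return count;
    }
}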

Example 12 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From the class TestStartup, method testImageChecksum.

private void testImageChecksum(boolean compress) throws Exception {
    MiniDFSCluster cluster = null;
    if (compress) {
        // DFS_IMAGE_COMPRESS_KEY is the boolean flag; the codec key takes a class name
        config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    }
    try {
        LOG.info("\n===========================================\n" + "Starting empty cluster");
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).format(true).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/test"));
        LOG.info("Shutting down cluster #1");
        cluster.shutdown();
        cluster = null;
        // Corrupt the md5 files in all the namedirs
        corruptFSImageMD5(true);
        // Attach our own log appender so we can verify output
        final LogVerificationAppender appender = new LogVerificationAppender();
        final Logger logger = Logger.getRootLogger();
        logger.addAppender(appender);
        // Try to start a new cluster
        LOG.info("\n===========================================\n" + "Starting same cluster after simulated crash");
        try {
            cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).format(false).build();
            fail("Should not have successfully started with corrupt image");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Failed to load FSImage file", ioe);
            int md5failures = appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
            // Two namedirs, so should have seen two failures
            assertEquals(2, md5failures);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LogVerificationAppender (org.apache.hadoop.hdfs.LogVerificationAppender), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), Logger (org.apache.log4j.Logger)
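
One detail worth noting: neither this test nor Example 11 detaches the appender afterwards, so the root logger keeps accumulating events across tests. A hedged sketch of the safer shape, reusing the same classes already imported above:

// Inside a test body; LogVerificationAppender and Logger as imported above.
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger rootLogger = Logger.getRootLogger();
rootLogger.addAppender(appender);
try {
    // ... exercise the code under test and assert on the captured events ...
} finally {
    // Restore the root logger so later tests start from a clean slate.
    rootLogger.removeAppender(appender);
}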

Example 13 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From the class MRAppMasterTestLaunchTime, method testMRAppMasterCredentials.

@Test
public void testMRAppMasterCredentials() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    // Simulate credentials passed to AM via client->RM->NM
    Credentials credentials = new Credentials();
    byte[] identifier = "MyIdentifier".getBytes();
    byte[] password = "MyPassword".getBytes();
    Text kind = new Text("MyTokenKind");
    Text service = new Text("host:port");
    Token<? extends TokenIdentifier> myToken = new Token<TokenIdentifier>(identifier, password, kind, service);
    Text tokenAlias = new Text("myToken");
    credentials.addToken(tokenAlias, myToken);
    Text appTokenService = new Text("localhost:0");
    Token<AMRMTokenIdentifier> appToken = new Token<AMRMTokenIdentifier>(identifier, password, AMRMTokenIdentifier.KIND_NAME, appTokenService);
    credentials.addToken(appTokenService, appToken);
    Text keyAlias = new Text("mySecretKeyAlias");
    credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
    Token<? extends TokenIdentifier> storedToken = credentials.getToken(tokenAlias);
    JobConf conf = new JobConf();
    Path tokenFilePath = new Path(testDir, "tokens-file");
    Map<String, String> newEnv = new HashMap<String, String>();
    newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, tokenFilePath.toUri().getPath());
    setNewEnvironmentHack(newEnv);
    credentials.writeTokenStorageFile(tokenFilePath, conf);
    ApplicationId appId = ApplicationId.newInstance(12345, 56);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 546);
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    // Create staging dir, so MRAppMaster doesn't barf.
    File stagingDir = new File(MRApps.getStagingAreaDir(conf, userName).toString());
    stagingDir.mkdirs();
    // Set the login user to null, as that is how a real MRAppMaster starts.
    // The login user being null is the reason the token file is read by UGI.
    UserGroupInformation.setLoginUser(null);
    MRAppMasterTest appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, true);
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    // Now validate the task credentials
    Credentials appMasterCreds = appMaster.getCredentials();
    Assert.assertNotNull(appMasterCreds);
    Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
    Assert.assertEquals(1, appMasterCreds.numberOfTokens());
    // Validate the tokens - app token should not be present
    Token<? extends TokenIdentifier> usedToken = appMasterCreds.getToken(tokenAlias);
    Assert.assertNotNull(usedToken);
    Assert.assertEquals(storedToken, usedToken);
    // Validate the keys
    byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
    Assert.assertNotNull(usedKey);
    Assert.assertEquals("mySecretKey", new String(usedKey));
    // The credentials should also be added to conf so that OutputCommitter can
    // access them - app token should not be present
    Credentials confCredentials = conf.getCredentials();
    Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
    Assert.assertEquals(1, confCredentials.numberOfTokens());
    Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
    Assert.assertEquals("mySecretKey", new String(confCredentials.getSecretKey(keyAlias)));
    // Verify the AM's ugi - app token should be present
    Credentials ugiCredentials = appMaster.getUgi().getCredentials();
    Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
    Assert.assertEquals(2, ugiCredentials.numberOfTokens());
    Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
    Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
    Assert.assertEquals("mySecretKey", new String(ugiCredentials.getSecretKey(keyAlias)));
}
Also used: Path (org.apache.hadoop.fs.Path), HashMap (java.util.HashMap), Text (org.apache.hadoop.io.Text), Token (org.apache.hadoop.security.token.Token), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Logger (org.apache.log4j.Logger), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), JobConf (org.apache.hadoop.mapred.JobConf), File (java.io.File), Credentials (org.apache.hadoop.security.Credentials), Test (org.junit.Test)
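
The mechanism underneath this test is Hadoop's token storage file: Credentials are serialized with writeTokenStorageFile, and UserGroupInformation reads the file back when HADOOP_TOKEN_FILE_LOCATION points at it. A minimal round trip without the MRAppMaster scaffolding might look like this sketch (the file path and aliases are made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenFileRoundTrip {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Credentials creds = new Credentials();
        creds.addToken(new Text("myToken"),
                new Token<TokenIdentifier>("MyIdentifier".getBytes(),
                        "MyPassword".getBytes(),
                        new Text("MyTokenKind"), new Text("host:port")));
        creds.addSecretKey(new Text("mySecretKeyAlias"),
                "mySecretKey".getBytes());

        // Serialize to the on-disk format UGI understands ...
        Path tokenFile = new Path("/tmp/tokens-file");  // hypothetical path
        creds.writeTokenStorageFile(tokenFile, conf);

        // ... and read it back, as UGI does when
        // HADOOP_TOKEN_FILE_LOCATION points at the file.
        Credentials readBack = Credentials.readTokenStorageFile(tokenFile, conf);
        System.out.println("tokens: " + readBack.numberOfTokens()
                + ", keys: " + readBack.numberOfSecretKeys());
    }
}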

Example 14 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From the class TestYarnClient, method testApplicationType.

@Test(timeout = 30000)
public void testApplicationType() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM();
    rm.start();
    RMApp app = rm.submitApp(2000);
    RMApp app1 = rm.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false, "default", -1, null, "MAPREDUCE");
    Assert.assertEquals("YARN", app.getApplicationType());
    Assert.assertEquals("MAPREDUCE", app1.getApplicationType());
    rm.stop();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), Logger (org.apache.log4j.Logger), Test (org.junit.Test)

Example 15 with Logger

Use of org.apache.log4j.Logger in project hadoop by apache.

From the class TestJobMonitorAndPrint, method testJobMonitorAndPrint.

@Test
public void testJobMonitorAndPrint() throws Exception {
    JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
    JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
    doAnswer(new Answer<TaskCompletionEvent[]>() {

        @Override
        public TaskCompletionEvent[] answer(InvocationOnMock invocation) throws Throwable {
            return new TaskCompletionEvent[0];
        }
    }).when(job).getTaskCompletionEvents(anyInt(), anyInt());
    doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
    when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
    // setup the logger to capture all logs
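    // (Assumption: the test's log4j configuration defines an appender named
    // "stdout" on the root logger; getAppender("stdout") would return null,
    // and the next line would throw, if it does not.)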
    Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    WriterAppender appender = new WriterAppender(layout, os);
    appender.setThreshold(Level.ALL);
    Logger qlogger = Logger.getLogger(Job.class);
    qlogger.addAppender(appender);
    job.monitorAndPrintJob();
    qlogger.removeAppender(appender);
    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
    String line;
    boolean foundHundred = false;
    boolean foundComplete = false;
    boolean foundUber = false;
    String uberModeMatch = "uber mode : true";
    String progressMatch = "map 100% reduce 100%";
    String completionMatch = "completed successfully";
    while ((line = r.readLine()) != null) {
        if (line.contains(uberModeMatch)) {
            foundUber = true;
        }
        foundHundred = line.contains(progressMatch);
        if (foundHundred)
            break;
    }
    line = r.readLine();
    foundComplete = line.contains(completionMatch);
    assertTrue(foundUber);
    assertTrue(foundHundred);
    assertTrue(foundComplete);
    System.out.println("The output of job.toString() is : \n" + job.toString());
    assertTrue(job.toString().contains("Number of maps: 5\n"));
    assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
Also used: ByteArrayOutputStream (java.io.ByteArrayOutputStream), WriterAppender (org.apache.log4j.WriterAppender), Logger (org.apache.log4j.Logger), LineNumberReader (java.io.LineNumberReader), Layout (org.apache.log4j.Layout), InvocationOnMock (org.mockito.invocation.InvocationOnMock), StringReader (java.io.StringReader), Test (org.junit.Test)
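
The WriterAppender-over-ByteArrayOutputStream capture shown here generalizes to any log4j 1.x logger. A standalone sketch, using SimpleLayout so it does not depend on a preconfigured "stdout" appender (the logger name and message are made up):

import java.io.ByteArrayOutputStream;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;

public class CaptureLogOutput {
    public static void main(String[] args) {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        WriterAppender appender = new WriterAppender(new SimpleLayout(), os);
        appender.setThreshold(Level.ALL);

        Logger logger = Logger.getLogger("example");  // hypothetical logger
        logger.addAppender(appender);
        try {
            logger.info("map 100% reduce 100%");
        } finally {
            // Detach so the capture is scoped to this block.
            logger.removeAppender(appender);
        }
        // The captured text can now be scanned, as the test above does.
        System.out.println(os.toString().contains("map 100% reduce 100%"));
    }
}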
