
Example 1 with LogValue

Use of org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue in project hadoop by apache.

The class TestAggregatedLogFormat, method testContainerLogsFileAccess.

@Test(timeout = 10000)
public void testContainerLogsFileAccess() throws IOException {
    // This test runs only if NativeIO is available, since SecureIOUtils
    // requires it.
    Assume.assumeTrue(NativeIO.isAvailable());
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
    Path remoteAppLogFile = new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
    String data = "Log File content for container : ";
    // Create files for container1. The log aggregator will try to read the
    // log files as an illegal user.
    ApplicationId applicationId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
    ContainerId testContainerId1 = ContainerId.newContainerId(applicationAttemptId, 1);
    Path appDir = new Path(srcFileRoot, testContainerId1.getApplicationAttemptId().getApplicationId().toString());
    Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
    String stdout = "stdout";
    String stderr = "stderr";
    writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString() + stdout);
    writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString() + stderr);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
    LogKey logKey = new LogKey(testContainerId1);
    String randomUser = "randomUser";
    LogValue logValue = spy(new LogValue(Collections.singletonList(srcFileRoot.toString()), testContainerId1, randomUser));
    // This simulates a situation where the first log file is owned by a
    // different user (probably a symlink) and the second one by the user
    // itself. The first file should not be aggregated because its owner does
    // not match the expected user.
    when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
    logWriter.append(logKey, logValue);
    logWriter.close();
    BufferedReader in = new BufferedReader(new FileReader(new File(remoteAppLogFile.toUri().getRawPath())));
    String line;
    StringBuffer sb = new StringBuffer("");
    while ((line = in.readLine()) != null) {
        LOG.info(line);
        sb.append(line);
    }
    in.close();
    line = sb.toString();
    String expectedOwner = ugi.getShortUserName();
    if (Path.WINDOWS) {
        final String adminsGroupString = "Administrators";
        if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
            expectedOwner = adminsGroupString;
        }
    }
    // The stderr file should not be aggregated, so its contents must not
    // appear in the aggregated log.
    String stdoutFile1 = StringUtils.join(File.separator, Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles", testContainerId1.getApplicationAttemptId().getApplicationId().toString(), testContainerId1.toString(), stderr }));
    // The stdout file is expected to be aggregated.
    String stdoutFile2 = StringUtils.join(File.separator, Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles", testContainerId1.getApplicationAttemptId().getApplicationId().toString(), testContainerId1.toString(), stdout }));
    String message2 = "Owner '" + expectedOwner + "' for path " + stdoutFile2 + " did not match expected owner '" + ugi.getShortUserName() + "'";
    Assert.assertFalse(line.contains(message2));
    Assert.assertFalse(line.contains(data + testContainerId1.toString() + stderr));
    Assert.assertTrue(line.contains(data + testContainerId1.toString() + stdout));
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), LogKey (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), LogValue (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue), TestContainerId (org.apache.hadoop.yarn.api.TestContainerId), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), LogWriter (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter), BufferedReader (java.io.BufferedReader), FileReader (java.io.FileReader), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
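
The test above relies on a writeSrcFile helper that is not shown in this excerpt (one overload takes the file content as a String; another, used in Example 3, takes a character count). A minimal sketch of the String overload, assuming it simply creates the container log directory and writes the given content; the body is inferred, not Hadoop's verbatim helper:

private void writeSrcFile(Path srcFilePath, String fileName, String data)
        throws IOException {
    // Create the container log directory if it does not exist yet.
    File dir = new File(srcFilePath.toString());
    if (!dir.exists() && !dir.mkdirs()) {
        throw new IOException("Unable to create directory : " + dir);
    }
    // Write the log content to a file such as "stdout" or "stderr".
    File outputFile = new File(dir, fileName);
    try (Writer writer = new OutputStreamWriter(
            new FileOutputStream(outputFile), "UTF-8")) {
        writer.write(data);
    }
}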

Example 2 with LogValue

Use of org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue in project hadoop by apache.

The class TestAggregatedLogFormat, method writeSrcFileAndALog.

private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length, Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId) throws Exception {
    File dir = new File(srcFilePath.toString());
    if (!dir.exists()) {
        if (!dir.mkdirs()) {
            throw new IOException("Unable to create directory : " + dir);
        }
    }
    File outputFile = new File(new File(srcFilePath.toString()), fileName);
    FileOutputStream os = new FileOutputStream(outputFile);
    final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
    final int ch = filler;
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    LogWriter logWriter = new LogWriter(new Configuration(), remoteAppLogFile, ugi);
    LogKey logKey = new LogKey(testContainerId);
    LogValue logValue = spy(new LogValue(Collections.singletonList(srcFileRoot.toString()), testContainerId, ugi.getShortUserName()));
    final CountDownLatch latch = new CountDownLatch(1);
    Thread t = new Thread() {

        public void run() {
            try {
                for (int i = 0; i < length / 3; i++) {
                    osw.write(ch);
                }
                latch.countDown();
                for (int i = 0; i < (2 * length) / 3; i++) {
                    osw.write(ch);
                }
                osw.close();
            } catch (IOException e) {
                // A write failure here only shortens the test log; print it
                // and let the test continue.
                e.printStackTrace();
            }
        }
    };
    t.start();
    // Wait until the osw is partially written;
    // aggregation starts once the osw has completed 1/3 of its work.
    latch.await();
    // Aggregate the logs.
    logWriter.append(logKey, logValue);
    logWriter.close();
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), LogValue (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue), LogWriter (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter), FileOutputStream (java.io.FileOutputStream), LogKey (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey), OutputStreamWriter (java.io.OutputStreamWriter), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), File (java.io.File), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
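
A hedged sketch of how a test might drive this helper; the test name, log length, and assertion below are illustrative assumptions, not the actual Hadoop caller. It aggregates a log that is still being written and checks that the aggregated file was produced:

@Test(timeout = 10000)
public void testAggregateLogStillBeingWritten() throws Exception {
    File workDir = new File(testWorkDir, "testAggregateLogStillBeingWritten");
    Path remoteAppLogFile =
        new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
    ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
    Path appDir = new Path(srcFileRoot, testContainerId
        .getApplicationAttemptId().getApplicationId().toString());
    Path srcFilePath = new Path(appDir, testContainerId.toString());
    // Aggregation starts once roughly a third of the log has been written,
    // so the source file is still growing while it is uploaded.
    writeSrcFileAndALog(srcFilePath, "stdout", 300 * 1024, remoteAppLogFile,
        srcFileRoot, testContainerId);
    Assert.assertTrue("aggregated log file should exist",
        new File(remoteAppLogFile.toUri().getRawPath()).exists());
}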

Example 3 with LogValue

Use of org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue in project hadoop by apache.

The class TestAggregatedLogFormat, method testReadAcontainerLog.

private void testReadAcontainerLog(boolean logUploadedTime) throws Exception {
    Configuration conf = new Configuration();
    File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
    Path remoteAppLogFile = new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
    ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
    Path t = new Path(srcFileRoot, testContainerId.getApplicationAttemptId().getApplicationId().toString());
    Path srcFilePath = new Path(t, testContainerId.toString());
    int numChars = 80000;
    // Create a sub-folder under srcFilePath and create a file named "logs"
    // in it. Only top-level files are aggregated, so this log file should
    // be ignored.
    Path subDir = new Path(srcFilePath, "subDir");
    fs.mkdirs(subDir);
    writeSrcFile(subDir, "logs", numChars);
    // Create the stderr and stdout files in the container log directory.
    writeSrcFile(srcFilePath, "stderr", numChars);
    writeSrcFile(srcFilePath, "stdout", numChars);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
    LogKey logKey = new LogKey(testContainerId);
    LogValue logValue = new LogValue(Collections.singletonList(srcFileRoot.toString()), testContainerId, ugi.getShortUserName());
    // When we try to open a FileInputStream for stderr, it will throw an
    // IOException, so log aggregation for stderr is skipped.
    LogValue spyLogValue = spy(logValue);
    File errorFile = new File((new Path(srcFilePath, "stderr")).toString());
    doThrow(new IOException("Mock can not open FileInputStream")).when(spyLogValue).secureOpenFile(errorFile);
    logWriter.append(logKey, spyLogValue);
    logWriter.close();
    // Make sure permissions on the aggregated log file are correct.
    FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
    Assert.assertEquals("permissions on log aggregation file are wrong", FsPermission.createImmutable((short) 0640), fsStatus.getPermission());
    LogReader logReader = new LogReader(conf, remoteAppLogFile);
    LogKey rLogKey = new LogKey();
    DataInputStream dis = logReader.next(rLogKey);
    Writer writer = new StringWriter();
    if (logUploadedTime) {
        LogReader.readAcontainerLogs(dis, writer, System.currentTimeMillis());
    } else {
        LogReader.readAcontainerLogs(dis, writer);
    }
    // Only stdout should be aggregated; since the FileInputStream for stderr
    // could not be opened, that file is not aggregated.
    String s = writer.toString();
    int expectedLength =
        "LogType:stdout".length()
            + (logUploadedTime
                ? ("\nLog Upload Time:"
                    + Times.format(System.currentTimeMillis())).length()
                : 0)
            + ("\nLogLength:" + numChars).length()
            + "\nLog Contents:\n".length()
            + numChars + "\n".length()
            + "\nEnd of LogType:stdout\n".length();
    Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
    Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
    Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));
    Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
    Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < numChars; i++) {
        sb.append(filler);
    }
    String expectedContent = sb.toString();
    Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
    Assert.assertEquals(expectedLength, s.length());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), LogKey (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), LogValue (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue), StringWriter (java.io.StringWriter), TestContainerId (org.apache.hadoop.yarn.api.TestContainerId), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), LogWriter (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter), LogReader (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader), File (java.io.File), OutputStreamWriter (java.io.OutputStreamWriter), Writer (java.io.Writer), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
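
The expectedLength computation in the test above simply sums the pieces of one rendered log entry. As a worked check (not part of the original test), for numChars = 80000 and logUploadedTime == false the segments are:

"LogType:stdout"               14 characters
"\nLogLength:80000"            16 characters
"\nLog Contents:\n"            15 characters
log content                    80000 characters
trailing "\n"                  1 character
"\nEnd of LogType:stdout\n"    23 characters
total                          80069 characters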

Example 4 with LogValue

Use of org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue in project hadoop by apache.

The class TestAppLogAggregatorImpl, method verifyLogAggregationWithExpectedFiles2DeleteAndUpload.

/**
   * Verify that the application log aggregator, configured with the given
   * log retention period and the recovered log initialization time of the
   * application, uploads and deletes the expected sets of log files.
   * @param appId application id
   * @param containerId container id
   * @param logRetentionSecs log retention period
   * @param recoveredLogInitedTimeMillis recovered log initialization time
   * @param expectedFilesToDelete the set of files expected to be deleted
   * @param expectedFilesToUpload the set of files expected to be uploaded
   */
public void verifyLogAggregationWithExpectedFiles2DeleteAndUpload(ApplicationId appId, ContainerId containerId, long logRetentionSecs, long recoveredLogInitedTimeMillis, Set<File> expectedFilesToDelete, Set<File> expectedFilesToUpload) throws IOException {
    final Set<String> filesExpected2Delete = new HashSet<>();
    for (File file : expectedFilesToDelete) {
        filesExpected2Delete.add(file.getAbsolutePath());
    }
    final Set<String> filesExpected2Upload = new HashSet<>();
    for (File file : expectedFilesToUpload) {
        filesExpected2Upload.add(file.getAbsolutePath());
    }
    // Deletion service that verifies the set of files expected to be deleted.
    DeletionService deletionServiceWithExpectedFiles = createDeletionServiceWithExpectedFile2Delete(filesExpected2Delete);
    final YarnConfiguration config = new YarnConfiguration();
    config.setLong(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, logRetentionSecs);
    final AppLogAggregatorInTest appLogAggregator = createAppLogAggregator(appId, LOCAL_LOG_DIR.getAbsolutePath(), config, recoveredLogInitedTimeMillis, deletionServiceWithExpectedFiles);
    appLogAggregator.startContainerLogAggregation(new ContainerLogContext(containerId, ContainerType.TASK, 0));
    // set app finished flag first
    appLogAggregator.finishLogAggregation();
    appLogAggregator.run();
    // verify uploaded files
    ArgumentCaptor<LogValue> logValCaptor = ArgumentCaptor.forClass(LogValue.class);
    verify(appLogAggregator.logWriter).append(any(LogKey.class), logValCaptor.capture());
    Set<String> filesUploaded = new HashSet<>();
    LogValue logValue = logValCaptor.getValue();
    for (File file : logValue.getPendingLogFilesToUploadForThisContainer()) {
        filesUploaded.add(file.getAbsolutePath());
    }
    verifyFilesUploaded(filesUploaded, filesExpected2Upload);
}
Also used: LogValue (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), LogKey (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey), DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService), ContainerLogContext (org.apache.hadoop.yarn.server.api.ContainerLogContext), File (java.io.File), HashSet (java.util.HashSet)
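
A hedged sketch of a concrete caller; the file layout, retention value, and expectations below are illustrative assumptions rather than Hadoop's own test. It stages one current container log on disk, then expects that log to be uploaded and nothing to be deleted:

ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
ContainerId containerId = ContainerId.newContainerId(
    ApplicationAttemptId.newInstance(appId, 1), 1);
// Stage a single container log file under the local log directory.
File containerLogDir = new File(
    new File(LOCAL_LOG_DIR, appId.toString()), containerId.toString());
Assert.assertTrue(containerLogDir.mkdirs());
File stdout = new File(containerLogDir, "stdout");
Assert.assertTrue(stdout.createNewFile());
// With a one-hour retention period and no recovered start time, the fresh
// log should be uploaded and no files deleted.
verifyLogAggregationWithExpectedFiles2DeleteAndUpload(appId, containerId,
    3600L, 0L, Collections.<File>emptySet(), Collections.singleton(stdout));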

Aggregations

File (java.io.File): 4
LogKey (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey): 4
LogValue (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 3
LogWriter (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter): 3
IOException (java.io.IOException): 2
OutputStreamWriter (java.io.OutputStreamWriter): 2
Path (org.apache.hadoop.fs.Path): 2
TestContainerId (org.apache.hadoop.yarn.api.TestContainerId): 2
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 2
BufferedReader (java.io.BufferedReader): 1
DataInputStream (java.io.DataInputStream): 1
FileOutputStream (java.io.FileOutputStream): 1
FileReader (java.io.FileReader): 1
StringWriter (java.io.StringWriter): 1
Writer (java.io.Writer): 1
HashSet (java.util.HashSet): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
FileStatus (org.apache.hadoop.fs.FileStatus): 1