Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
From the class TestNativeAzureFileSystemClientLogging, the method testLoggingEnabled:
@Test
public void testLoggingEnabled() throws Exception {
  LogCapturer logs =
      LogCapturer.captureLogs(new Log4JLogger(Logger.getRootLogger()));

  // Update configuration based on the Test.
  updateFileSystemConfiguration(true);

  performWASBOperations();

  assertTrue(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
}
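Every example on this page follows the same capture-and-assert pattern: attach a LogCapturer to a logger, run the code under test, stop capturing in a finally block, then assert on getOutput(). The following is a minimal sketch of that pattern, assuming the commons-logging based captureLogs(Log) overload used in the tests below; the logger name "org.example.ComponentUnderTest" and the asserted fragments are placeholders, not part of the real tests (which pass the Log of a concrete class such as DataNode).

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;

public class LogCapturerPatternSketch {
  public void captureAndAssert() throws Exception {
    // Attach a capturing appender; "org.example.ComponentUnderTest" is a
    // placeholder logger name, not a real Hadoop class.
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog("org.example.ComponentUnderTest"));
    try {
      // ... exercise the code that is expected to log ...
    } finally {
      // Always detach the capturing appender, even if the test body throws.
      logs.stopCapturing();
    }
    // Assert on what was (or was not) written to the log.
    GenericTestUtils.assertMatches(logs.getOutput(), "expected fragment");
    GenericTestUtils.assertDoesNotMatch(logs.getOutput(), "forbidden fragment");
  }
}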
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
From the class TestEncryptedTransfer, the method testEncryptedWrite:
private void testEncryptedWrite(int numDns) throws IOException {
  setEncryptionConfigKeys();

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();
  fs = getFileSystem(conf);

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(SaslDataTransferServer.class));
  LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataTransferSaslUtil.class));
  try {
    writeTestDataToFile(fs);
  } finally {
    logs.stopCapturing();
    logs1.stopCapturing();
  }

  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));

  if (resolverClazz == null) {
    // Test client and server negotiate cipher option
    GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
        "Server using cipher suite");
    // Check the IOStreamPair
    GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
        "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
  }
}
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
From the class TestEncryptedTransfer, the method testClientThatDoesNotSupportEncryption:
@Test
public void testClientThatDoesNotSupportEncryption() throws IOException {
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);

  writeUnencryptedAndThenRestartEncryptedCluster();

  DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyClient = Mockito.spy(client);
  Mockito.doReturn(false).when(spyClient).shouldEncryptData();
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    if (resolverClazz != null &&
        !resolverClazz.endsWith("TestTrustedChannelResolver")) {
      fail("Should not have been able to read without encryption enabled.");
    }
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block:", ioe);
  } finally {
    logs.stopCapturing();
  }

  if (resolverClazz == null) {
    GenericTestUtils.assertMatches(logs.getOutput(),
        "Failed to read expected encryption handshake from client at");
  }
}
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
From the class TestDataStream, the method testDfsClient:
@Test(timeout = 60000)
public void testDfsClient() throws IOException, InterruptedException {
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataStreamer.class));
  byte[] toWrite = new byte[PACKET_SIZE];
  new Random(1).nextBytes(toWrite);
  final Path path = new Path("/file1");
  final DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = null;
  out = dfs.create(path, false);

  out.write(toWrite);
  out.write(toWrite);
  out.hflush();

  // Wait to cross slow IO warning threshold
  Thread.sleep(15 * 1000);
  out.write(toWrite);
  out.write(toWrite);
  out.hflush();

  // Wait for capturing logs in busy cluster
  Thread.sleep(5 * 1000);

  out.close();
  logs.stopCapturing();
  GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
      "Slow ReadProcessor read fields for block");
}
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
From the class TestSaslDataTransfer, the method testServerSaslNoClientSasl:
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
      "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  startCluster(clusterConf);

  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not "
        + "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(),
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection "
      + "handshake from client at");
}