Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
The class TestDiskspaceQuotaUpdate, method testQuotaIssuesBeforeCommitting.
/**
* Test that the cached quota remains correct when the block has been
* written to but not yet committed, even if the replication factor
* is updated during this time.
*/
private void testQuotaIssuesBeforeCommitting(short initialReplication,
    short finalReplication) throws Exception {
  final String logStmt = "BUG: Inconsistent storagespace for directory";
  final Path dir = new Path(getParent(GenericTestUtils.getMethodName()),
      String.format("%d-%d", initialReplication, finalReplication));
  final Path file = new Path(dir, "testfile");
  LogCapturer logs = LogCapturer.captureLogs(NameNode.LOG);
  getDFS().mkdirs(dir);
  getDFS().setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out =
      TestFileCreation.createFile(getDFS(), file, initialReplication);
  TestFileCreation.writeFile(out, BLOCKSIZE / 2);
  out.hflush();
  getDFS().getContentSummary(dir);
  if (finalReplication != initialReplication) {
    // While the block is visible to the NN but has not yet been committed,
    // change the replication
    getDFS().setReplication(file, finalReplication);
  }
  out.close();
  getDFS().getContentSummary(dir);
  assertFalse(logs.getOutput().contains(logStmt));
}
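All of the examples on this page follow the same capture-assert-detach flow. Below is a minimal, self-contained sketch of that flow, assuming only the commons-logging overload of captureLogs used in these snippets; the logger and the log message are placeholders, not from the Hadoop tests.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;

import static org.junit.Assert.assertTrue;

public class LogCapturerFlowSketch {
  // Placeholder logger; the tests above capture NameNode.LOG,
  // FSNamesystem.LOG, SaslDataTransferServer's log, and so on.
  private static final Log LOG = LogFactory.getLog(LogCapturerFlowSketch.class);

  public void demo() {
    // Attach a capturing appender to LOG.
    LogCapturer logs = LogCapturer.captureLogs(LOG);
    try {
      LOG.warn("something interesting happened");
      // getOutput() returns everything captured so far as one string.
      assertTrue(logs.getOutput().contains("something interesting"));
      // clearOutput() resets the buffer between assertions.
      logs.clearOutput();
    } finally {
      // Detach the appender so later tests are unaffected.
      logs.stopCapturing();
    }
  }
}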
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
The class TestDiskspaceQuotaUpdate, method testQuotaIssuesWhileCommittingHelper.
private void testQuotaIssuesWhileCommittingHelper(
    DatanodeProtocolClientSideTranslatorPB nnSpy,
    final short initialReplication, final short finalReplication)
    throws Exception {
  final String logStmt = "BUG: Inconsistent storagespace for directory";
  final Path dir = new Path(getParent(GenericTestUtils.getMethodName()),
      String.format("%d-%d", initialReplication, finalReplication));
  final Path file = new Path(dir, "testfile");
  LogCapturer logs = LogCapturer.captureLogs(NameNode.LOG);

  Mockito.doAnswer(new Answer<Object>() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      if (finalReplication != initialReplication) {
        getDFS().setReplication(file, finalReplication);
      }
      // Call getContentSummary before the DN can notify the NN
      // that the block has been received to check for discrepancy
      getDFS().getContentSummary(dir);
      invocation.callRealMethod();
      return null;
    }
  }).when(nnSpy).blockReceivedAndDeleted(
      Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(),
      Mockito.<StorageReceivedDeletedBlocks[]>anyObject());

  getDFS().mkdirs(dir);
  getDFS().setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  DFSTestUtil.createFile(getDFS(), file, BLOCKSIZE / 2, initialReplication, 1L);
  // Also check for discrepancy after completing the file
  getDFS().getContentSummary(dir);
  assertFalse(logs.getOutput().contains(logStmt));
}
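The essential trick in this helper is the Mockito spy on the DatanodeProtocol translator: doAnswer() runs test code at the exact moment the DataNode would report a received block, before letting the real method proceed. Below is a stripped-down sketch of that interception pattern; the Service interface and all names in it are hypothetical, not part of Hadoop.

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class DoAnswerSketch {
  // Hypothetical stand-in for an RPC interface such as DatanodeProtocol.
  interface Service {
    void report(String block);
  }

  public Service intercept(Service real) {
    Service spy = Mockito.spy(real);
    Mockito.doAnswer(new Answer<Object>() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        // Window for test-specific checks before the real call happens.
        String block = (String) invocation.getArguments()[0];
        System.out.println("about to report: " + block);
        // Forward to the real implementation, as the quota test does.
        return invocation.callRealMethod();
      }
    }).when(spy).report(Mockito.anyString());
    return spy;
  }
}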
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
The class TestEncryptedTransfer, method testEncryptedRead.
private void testEncryptedRead(String algorithm, String cipherSuite,
    boolean matchLog, boolean readAfterRestart) throws IOException {
  // Set the encryption algorithm and cipher suites, but don't enable
  // transfer encryption yet.
  conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, algorithm);
  conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
      cipherSuite);
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(SaslDataTransferServer.class));
  LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataTransferSaslUtil.class));
  try {
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  } finally {
    logs.stopCapturing();
    logs1.stopCapturing();
  }

  if (resolverClazz == null) {
    if (matchLog) {
      // Check that the client and server negotiated a cipher option
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertMatches(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    } else {
      // Check that the client and server did not negotiate a cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  }

  if (readAfterRestart) {
    cluster.restartNameNode();
    fs = getFileSystem(conf);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  }
}
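Unlike the plain contains() checks in the other tests, this one uses GenericTestUtils.assertMatches and assertDoesNotMatch, which search the captured output for a regular expression rather than a literal substring. A minimal illustration; the output string here is a placeholder, not real NameNode output.

import org.apache.hadoop.test.GenericTestUtils;

public class AssertMatchesSketch {
  public static void main(String[] args) {
    String output = "INFO Server using cipher suite AES/CTR/NoPadding";
    // Passes: the pattern is found somewhere in the output.
    GenericTestUtils.assertMatches(output, "Server using cipher suite");
    // Passes: the pattern appears nowhere in the output.
    GenericTestUtils.assertDoesNotMatch(output, "handshake failed");
  }
}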
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
The class TestFSNamesystemLock, method testFSWriteLockLongHoldingReport.
/**
 * Test that when the FSNamesystem write lock is held for a long time,
 * the logger reports it.
 */
@Test(timeout = 45000)
public void testFSWriteLockLongHoldingReport() throws Exception {
  final long writeLockReportingThreshold = 100L;
  final long writeLockSuppressWarningInterval = 10000L;
  Configuration conf = new Configuration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
      writeLockReportingThreshold);
  conf.setTimeDuration(DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
      writeLockSuppressWarningInterval, TimeUnit.MILLISECONDS);
  final FakeTimer timer = new FakeTimer();
  final FSNamesystemLock fsnLock = new FSNamesystemLock(conf, null, timer);
  timer.advance(writeLockSuppressWarningInterval);
  LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);

  // Don't report if the write lock is held for a short time
  fsnLock.writeLock();
  fsnLock.writeUnlock();
  assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));

  // Report if the write lock is held for a long time
  fsnLock.writeLock();
  timer.advance(writeLockReportingThreshold + 10);
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));

  // Track but do not report if the write lock is held (interruptibly) for
  // a long time but the time since the last report does not exceed the
  // suppress-warning interval
  fsnLock.writeLockInterruptibly();
  timer.advance(writeLockReportingThreshold + 10);
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));

  // Track but do not report if the lock is held for a long time when
  // re-entering the write lock but the time since the last report does
  // not exceed the suppress-warning interval
  fsnLock.writeLock();
  timer.advance(writeLockReportingThreshold / 2 + 1);
  fsnLock.writeLockInterruptibly();
  timer.advance(writeLockReportingThreshold / 2 + 1);
  fsnLock.writeLock();
  timer.advance(writeLockReportingThreshold / 2);
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));

  // Report if the lock is held for a long time and the time since the last
  // report exceeds the suppress-warning interval
  timer.advance(writeLockSuppressWarningInterval);
  fsnLock.writeLock();
  timer.advance(writeLockReportingThreshold + 100);
  logs.clearOutput();
  fsnLock.writeUnlock();
  assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
  assertTrue(logs.getOutput().contains(
      "Number of suppressed write-lock reports: 2"));
}
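This test is deterministic because FSNamesystemLock accepts an injected Timer, and FakeTimer advances virtual time without any real waiting. Below is a minimal sketch of that pattern; FakeTimer and Timer are real Hadoop test utilities (used in the test above), while the Stopwatch class is invented purely for illustration.

import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.Timer;

public class FakeTimerSketch {
  // Hypothetical component that measures elapsed time via an injected Timer.
  static class Stopwatch {
    private final Timer timer;
    private long startMs;
    Stopwatch(Timer timer) { this.timer = timer; }
    void start() { startMs = timer.monotonicNow(); }
    long elapsedMs() { return timer.monotonicNow() - startMs; }
  }

  public static void main(String[] args) {
    FakeTimer timer = new FakeTimer();
    Stopwatch sw = new Stopwatch(timer);
    sw.start();
    timer.advance(150); // advance virtual time by 150 ms; no sleeping
    System.out.println("elapsed: " + sw.elapsedMs() + " ms"); // prints 150
  }
}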
Use of org.apache.hadoop.test.GenericTestUtils.LogCapturer in project hadoop by apache.
The class TestBootstrapStandby, method testSharedEditsMissingLogs.
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());

  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, maxNNCount - 1);
  File editsDir = new File(editsUri);
  File currentDir = new File(editsDir, "current");
  File editsSegment =
      new File(currentDir, NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);
  GenericTestUtils.assertExists(currentDir);

  // Delete the segment.
  assertTrue(editsSegment.delete());

  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1));
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
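For context on what the test deletes: NNStorage.getFinalizedEditsFileName(start, end) builds the standard finalized-segment file name from zero-padded transaction ids. A quick check; the exact padding width in the comment is an implementation detail of the NameNode storage layout, not something this page documents.

import org.apache.hadoop.hdfs.server.namenode.NNStorage;

public class EditsFileNameSketch {
  public static void main(String[] args) {
    // Prints something like "edits_0000000000000000001-0000000000000000002"
    System.out.println(NNStorage.getFinalizedEditsFileName(1, 2));
  }
}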