Use of java.util.concurrent.atomic.AtomicReference in project hadoop by apache.
The class TestInstrumentedLock, method testTryWithResourceSyntax.
/**
 * Test the correctness with try-with-resource syntax.
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTryWithResourceSyntax() throws Exception {
  String testname = name.getMethodName();
  final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
  Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {

    @Override
    public void lock() {
      super.lock();
      lockThread.set(Thread.currentThread());
    }

    @Override
    public void unlock() {
      super.unlock();
      lockThread.set(null);
    }
  };
  AutoCloseableLock acl = new AutoCloseableLock(lock);
  try (AutoCloseable localLock = acl.acquire()) {
    assertEquals(acl, localLock);
    Thread competingThread = new Thread() {

      @Override
      public void run() {
        assertNotEquals(Thread.currentThread(), lockThread.get());
        assertFalse(lock.tryLock());
      }
    };
    competingThread.start();
    competingThread.join();
    assertEquals(Thread.currentThread(), lockThread.get());
  }
  assertNull(lockThread.get());
}
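The pattern above uses an AtomicReference<Thread> to publish which thread currently owns the lock, so a competing thread can assert on the owner without any extra synchronization. Below is a minimal, self-contained sketch of the same idea using only JDK classes; TrackedLock is a hypothetical stand-in for Hadoop's InstrumentedLock/AutoCloseableLock, not their actual implementation.

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;

public class LockHolderSketch {

  /** Wraps a ReentrantLock and publishes the owning thread through an AtomicReference. */
  static class TrackedLock implements AutoCloseable {
    private final ReentrantLock lock = new ReentrantLock();
    // null means "nobody holds the lock"; visible to any thread that calls get()
    final AtomicReference<Thread> holder = new AtomicReference<>(null);

    TrackedLock acquire() {
      lock.lock();
      holder.set(Thread.currentThread());
      return this;
    }

    boolean tryLock() {
      return lock.tryLock();
    }

    @Override
    public void close() {
      holder.set(null);
      lock.unlock();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    TrackedLock tracked = new TrackedLock();
    try (TrackedLock held = tracked.acquire()) {
      Thread competitor = new Thread(() -> {
        // The competing thread observes the owner via the AtomicReference ...
        System.out.println("held by this thread? " + (tracked.holder.get() == Thread.currentThread()));
        // ... and cannot acquire the lock while it is held elsewhere.
        System.out.println("tryLock succeeded? " + tracked.tryLock());
      });
      competitor.start();
      competitor.join();
    }
    // After the try-with-resources block, the reference is cleared again.
    System.out.println("holder after release: " + tracked.holder.get());
  }
}

The AtomicReference is what makes the holder visible across threads without a data race; a plain field would need volatile or additional locking to give the same guarantee.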
Use of java.util.concurrent.atomic.AtomicReference in project hadoop by apache.
The class TestFileAppend4, method testRecoverFinalizedBlock.
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, and then tries to recover
 * the lease from another thread.
 */
@Test(timeout = 60000)
public void testRecoverFinalizedBlock() throws Throwable {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  try {
    cluster.waitActive();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    // Delay completeFile
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (ExtendedBlock) anyObject(), anyLong());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    file1 = new Path("/testRecoverFinalized");
    final OutputStream stm = client.create("/testRecoverFinalized", true);
    // write 1/2 block
    AppendTestUtil.write(stm, 0, 4096);
    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    Thread t = new Thread() {

      @Override
      public void run() {
        try {
          stm.close();
        } catch (Throwable t) {
          err.set(t);
        }
      }
    };
    t.start();
    LOG.info("Waiting for close to get to latch...");
    delayer.waitForCall();
    // At this point, the block is finalized on the DNs, but the file
    // has not been completed in the NN.
    // Lose the leases
    LOG.info("Killing lease checker");
    client.getLeaseRenewer().interruptAndJoin();
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    LOG.info("Recovering file");
    recoverFile(fs2);
    LOG.info("Telling close to proceed.");
    delayer.proceed();
    LOG.info("Waiting for close to finish.");
    t.join();
    LOG.info("Close finished.");
    // We expect that close will get a "File is not open" error.
    Throwable thrownByClose = err.get();
    assertNotNull(thrownByClose);
    assertTrue(thrownByClose instanceof LeaseExpiredException);
    GenericTestUtils.assertExceptionContains("File is not open for writing", thrownByClose);
  } finally {
    cluster.shutdown();
  }
}
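Here AtomicReference<Throwable> carries an exception from the background close() thread back to the test thread, where it can be asserted on after join(). A minimal sketch of that hand-off, with doWork() as a hypothetical stand-in for the blocking call under test:

import java.util.concurrent.atomic.AtomicReference;

public class BackgroundErrorSketch {

  public static void main(String[] args) throws InterruptedException {
    // Holds the Throwable raised on the worker thread, if any.
    final AtomicReference<Throwable> err = new AtomicReference<>();

    Thread worker = new Thread(() -> {
      try {
        doWork();
      } catch (Throwable t) {
        err.set(t);
      }
    });
    worker.start();
    worker.join();

    // Surface the background failure in the calling thread; the Hadoop test
    // asserts on its type and message instead of printing it.
    Throwable thrown = err.get();
    if (thrown != null) {
      System.out.println("worker failed with: " + thrown);
    }
  }

  // Hypothetical workload that may throw.
  private static void doWork() throws Exception {
    throw new IllegalStateException("File is not open for writing");
  }
}

This is needed because an exception thrown inside Thread.run() would otherwise be swallowed by the thread's uncaught-exception handler and never reach the JUnit assertion machinery.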
Use of java.util.concurrent.atomic.AtomicReference in project hadoop by apache.
The class TestBlockListAsLongs, method testDatanodeDetect.
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
  final AtomicReference<BlockReportRequestProto> request = new AtomicReference<>();
  // just capture the outgoing PB
  DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
  doAnswer(new Answer<BlockReportResponseProto>() {

    public BlockReportResponseProto answer(InvocationOnMock invocation) {
      Object[] args = invocation.getArguments();
      request.set((BlockReportRequestProto) args[1]);
      return BlockReportResponseProto.newBuilder().build();
    }
  }).when(mockProxy).blockReport(any(RpcController.class), any(BlockReportRequestProto.class));
  @SuppressWarnings("resource")
  DatanodeProtocolClientSideTranslatorPB nn = new DatanodeProtocolClientSideTranslatorPB(mockProxy);
  DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
  NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
  reg.setNamespaceInfo(nsInfo);
  Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
  BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
  DatanodeStorage storage = new DatanodeStorage("s1");
  StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
  // check DN sends new-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
  nn.blockReport(reg, "pool", sbr, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  BlockReportRequestProto proto = request.get();
  assertNotNull(proto);
  assertTrue(proto.getReports(0).getBlocksList().isEmpty());
  assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
  // back up to prior version and check DN sends old-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
  BlockListAsLongs blockList = getBlockList(r);
  StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
  nn.blockReport(reg, "pool", obp, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  proto = request.get();
  assertNotNull(proto);
  assertFalse(proto.getReports(0).getBlocksList().isEmpty());
  assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
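In this test the AtomicReference holds the protobuf request captured inside a Mockito Answer, letting the test inspect what the client translator actually sent. A minimal, self-contained sketch of that capture pattern is below; ReportSink and its send() method are hypothetical stand-ins for the Hadoop DatanodeProtocolPB proxy, not its real signature.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.atomic.AtomicReference;

public class CaptureArgumentSketch {

  // Hypothetical collaborator standing in for the mocked RPC proxy.
  interface ReportSink {
    String send(String controller, String report);
  }

  public static void main(String[] args) {
    // The AtomicReference lets the Answer (invoked by Mockito) hand the argument back to the test.
    final AtomicReference<String> captured = new AtomicReference<>();

    ReportSink mockSink = mock(ReportSink.class);
    doAnswer(invocation -> {
      captured.set((String) invocation.getArguments()[1]);
      return "ack";
    }).when(mockSink).send(any(String.class), any(String.class));

    mockSink.send("ctrl", "block-report-payload");
    System.out.println("captured request: " + captured.get());
  }
}

Mockito's ArgumentCaptor is a common alternative for this kind of capture; the Answer-plus-AtomicReference form is handy when the stub must also build a return value, as the block report stub above does.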
Use of java.util.concurrent.atomic.AtomicReference in project hadoop by apache.
The class TestNativeIO, method testMultiThreadedFstat.
/**
 * Test for races in fstat usage
 *
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 */
@Test(timeout = 30000)
public void testMultiThreadedFstat() throws Exception {
  assumeNotWindows();
  final FileOutputStream fos = new FileOutputStream(new File(TEST_DIR, "testfstat"));
  final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
  List<Thread> statters = new ArrayList<Thread>();
  for (int i = 0; i < 10; i++) {
    Thread statter = new Thread() {

      @Override
      public void run() {
        long et = Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"), stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file", S_IFREG, stat.getMode() & S_IFMT);
          } catch (Throwable t) {
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for (Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}
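The same "hammer from many threads, collect the failure in an AtomicReference, rethrow after join" shape can be written without any Hadoop dependencies. A minimal sketch, where exercise() is a hypothetical stand-in for the getFstat call and its assertions; note it uses compareAndSet to keep the first failure, whereas the Hadoop test uses set() and therefore keeps the last one.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

public class HammerSketch {

  public static void main(String[] args) throws InterruptedException {
    final AtomicReference<Throwable> thrown = new AtomicReference<>();
    List<Thread> workers = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
      Thread worker = new Thread(() -> {
        long deadline = System.currentTimeMillis() + 1000;
        while (System.currentTimeMillis() < deadline) {
          try {
            exercise();
          } catch (Throwable t) {
            // compareAndSet records only the first failure seen by any worker.
            thrown.compareAndSet(null, t);
          }
        }
      });
      workers.add(worker);
      worker.start();
    }
    for (Thread worker : workers) {
      worker.join();
    }
    // Fail the calling thread if any worker failed.
    if (thrown.get() != null) {
      throw new RuntimeException("a worker thread failed", thrown.get());
    }
  }

  // Hypothetical operation being hammered concurrently.
  private static void exercise() {
    String owner = System.getProperty("user.name");
    if (owner == null) {
      throw new IllegalStateException("user.name not set");
    }
  }
}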
Use of java.util.concurrent.atomic.AtomicReference in project hadoop by apache.
The class TestBlockReportRateLimiting, method testLeaseExpiration.
/**
 * Start a 2-node cluster with only one block report lease. When the
 * first datanode gets a lease, kill it. Then wait for the lease to
 * expire, and the second datanode to send a full block report.
 */
@Test(timeout = 180000)
public void testLeaseExpiration() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
  conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 100L);
  final Semaphore gotFbrSem = new Semaphore(0);
  final AtomicReference<String> failure = new AtomicReference<>();
  final AtomicReference<MiniDFSCluster> cluster = new AtomicReference<>();
  final AtomicReference<String> datanodeToStop = new AtomicReference<>();
  final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {

    @Override
    public void incomingBlockReportRpc(DatanodeID nodeID, BlockReportContext context) throws IOException {
      if (context.getLeaseId() == 0) {
        setFailure(failure, "Got unexpected rate-limiting-" + "bypassing full block report RPC from " + nodeID);
      }
      if (nodeID.getXferAddr().equals(datanodeToStop.get())) {
        throw new IOException("Injecting failure into block " + "report RPC for " + nodeID);
      }
      gotFbrSem.release();
    }

    @Override
    public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) {
      if (leaseId == 0) {
        return;
      }
      datanodeToStop.compareAndSet(null, node.getXferAddr());
    }

    @Override
    public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
    }
  };
  try {
    BlockManagerFaultInjector.instance = injector;
    cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
    cluster.get().waitActive();
    Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
    gotFbrSem.acquire();
    Assert.assertNull(failure.get());
  } finally {
    if (cluster.get() != null) {
      cluster.get().shutdown();
    }
  }
}
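Note how datanodeToStop uses compareAndSet(null, ...) so that only the first datanode to obtain a lease is recorded, even though the fault injector's callbacks run concurrently on NameNode threads. A minimal "first writer wins" sketch of that idiom, with purely illustrative thread names:

import java.util.concurrent.atomic.AtomicReference;

public class FirstWriterWinsSketch {

  public static void main(String[] args) throws InterruptedException {
    // Records the first value published by any thread; later writers lose the race.
    final AtomicReference<String> firstLeaseHolder = new AtomicReference<>(null);

    Runnable publisher = () -> {
      String candidate = Thread.currentThread().getName();
      // compareAndSet succeeds only while the reference is still null,
      // so exactly one thread's value is retained.
      firstLeaseHolder.compareAndSet(null, candidate);
    };

    Thread a = new Thread(publisher, "datanode-1");
    Thread b = new Thread(publisher, "datanode-2");
    a.start();
    b.start();
    a.join();
    b.join();

    System.out.println("first publisher: " + firstLeaseHolder.get());
  }
}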