Use of java.util.concurrent.ExecutionException in project hadoop by apache.
The class TestJournalNode, method testAcceptRecoveryBehavior.
/**
* Test that the JournalNode performs correctly as a Paxos
* <em>Acceptor</em> process.
*/
@Test(timeout = 100000)
public void testAcceptRecoveryBehavior() throws Exception {
  // We need to run newEpoch() first, or else we have no way to distinguish
  // different proposals for the same decision.
  try {
    ch.prepareRecovery(1L).get();
    fail("Did not throw IllegalState when trying to run paxos without an epoch");
  } catch (ExecutionException ise) {
    GenericTestUtils.assertExceptionContains("bad epoch", ise);
  }
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  // prepare() with no previously accepted value and no logs present
  PrepareRecoveryResponseProto prep = ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertFalse(prep.hasSegmentState());
  // Make a log segment, and prepare again -- this time should see the
  // segment existing.
  ch.startLogSegment(1L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1L, 1, QJMTestUtil.createTxnData(1, 1)).get();
  prep = ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertTrue(prep.hasSegmentState());
  // accept() should save the accepted value in persistent storage
  ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
  // So another prepare() call from a new epoch would return this value
  ch.newEpoch(2);
  ch.setEpoch(2);
  prep = ch.prepareRecovery(1L).get();
  assertEquals(1L, prep.getAcceptedInEpoch());
  assertEquals(1L, prep.getSegmentState().getEndTxId());
  // A prepare() or accept() call from an earlier epoch should now be rejected
  ch.setEpoch(1);
  try {
    ch.prepareRecovery(1L).get();
    fail("prepare from earlier epoch not rejected");
  } catch (ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains(
        "epoch 1 is less than the last promised epoch 2", ioe);
  }
  try {
    ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
    fail("accept from earlier epoch not rejected");
  } catch (ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains(
        "epoch 1 is less than the last promised epoch 2", ioe);
  }
}
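Every negative assertion above follows the same idiom: call get() on the channel's future, expect an ExecutionException, and match on the wrapped cause. Below is a minimal self-contained sketch of that idiom; the helper name and messages are illustrative, not from the Hadoop source, and failedFuture requires Java 9+.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class FutureFailureAssert {

  /**
   * Wait for the future and assert it failed with a cause whose message
   * contains the given fragment. Future.get() wraps any exception thrown
   * by the task in an ExecutionException, so the real error is the cause.
   */
  static void assertFutureFails(Future<?> future, String fragment)
      throws InterruptedException {
    try {
      future.get();
      throw new AssertionError("Expected future to fail with: " + fragment);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause == null || !String.valueOf(cause.getMessage()).contains(fragment)) {
        throw new AssertionError("Unexpected cause: " + cause, e);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Hypothetical failing future standing in for ch.prepareRecovery(1L).
    Future<Void> failing = CompletableFuture.failedFuture(
        new IllegalStateException("bad epoch: no epoch established"));
    assertFutureFails(failing, "bad epoch");
    System.out.println("assertion passed");
  }
}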
Use of java.util.concurrent.ExecutionException in project hadoop by apache.
The class TestIPCLoggerChannel, method testStopSendingEditsWhenOutOfSync.
/**
* Test that, if the remote node gets unsynchronized (eg some edits were
* missed or the node rebooted), the client stops sending edits until
* the next roll. Test for HDFS-3726.
*/
@Test
public void testStopSendingEditsWhenOutOfSync() throws Exception {
  Mockito.doThrow(new IOException("injected error")).when(mockProxy).journal(
      Mockito.<RequestInfo>any(), Mockito.eq(1L), Mockito.eq(1L), Mockito.eq(1),
      Mockito.same(FAKE_DATA));
  try {
    ch.sendEdits(1L, 1L, 1, FAKE_DATA).get();
    fail("Injected JOOSE did not cause sendEdits() to throw");
  } catch (ExecutionException ee) {
    GenericTestUtils.assertExceptionContains("injected", ee);
  }
  Mockito.verify(mockProxy).journal(
      Mockito.<RequestInfo>any(), Mockito.eq(1L), Mockito.eq(1L), Mockito.eq(1),
      Mockito.same(FAKE_DATA));
  assertTrue(ch.isOutOfSync());
  try {
    ch.sendEdits(1L, 2L, 1, FAKE_DATA).get();
    fail("sendEdits() should throw until next roll");
  } catch (ExecutionException ee) {
    GenericTestUtils.assertExceptionContains("disabled until next roll", ee.getCause());
  }
  // It should have failed without even sending the edits, since it was not in sync.
  Mockito.verify(mockProxy, Mockito.never()).journal(
      Mockito.<RequestInfo>any(), Mockito.eq(1L), Mockito.eq(2L), Mockito.eq(1),
      Mockito.same(FAKE_DATA));
  // It should have sent a heartbeat instead.
  Mockito.verify(mockProxy).heartbeat(Mockito.<RequestInfo>any());
  // After a roll, sending new edits should not fail.
  ch.startLogSegment(3L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  assertFalse(ch.isOutOfSync());
  ch.sendEdits(3L, 3L, 1, FAKE_DATA).get();
}
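The test relies on Mockito to make the remote journal() call fail, and on Future.get() to surface that failure to the caller. Here is a stripped-down sketch of the same mechanics, using a hypothetical Journal interface in place of Hadoop's journal proxy; the method signature is invented for illustration.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class MockedProxyFailureDemo {

  /** Hypothetical stand-in for the journal proxy mocked in the test above. */
  interface Journal {
    void journal(long segmentTxId, long firstTxId, int numTxns, byte[] records)
        throws IOException;
  }

  public static void main(String[] args) throws Exception {
    Journal mockProxy = mock(Journal.class);
    // Same injection style as the test: the remote call fails with IOException.
    doThrow(new IOException("injected error"))
        .when(mockProxy).journal(eq(1L), eq(1L), eq(1), any());

    ExecutorService exec = Executors.newSingleThreadExecutor();
    Future<?> f = exec.submit(() -> {
      mockProxy.journal(1L, 1L, 1, new byte[0]);
      return null;
    });
    try {
      f.get();
    } catch (ExecutionException ee) {
      // The IOException thrown inside the task arrives as the cause.
      System.out.println("cause: " + ee.getCause().getMessage());
    } finally {
      exec.shutdown();
    }
  }
}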
Use of java.util.concurrent.ExecutionException in project hadoop by apache.
The class TestPread, method testTruncateWhileReading.
@Test
public void testTruncateWhileReading() throws Exception {
  Path path = new Path("/testfile");
  final int blockSize = 512;
  // prevent initial pre-fetch of multiple block locations
  Configuration conf = new Configuration();
  conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, blockSize);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    // create multi-block file
    FSDataOutputStream dos = fs.create(path, true, blockSize, (short) 1, blockSize);
    dos.write(new byte[blockSize * 3]);
    dos.close();
    // truncate a file while it's open
    final FSDataInputStream dis = fs.open(path);
    while (!fs.truncate(path, 10)) {
      Thread.sleep(10);
    }
    // verify that reading bytes outside the initial pre-fetch does
    // not send the client into an infinite loop querying locations.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Future<?> future = executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        // read from 2nd block.
        dis.readFully(blockSize, new byte[4]);
        return null;
      }
    });
    try {
      future.get(4, TimeUnit.SECONDS);
      Assert.fail();
    } catch (ExecutionException ee) {
      assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
    } finally {
      future.cancel(true);
      executor.shutdown();
    }
  } finally {
    cluster.shutdown();
  }
}
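The timed future.get(4, TimeUnit.SECONDS) is what turns a potential infinite loop into a test failure: a hung reader surfaces as TimeoutException, while the expected EOF surfaces as ExecutionException. A minimal sketch of that distinction follows; the thrown EOFException stands in for the real pread, and the message is invented.

import java.io.EOFException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedReadCheck {

  public static void main(String[] args) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    // Stand-in for the pread past the truncated length: we simply throw
    // the EOFException the real read is expected to produce.
    Future<?> future = executor.submit((Callable<Void>) () -> {
      throw new EOFException("attempted to read past end of file");
    });
    try {
      // A timed get() separates a hung reader (TimeoutException) from a
      // reader that failed as expected (ExecutionException).
      future.get(4, TimeUnit.SECONDS);
      System.out.println("unexpected success");
    } catch (TimeoutException te) {
      System.out.println("reader appears stuck; the test would fail here");
    } catch (ExecutionException ee) {
      System.out.println("failed as expected: "
          + (ee.getCause() instanceof EOFException));
    } finally {
      future.cancel(true);
      executor.shutdown();
    }
  }
}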
Use of java.util.concurrent.ExecutionException in project hadoop by apache.
The class TestEncryptionZones, method testStartFileRetry.
/**
* Tests the retry logic in startFile. We release the lock while generating
* an EDEK, so tricky things can happen in the intervening time.
*/
@Test
public void testStartFileRetry() throws Exception {
  final Path zone1 = new Path("/zone1");
  final Path file = new Path(zone1, "file1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  ExecutorService executor = Executors.newSingleThreadExecutor();
  // Test when the parent directory becomes an EZ. With no initial EZ,
  // the fsn lock must not be yielded.
  executor.submit(new InjectFaultTask() {
    @Override
    public void doCleanup() throws Exception {
      assertEquals("Expected no startFile key generation", -1, injector.generateCount);
      fsWrapper.delete(file, false);
    }
  }).get();
  // Test when the parent directory unbecomes an EZ. The generation of
  // the EDEK will yield the lock, then re-resolve the path and use the
  // previous EDEK.
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
  executor.submit(new InjectFaultTask() {
    @Override
    public void doFault() throws Exception {
      fsWrapper.delete(zone1, true);
    }

    @Override
    public void doCleanup() throws Exception {
      assertEquals("Expected no startFile retries", 1, injector.generateCount);
      fsWrapper.delete(file, false);
    }
  }).get();
  // Test when the parent directory becomes a different EZ. The generation
  // of the EDEK will yield the lock, re-resolve will detect the EZ has
  // changed, and the client will be asked to retry a 2nd time.
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  final String otherKey = "other_key";
  DFSTestUtil.createKey(otherKey, cluster, conf);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
  executor.submit(new InjectFaultTask() {
    @Override
    public void doFault() throws Exception {
      fsWrapper.delete(zone1, true);
      fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
      dfsAdmin.createEncryptionZone(zone1, otherKey, NO_TRASH);
    }

    @Override
    public void doCleanup() throws Exception {
      assertEquals("Expected a startFile retry", 2, injector.generateCount);
      fsWrapper.delete(zone1, true);
    }
  }).get();
  // Test that the retry limit leads to an error.
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  final String anotherKey = "another_key";
  DFSTestUtil.createKey(anotherKey, cluster, conf);
  dfsAdmin.createEncryptionZone(zone1, anotherKey, NO_TRASH);
  String keyToUse = otherKey;
  MyInjector injector = new MyInjector();
  EncryptionFaultInjector.instance = injector;
  Future<?> future = executor.submit(new CreateFileTask(fsWrapper, file));
  // Flip-flop between two EZs to repeatedly fail
  for (int i = 0; i < DFSOutputStream.CREATE_RETRY_COUNT + 1; i++) {
    injector.ready.await();
    fsWrapper.delete(zone1, true);
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    dfsAdmin.createEncryptionZone(zone1, keyToUse, NO_TRASH);
    if (keyToUse == otherKey) {
      keyToUse = anotherKey;
    } else {
      keyToUse = otherKey;
    }
    injector.wait.countDown();
    injector = new MyInjector();
    EncryptionFaultInjector.instance = injector;
  }
  try {
    future.get();
    fail("Expected exception from too many retries");
  } catch (ExecutionException e) {
    assertExceptionContains("Too many retries because of encryption zone operations",
        e.getCause());
  }
}
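InjectFaultTask and MyInjector coordinate the main thread and the creating thread with latches: ready fires when EDEK generation begins, and wait holds the worker while the test mutates the zone. Below is a bare-bones sketch of that rendezvous, with the retry-limit failure reduced to a thrown exception; the class and field names are stand-ins, not Hadoop's.

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FaultInjectionRendezvous {

  /** Bare-bones stand-in for MyInjector: 'ready' fires when the worker
   *  reaches the injection window, 'proceed' holds it there while the
   *  main thread mutates shared state (the test recreates the EZ). */
  static class Injector {
    final CountDownLatch ready = new CountDownLatch(1);
    final CountDownLatch proceed = new CountDownLatch(1);
  }

  public static void main(String[] args) throws Exception {
    Injector injector = new Injector();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<?> future = executor.submit((Callable<Void>) () -> {
      injector.ready.countDown();   // "EDEK generation has started"
      injector.proceed.await();     // window where the lock is yielded
      // After too many flip-flops the real client gives up:
      throw new IllegalStateException("Too many retries (injected)");
    });

    injector.ready.await();         // wait until the worker is in the window
    // ... the test would delete and recreate the encryption zone here ...
    injector.proceed.countDown();

    try {
      future.get();
    } catch (ExecutionException e) {
      // The retry-limit failure reaches the caller as the cause.
      System.out.println("cause: " + e.getCause().getMessage());
    } finally {
      executor.shutdown();
    }
  }
}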
Use of java.util.concurrent.ExecutionException in project hadoop by apache.
The class TestFSDownload, method testDownloadBadPublic.
@Test(timeout = 10000)
public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target",
      TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  Map<LocalResource, LocalResourceVisibility> rsrcVis =
      new HashMap<LocalResource, LocalResourceVisibility>();
  Random rand = new Random();
  long sharedSeed = rand.nextLong();
  rand.setSeed(sharedSeed);
  System.out.println("SEED: " + sharedSeed);
  Map<LocalResource, Future<Path>> pending =
      new HashMap<LocalResource, Future<Path>>();
  ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
  int size = 512;
  LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
  Path path = new Path(basedir, "test-file");
  LocalResource rsrc = createFile(files, path, size, rand, vis);
  rsrcVis.put(rsrc, vis);
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
  destPath = new Path(destPath,
      Long.toString(uniqueNumberGenerator.incrementAndGet()));
  FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(),
      conf, destPath, rsrc);
  pending.put(rsrc, exec.submit(fsd));
  exec.shutdown();
  while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
  Assert.assertTrue(pending.get(rsrc).isDone());
  try {
    for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
      p.getValue().get();
      Assert.fail("We localized a file that is not public.");
    }
  } catch (ExecutionException e) {
    Assert.assertTrue(e.getCause() instanceof IOException);
  }
}
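The shutdown-then-awaitTermination loop followed by get() is how this test drains the pool and then classifies each pending download. Here is a compact sketch of the same drain-and-inspect pattern with one succeeding and one failing task; the keys, paths, and messages are invented for illustration.

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DrainPendingFutures {

  public static void main(String[] args) throws InterruptedException {
    ExecutorService exec = Executors.newSingleThreadExecutor();
    Map<String, Future<String>> pending = new LinkedHashMap<>();
    pending.put("good", exec.submit(() -> "/local/copy/of/resource"));
    pending.put("bad", exec.submit((Callable<String>) () -> {
      throw new IOException("resource is not publicly readable");
    }));
    exec.shutdown();
    // Same shape as the test: drain the pool, then inspect each future.
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
    for (Map.Entry<String, Future<String>> p : pending.entrySet()) {
      try {
        System.out.println(p.getKey() + " -> " + p.getValue().get());
      } catch (ExecutionException e) {
        // Download failures surface here; the test checks the cause type.
        System.out.println(p.getKey() + " failed: "
            + (e.getCause() instanceof IOException));
      }
    }
  }
}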