
Example 76 with TimeoutException

Use of java.util.concurrent.TimeoutException in project hadoop by apache.

The class TestBlockReaderLocal, method testStatistics.

private void testStatistics(boolean isShortCircuit) throws Exception {
    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
    HdfsConfiguration conf = new HdfsConfiguration();
    TemporarySocketDirectory sockDir = null;
    if (isShortCircuit) {
        DFSInputStream.tcpReadsDisabledForTesting = true;
        sockDir = new TemporarySocketDirectory();
        conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock").getAbsolutePath());
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        DomainSocket.disableBindPathValidation();
    } else {
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    }
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final long RANDOM_SEED = 4567L;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).hosts(new String[] { NetUtils.getLocalHostname() }).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalLocalBytesRead());
        if (isShortCircuit) {
            Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        } else {
            Assert.assertEquals(0, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        }
        fsIn.close();
        fsIn = null;
    } finally {
        DFSInputStream.tcpReadsDisabledForTesting = false;
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
        if (sockDir != null)
            sockDir.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) TimeoutException(java.util.concurrent.TimeoutException)
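
Distilled, the pattern here is: a bounded wait helper signals failure by throwing TimeoutException, and the test converts both TimeoutException and InterruptedException into assertion failures. A minimal standalone sketch of that idiom, with a hypothetical waitFor helper standing in for DFSTestUtil.waitReplication (all names and timeouts below are illustrative, not Hadoop's):

import static org.junit.Assert.fail;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;
import org.junit.Test;

public class WaitOrFailExample {

    // Hypothetical polling helper: returns once the condition holds,
    // throws TimeoutException if it does not hold within timeoutMs.
    private static void waitFor(BooleanSupplier cond, long pollMs, long timeoutMs)
            throws TimeoutException, InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (!cond.getAsBoolean()) {
            if (System.nanoTime() > deadline) {
                throw new TimeoutException("condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(pollMs);
        }
    }

    @Test
    public void testSetupMustCompleteInTime() {
        try {
            waitFor(() -> replicaCount() >= 1, 100, 3000);
        } catch (InterruptedException e) {
            fail("unexpected InterruptedException during wait: " + e);
        } catch (TimeoutException e) {
            fail("unexpected TimeoutException during wait: " + e);
        }
    }

    private int replicaCount() {
        return 1; // stand-in for a real cluster query
    }
}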

Example 77 with TimeoutException

Use of java.util.concurrent.TimeoutException in project hadoop by apache.

The class TestFsVolumeList, method testGetNextVolumeWithClosedVolume.

@Test(timeout = 30000)
public void testGetNextVolumeWithClosedVolume() throws IOException {
    FsVolumeList volumeList = new FsVolumeList(Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
    final List<FsVolumeImpl> volumes = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        File curDir = new File(baseDir, "nextvolume-" + i);
        curDir.mkdirs();
        FsVolumeImpl volume = new FsVolumeImplBuilder()
                .setConf(conf)
                .setDataset(dataset)
                .setStorageID("storage-id")
                .setStorageDirectory(new StorageDirectory(StorageLocation.parse(curDir.getPath())))
                .build();
        volume.setCapacityForTesting(1024 * 1024 * 1024);
        volumes.add(volume);
        volumeList.addVolume(volume.obtainReference());
    }
    // Close the second volume.
    volumes.get(1).setClosed();
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return volumes.get(1).checkClosed();
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for volume to be removed.");
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }
    for (int i = 0; i < 10; i++) {
        try (FsVolumeReference ref = volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
            // the closed volume (index 1) must never be chosen
            assertNotEquals(ref.getVolume(), volumes.get(1));
        }
    }
}
Also used : ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) File(java.io.File) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
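
Two details of this example are worth distilling: a TimeoutException becomes a test failure, while an InterruptedException is re-asserted on the current thread instead of being swallowed. A minimal self-contained sketch of that idiom, using a CountDownLatch as a stand-in for the volume's close signal (names are illustrative):

import static org.junit.Assert.fail;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Test;

public class InterruptPreservingWaitExample {

    @Test
    public void testWaitForClose() {
        CountDownLatch closed = new CountDownLatch(1);
        closed.countDown(); // simulate the volume finishing its close
        try {
            if (!closed.await(3, TimeUnit.SECONDS)) {
                throw new TimeoutException("volume not closed in time");
            }
        } catch (TimeoutException e) {
            fail("timed out while waiting for volume to be removed.");
        } catch (InterruptedException ie) {
            // Re-assert the interrupt instead of swallowing it, so code
            // further up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
        }
    }
}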

Example 78 with TimeoutException

Use of java.util.concurrent.TimeoutException in project hadoop by apache.

The class TestEditLogTailer, method testTriggersLogRollsForAllStandbyNN.

/*
 * 1. While every NN is in standby state, a standby NN's attempt to
 *    roll the edit log must fail.
 * 2. Once one NN becomes active, the standby NNs roll the edit log
 *    successfully.
 */
@Test
public void testTriggersLogRollsForAllStandbyNN() throws Exception {
    Configuration conf = getConf();
    // Roll every 1s
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
    // Have to specify IPC ports so the NNs can talk to each other.
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ServerSocketUtil.getPort(0, 100)))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ServerSocketUtil.getPort(0, 100)))
                    .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(ServerSocketUtil.getPort(0, 100))));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
    try {
        cluster.transitionToStandby(0);
        cluster.transitionToStandby(1);
        cluster.transitionToStandby(2);
        try {
            waitForLogRollInSharedDir(cluster, 3);
            fail("Expected the log roll wait to time out while every NN " + "is in standby state");
        } catch (TimeoutException ignore) {
            // expected: with no active NN, no log roll can be triggered
        }
        cluster.transitionToActive(0);
        waitForLogRollInSharedDir(cluster, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MiniDFSNNTopology(org.apache.hadoop.hdfs.MiniDFSNNTopology) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
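
Note the inverted use of the timeout here: the empty catch (TimeoutException ignore) makes the timeout the expected outcome, proving that no log roll happens while every NN is standby. A minimal sketch of this "must not happen within the window" idiom, with a hypothetical waitForEvent helper standing in for waitForLogRollInSharedDir:

import static org.junit.Assert.fail;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Test;

public class ExpectedTimeoutExample {

    // Hypothetical helper: throws TimeoutException when the event does
    // not occur within timeoutMs.
    private static void waitForEvent(CountDownLatch event, long timeoutMs)
            throws TimeoutException, InterruptedException {
        if (!event.await(timeoutMs, TimeUnit.MILLISECONDS)) {
            throw new TimeoutException("event did not occur in " + timeoutMs + " ms");
        }
    }

    @Test
    public void testEventMustNotHappen() throws Exception {
        CountDownLatch logRolled = new CountDownLatch(1); // never counted down
        try {
            waitForEvent(logRolled, 500);
            fail("event happened even though it should have been impossible");
        } catch (TimeoutException expected) {
            // the timeout is the success condition here
        }
    }
}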

Example 79 with TimeoutException

Use of java.util.concurrent.TimeoutException in project hadoop by apache.

The class TestIPC, method checkBlocking.

// goal is to jam a handler with a connection, fill the callq with
// connections, in turn jamming the readers - then flood the server and
// ensure that the listener blocks when the reader connection queues fill
@SuppressWarnings("unchecked")
private void checkBlocking(int readers, int readerQ, int callQ) throws Exception {
    // a single handler keeps the blocking accounting below simple
    int handlers = 1;
    final Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY, readerQ);
    // send in enough clients to block up the handlers, callq, and readers
    final int initialClients = readers + callQ + handlers;
    // max connections we should ever end up accepting at once: the
    // initial clients, a full queue per reader, plus the one connection
    // held by the blocked listener
    final int maxAccept = initialClients + readers * readerQ + 1;
    // stress it with 2X the max
    int clients = maxAccept * 2;
    final AtomicInteger failures = new AtomicInteger(0);
    final CountDownLatch callFinishedLatch = new CountDownLatch(clients);
    // start server
    final TestServerQueue server = new TestServerQueue(clients, readers, callQ, handlers, conf);
    CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox.getInternalState(server, "callQueue"));
    Whitebox.setInternalState(server, "callQueue", spy);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    Client.setConnectTimeout(conf, 10000);
    // instantiate the threads, will start in batches
    Thread[] threads = new Thread[clients];
    for (int i = 0; i < clients; i++) {
        threads[i] = new Thread(new Runnable() {

            @Override
            public void run() {
                Client client = new Client(LongWritable.class, conf);
                try {
                    call(client, new LongWritable(Thread.currentThread().getId()), addr, 60000, conf);
                } catch (Throwable e) {
                    LOG.error(e);
                    failures.incrementAndGet();
                    return;
                } finally {
                    callFinishedLatch.countDown();
                    client.stop();
                }
            }
        });
    }
    // start the initial clients one at a time, so one blocks in a handler
    // and the others do not block in the race to fill the callq
    for (int i = 0; i < initialClients; i++) {
        threads[i].start();
        if (i == 0) {
            // let first reader block in a call
            server.firstCallLatch.await();
        }
        // wait until reader put a call to callQueue, to make sure all readers
        // are blocking on the queue after initialClients threads are started.
        verify(spy, timeout(100).times(i + 1)).put(Mockito.<Call>anyObject());
    }
    try {
        // wait till everything is slotted, should happen immediately
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= initialClients;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open.");
    }
    LOG.info("(initial clients) need:" + initialClients + " connections have:" + server.getNumOpenConnections());
    LOG.info("ipc layer should be blocked");
    assertEquals(callQ, server.getCallQueueLen());
    assertEquals(initialClients, server.getNumOpenConnections());
    // connection queues should fill and then the listener should block
    for (int i = initialClients; i < clients; i++) {
        threads[i].start();
    }
    Thread.sleep(10);
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= maxAccept;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open until maxAccept.");
    }
    LOG.info("(max clients) need:" + maxAccept + " connections have:" + server.getNumOpenConnections());
    // check a few times to make sure we didn't go over
    for (int i = 0; i < 4; i++) {
        assertEquals(maxAccept, server.getNumOpenConnections());
        Thread.sleep(100);
    }
    // sanity check that no calls have finished
    assertEquals(clients, callFinishedLatch.getCount());
    LOG.info("releasing the calls");
    server.callBlockLatch.countDown();
    callFinishedLatch.await();
    for (Thread t : threads) {
        t.join();
    }
    assertEquals(0, failures.get());
    server.stop();
}
Also used : Call(org.apache.hadoop.ipc.Server.Call) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LongWritable(org.apache.hadoop.io.LongWritable) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TimeoutException(java.util.concurrent.TimeoutException) ConnectTimeoutException(org.apache.hadoop.net.ConnectTimeoutException) SocketTimeoutException(java.net.SocketTimeoutException)
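
One detail worth calling out is verify(spy, timeout(100).times(i + 1)), which polls the mock for up to 100 ms instead of asserting immediately, absorbing the asynchrony of the reader threads. A minimal self-contained sketch of Mockito's timeout() verification (the queue and producer thread are illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

import java.util.Queue;
import org.junit.Test;

public class MockitoTimeoutVerifyExample {

    @Test
    public void testAsyncInteractionIsObserved() throws Exception {
        @SuppressWarnings("unchecked")
        Queue<String> queue = mock(Queue.class);
        Thread producer = new Thread(() -> queue.offer("call"));
        producer.start();
        // Waits up to 100 ms for the interaction to happen instead of
        // failing immediately if the producer has not run yet.
        verify(queue, timeout(100).times(1)).offer("call");
        producer.join();
    }
}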

Example 80 with TimeoutException

Use of java.util.concurrent.TimeoutException in project hadoop by apache.

The class TestSaslRPC, method testSaslResponseOrdering.

// ensure that for all QOP settings the client can handle postponed RPC
// responses; essentially verifies that the RPC server isn't encrypting
// and queueing the responses out of order.
@Test(timeout = 10000)
public void testSaslResponseOrdering() throws Exception {
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.TOKEN, conf);
    UserGroupInformation.setConfiguration(conf);
    TestTokenSecretManager sm = new TestTokenSecretManager();
    Server server = setupTestServer(conf, 1, sm);
    try {
        final InetSocketAddress addr = NetUtils.getConnectAddress(server);
        final UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser("client");
        clientUgi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
        TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(clientUgi.getUserName()));
        Token<?> token = new Token<>(tokenId, sm);
        SecurityUtil.setTokenService(token, addr);
        clientUgi.addToken(token);
        clientUgi.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                final TestRpcService proxy = getClient(addr, conf);
                final ExecutorService executor = Executors.newCachedThreadPool();
                final AtomicInteger count = new AtomicInteger();
                try {
                    // queue up a bunch of futures for postponed calls serviced
                    // in a random order.
                    Future<?>[] futures = new Future<?>[10];
                    for (int i = 0; i < futures.length; i++) {
                        futures[i] = executor.submit(new Callable<Void>() {

                            @Override
                            public Void call() throws Exception {
                                String expect = "future" + count.getAndIncrement();
                                String answer = convert(proxy.echoPostponed(null, newEchoRequest(expect)));
                                assertEquals(expect, answer);
                                return null;
                            }
                        });
                        try {
                            // ensures the call is initiated and the response is blocked.
                            futures[i].get(100, TimeUnit.MILLISECONDS);
                        } catch (TimeoutException te) {
                            // expected.
                            continue;
                        }
                        Assert.fail("future" + i + " did not block");
                    }
                    // triggers responses to be unblocked in a random order.  having
                    // only 1 handler ensures that the prior calls are already
                    // postponed.  1 handler also ensures that this call will
                    // timeout if the postponing doesn't work (ie. free up handler)
                    proxy.sendPostponed(null, newEmptyRequest());
                    for (int i = 0; i < futures.length; i++) {
                        LOG.info("waiting for future" + i);
                        futures[i].get();
                    }
                } finally {
                    RPC.stopProxy(proxy);
                    executor.shutdownNow();
                }
                return null;
            }
        });
    } finally {
        server.stop();
    }
}
Also used : SaslServer(javax.security.sasl.SaslServer) InetSocketAddress(java.net.InetSocketAddress) Text(org.apache.hadoop.io.Text) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) ServiceException(com.google.protobuf.ServiceException) UnsupportedCallbackException(javax.security.auth.callback.UnsupportedCallbackException) TimeoutException(java.util.concurrent.TimeoutException) SaslException(javax.security.sasl.SaslException) IOException(java.io.IOException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Test(org.junit.Test)
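
The core trick above is using a short, bounded Future.get() as a probe: a TimeoutException means the call is still blocked, which is the desired state, and only after the release does get() return. A minimal self-contained sketch of that probe, with a CountDownLatch standing in for the postponed RPC response (names are illustrative):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Test;

public class BlockedFutureProbeExample {

    @Test
    public void testCallStaysBlockedUntilReleased() throws Exception {
        ExecutorService executor = Executors.newCachedThreadPool();
        CountDownLatch release = new CountDownLatch(1);
        try {
            Future<String> future = executor.submit(() -> {
                release.await(); // stand-in for the postponed RPC response
                return "answer";
            });
            try {
                // A short bounded get() probes whether the call is still
                // blocked; the TimeoutException is the expected outcome.
                future.get(100, TimeUnit.MILLISECONDS);
                fail("call did not block");
            } catch (TimeoutException expected) {
                // still blocked, as intended
            }
            release.countDown(); // unblock the postponed call
            assertEquals("answer", future.get());
        } finally {
            executor.shutdownNow();
        }
    }
}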

Aggregations

TimeoutException (java.util.concurrent.TimeoutException): 788
ExecutionException (java.util.concurrent.ExecutionException): 249
IOException (java.io.IOException): 184
Test (org.junit.Test): 149
ArrayList (java.util.ArrayList): 75
CountDownLatch (java.util.concurrent.CountDownLatch): 73
ExecutorService (java.util.concurrent.ExecutorService): 71
Future (java.util.concurrent.Future): 54
CancellationException (java.util.concurrent.CancellationException): 44
Test (org.testng.annotations.Test): 44
List (java.util.List): 39
HashMap (java.util.HashMap): 38
Map (java.util.Map): 38
File (java.io.File): 36
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 36
TimeUnit (java.util.concurrent.TimeUnit): 34
AtomicReference (java.util.concurrent.atomic.AtomicReference): 26
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 22
URI (java.net.URI): 21
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 21