
Example 31 with AtomicReference

use of java.util.concurrent.atomic.AtomicReference in project hbase by apache.

the class TestProcedureNonce method testConcurrentNonceRegistration.

private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup, final long nonce) throws IOException {
    // register the nonce
    final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce);
    final AtomicReference<Throwable> t1Exception = new AtomicReference<>();
    final AtomicReference<Throwable> t2Exception = new AtomicReference<>();
    final CountDownLatch t1NonceRegisteredLatch = new CountDownLatch(1);
    final CountDownLatch t2BeforeNonceRegisteredLatch = new CountDownLatch(1);
    final Thread[] threads = new Thread[2];
    threads[0] = new Thread() {

        @Override
        public void run() {
            try {
                // register the nonce and wake up t2
                assertFalse("unexpected already registered nonce", procExecutor.registerNonce(nonceKey) >= 0);
                t1NonceRegisteredLatch.countDown();
                // hold the submission until t2 is registering the nonce
                t2BeforeNonceRegisteredLatch.await();
                Threads.sleep(1000);
                if (submitProcedure) {
                    CountDownLatch latch = new CountDownLatch(1);
                    TestSingleStepProcedure proc = new TestSingleStepProcedure();
                    procEnv.setWaitLatch(latch);
                    procExecutor.submitProcedure(proc, nonceKey);
                    Threads.sleep(100);
                    // complete the procedure
                    latch.countDown();
                } else {
                    procExecutor.unregisterNonceIfProcedureWasNotSubmitted(nonceKey);
                }
            } catch (Throwable e) {
                t1Exception.set(e);
            } finally {
                t1NonceRegisteredLatch.countDown();
                t2BeforeNonceRegisteredLatch.countDown();
            }
        }
    };
    threads[1] = new Thread() {

        @Override
        public void run() {
            try {
                // wait until t1 has registered the nonce
                t1NonceRegisteredLatch.await();
                // register the nonce
                t2BeforeNonceRegisteredLatch.countDown();
                assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0);
            } catch (Throwable e) {
                t2Exception.set(e);
            } finally {
                t1NonceRegisteredLatch.countDown();
                t2BeforeNonceRegisteredLatch.countDown();
            }
        }
    };
    for (int i = 0; i < threads.length; ++i) threads[i].start();
    for (int i = 0; i < threads.length; ++i) Threads.shutdown(threads[i]);
    ProcedureTestingUtility.waitNoProcedureRunning(procExecutor);
    assertEquals(null, t1Exception.get());
    assertEquals(null, t2Exception.get());
}
Also used : NonceKey(org.apache.hadoop.hbase.util.NonceKey) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch)
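
The idiom behind t1Exception and t2Exception is worth isolating: a Throwable raised on a worker thread cannot reach the test thread directly, so it is parked in an AtomicReference and checked after the threads are joined. A minimal, self-contained sketch of that pattern, with illustrative names not taken from the HBase code:

import java.util.concurrent.atomic.AtomicReference;

public class WorkerFailureCapture {

    public static void main(String[] args) throws InterruptedException {
        // Holder for any failure raised on the worker thread.
        final AtomicReference<Throwable> workerFailure = new AtomicReference<>();

        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    doWork();
                } catch (Throwable t) {
                    // An uncaught exception would be lost to the main thread; record it instead.
                    workerFailure.set(t);
                }
            }
        });

        worker.start();
        worker.join();

        // Check (or assert null, in a test) on the main thread after joining.
        if (workerFailure.get() != null) {
            throw new AssertionError("worker thread failed", workerFailure.get());
        }
    }

    private static void doWork() {
        // Work that may throw goes here.
    }
}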

Example 32 with AtomicReference

use of java.util.concurrent.atomic.AtomicReference in project hive by apache.

the class Rpc method createClient.

/**
   * Creates an RPC client for a server running on the given remote host and port.
   *
   * @param config RPC configuration data.
   * @param eloop Event loop for managing the connection.
   * @param host Host name or IP address to connect to.
   * @param port Port where server is listening.
   * @param clientId The client ID that identifies the connection.
   * @param secret Secret for authenticating the client with the server.
   * @param dispatcher Dispatcher used to handle RPC calls.
   * @return A future that can be used to monitor the creation of the RPC object.
   */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host, int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();
    final ChannelFuture cf = new Bootstrap()
        .group(eloop)
        .handler(new ChannelInboundHandlerAdapter() {
        })
        .channel(NioSocketChannel.class)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs)
        .connect(host, port);
    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();
    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {

        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, connectTimeoutMs, TimeUnit.MILLISECONDS);
    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture, secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });
    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {

        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });
    return promise;
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) SocketChannel(io.netty.channel.socket.SocketChannel) AtomicReference(java.util.concurrent.atomic.AtomicReference) ChannelFutureListener(io.netty.channel.ChannelFutureListener) TimeoutException(java.util.concurrent.TimeoutException) SaslException(javax.security.sasl.SaslException) IOException(java.io.IOException) Promise(io.netty.util.concurrent.Promise) Bootstrap(io.netty.bootstrap.Bootstrap) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter)
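
The AtomicReference<Rpc> above also reflects a general constraint: anonymous handlers and listeners can only capture (effectively) final locals, so a mutable result produced inside a callback needs a holder object. A minimal sketch of that holder pattern, using a hypothetical Callback interface rather than the Netty API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class CallbackHolderSketch {

    // Hypothetical callback type, for illustration only.
    interface Callback<T> {
        void onComplete(T result);
    }

    // Stand-in for an event loop: invokes the callback from another thread.
    static <T> void runAsync(final T value, final Callback<T> cb) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                cb.onComplete(value);
            }
        }).start();
    }

    public static void main(String[] args) throws InterruptedException {
        // A plain local could not be reassigned from the anonymous callback,
        // so the AtomicReference serves as a thread-safe mutable holder.
        final AtomicReference<String> result = new AtomicReference<>();
        final CountDownLatch done = new CountDownLatch(1);

        runAsync("connected", new Callback<String>() {
            @Override
            public void onComplete(String value) {
                result.set(value);
                done.countDown();
            }
        });

        done.await();
        System.out.println("result = " + result.get());
    }
}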

Example 33 with AtomicReference

use of java.util.concurrent.atomic.AtomicReference in project kafka by apache.

the class KafkaConsumerTest method consumerCloseTest.

private void consumerCloseTest(final long closeTimeoutMs, List<? extends AbstractResponse> responses, long waitMs, boolean interrupt) throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 5000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 1000);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    // Poll with responses
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(0);
    // Initiate close() after a commit request on another thread.
    // Kafka consumer is single-threaded, but the implementation allows calls on a
    // different thread as long as the calls are not executed concurrently. So this is safe.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<Exception>();
    try {
        Future<?> future = executor.submit(new Runnable() {

            @Override
            public void run() {
                consumer.commitAsync();
                try {
                    consumer.close(closeTimeoutMs, TimeUnit.MILLISECONDS);
                } catch (Exception e) {
                    closeException.set(e);
                }
            }
        });
        // Close should not complete until a commit or leave response arrives, unless the close timeout is zero.
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            if (closeTimeoutMs != 0)
                fail("Close completed without waiting for commit or leave response");
        } catch (TimeoutException e) {
        // Expected exception
        }
        // Ensure close has started and queued at least one more request after commitAsync
        client.waitForRequests(2, 1000);
        // In non-graceful mode, close() times out without an exception even though commit response is pending
        for (int i = 0; i < responses.size(); i++) {
            client.waitForRequests(1, 1000);
            client.respondFrom(responses.get(i), coordinator);
            if (i != responses.size() - 1) {
                try {
                    future.get(100, TimeUnit.MILLISECONDS);
                    fail("Close completed without waiting for response");
                } catch (TimeoutException e) {
                // Expected exception
                }
            }
        }
        if (waitMs > 0)
            time.sleep(waitMs);
        if (interrupt)
            assertTrue("Close terminated prematurely", future.cancel(true));
        // Make sure that close task completes and another task can be run on the single threaded executor
        executor.submit(new Runnable() {

            @Override
            public void run() {
            }
        }).get(500, TimeUnit.MILLISECONDS);
        if (!interrupt) {
            // Should succeed without TimeoutException or ExecutionException
            future.get(500, TimeUnit.MILLISECONDS);
            assertNull("Unexpected exception during close", closeException.get());
        } else
            assertTrue("Expected exception not thrown " + closeException, closeException.get() instanceof InterruptException);
    } finally {
        executor.shutdownNow();
    }
}
Also used : Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) InterruptException(org.apache.kafka.common.errors.InterruptException) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(java.util.concurrent.TimeoutException) WakeupException(org.apache.kafka.common.errors.WakeupException) ExpectedException(org.junit.rules.ExpectedException) ExecutorService(java.util.concurrent.ExecutorService) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient)
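
One detail in the interrupt branch deserves emphasis: after future.cancel(true), future.get() only throws CancellationException, so the AtomicReference is the test's only window into the exception the close() call actually saw. A small sketch of that mechanism, using plain InterruptedException rather than Kafka's InterruptException:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class CancelledTaskExceptionSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        final AtomicReference<Exception> taskException = new AtomicReference<Exception>();
        final CountDownLatch started = new CountDownLatch(1);
        try {
            Future<?> future = executor.submit(new Runnable() {
                @Override
                public void run() {
                    started.countDown();
                    try {
                        // Blocks until interrupted by future.cancel(true).
                        Thread.sleep(60000);
                    } catch (InterruptedException e) {
                        // The interrupt surfaces here, invisible to future.get();
                        // record it so the submitting thread can inspect it.
                        taskException.set(e);
                    }
                }
            });

            started.await();
            future.cancel(true);

            // Same trick as the test above: an empty task on the single-threaded
            // executor only runs once the cancelled task has finished.
            executor.submit(new Runnable() {
                @Override
                public void run() {
                }
            }).get(1, TimeUnit.SECONDS);

            System.out.println("task observed: " + taskException.get());
        } finally {
            executor.shutdownNow();
        }
    }
}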

Example 34 with AtomicReference

use of java.util.concurrent.atomic.AtomicReference in project kafka by apache.

the class WorkerGroupMember method stop.

private void stop(boolean swallowException) {
    log.trace("Stopping the Connect group member.");
    AtomicReference<Throwable> firstException = new AtomicReference<Throwable>();
    this.stopped = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    if (firstException.get() != null && !swallowException)
        throw new KafkaException("Failed to stop the Connect group member", firstException.get());
    else
        log.debug("The Connect group member has stopped.");
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaException(org.apache.kafka.common.KafkaException)
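
stop() is the "first exception wins" close pattern: every resource is closed regardless of earlier failures, the first Throwable is captured, and only that one is rethrown at the end. A minimal sketch with a hypothetical closeQuietly helper, not the actual Kafka ClientUtils signature:

import java.io.Closeable;
import java.util.concurrent.atomic.AtomicReference;

public class CloseAllSketch {

    // Closes one resource and records only the first failure seen across all calls.
    static void closeQuietly(Closeable resource, AtomicReference<Throwable> firstException) {
        if (resource == null) {
            return;
        }
        try {
            resource.close();
        } catch (Throwable t) {
            // compareAndSet keeps the first Throwable and silently drops later ones.
            firstException.compareAndSet(null, t);
        }
    }

    static void closeAll(Closeable... resources) {
        AtomicReference<Throwable> firstException = new AtomicReference<Throwable>();
        for (Closeable resource : resources) {
            closeQuietly(resource, firstException);
        }
        if (firstException.get() != null) {
            throw new RuntimeException("Failed to close resources", firstException.get());
        }
    }
}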

Example 35 with AtomicReference

use of java.util.concurrent.atomic.AtomicReference in project kafka by apache.

the class RecordAccumulatorTest method testAppendInExpiryCallback.

@Test
public void testAppendInExpiryCallback() throws InterruptedException {
    long retryBackoffMs = 100L;
    long lingerMs = 3000L;
    int requestTimeout = 60;
    int messagesPerBatch = 1024 / msgSize;
    final RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time);
    final AtomicInteger expiryCallbackCount = new AtomicInteger();
    final AtomicReference<Exception> unexpectedException = new AtomicReference<Exception>();
    Callback callback = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception instanceof TimeoutException) {
                expiryCallbackCount.incrementAndGet();
                try {
                    accum.append(tp1, 0L, key, value, null, maxBlockTimeMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException("Unexpected interruption", e);
                }
            } else if (exception != null)
                unexpectedException.compareAndSet(null, exception);
        }
    };
    for (int i = 0; i < messagesPerBatch + 1; i++) accum.append(tp1, 0L, key, value, callback, maxBlockTimeMs);
    assertEquals(2, accum.batches().get(tp1).size());
    assertTrue("First batch not full", accum.batches().get(tp1).peekFirst().isFull());
    // Advance the clock to expire the first batch.
    time.sleep(requestTimeout + 1);
    List<ProducerBatch> expiredBatches = accum.abortExpiredBatches(requestTimeout, time.milliseconds());
    assertEquals("The batch was not expired", 1, expiredBatches.size());
    assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get());
    assertNull("Unexpected exception", unexpectedException.get());
    assertEquals("Some messages not appended from expiry callbacks", 2, accum.batches().get(tp1).size());
    assertTrue("First batch not full after expiry callbacks with appends", accum.batches().get(tp1).peekFirst().isFull());
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) TimeoutException(org.apache.kafka.common.errors.TimeoutException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.Test)
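
The compareAndSet(null, exception) call is what keeps the test meaningful when several callbacks fail: exactly one exception is recorded, and the atomic compare-and-set guarantees that even if the callbacks run concurrently. An illustrative sketch of that race, unrelated to the accumulator itself:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class FirstExceptionWinsSketch {

    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<Exception> firstError = new AtomicReference<Exception>();
        final CountDownLatch start = new CountDownLatch(1);
        final CountDownLatch done = new CountDownLatch(10);

        for (int i = 0; i < 10; i++) {
            final int id = i;
            new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        start.await();
                        // All ten threads race; compareAndSet lets exactly one write succeed.
                        firstError.compareAndSet(null, new Exception("failure in callback " + id));
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        done.countDown();
                    }
                }
            }).start();
        }

        start.countDown();
        done.await();
        // Only one of the ten failures was kept; the rest were ignored.
        System.out.println(firstError.get().getMessage());
    }
}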

Aggregations

AtomicReference (java.util.concurrent.atomic.AtomicReference): 1331
Test (org.junit.Test): 668
CountDownLatch (java.util.concurrent.CountDownLatch): 437
IOException (java.io.IOException): 263
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 205
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 159
ArrayList (java.util.ArrayList): 108
HashMap (java.util.HashMap): 105
List (java.util.List): 95
Map (java.util.Map): 77
Test (org.testng.annotations.Test): 76
File (java.io.File): 64
ExecutionException (java.util.concurrent.ExecutionException): 60
HashSet (java.util.HashSet): 54
URI (java.net.URI): 48
TimeoutException (java.util.concurrent.TimeoutException): 48
HttpServletRequest (javax.servlet.http.HttpServletRequest): 48
HttpServletResponse (javax.servlet.http.HttpServletResponse): 46
MockResponse (okhttp3.mockwebserver.MockResponse): 46
ByteBuffer (java.nio.ByteBuffer): 44