Example 56 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.

the class TestAcidGuarantees method createThreadPool.

private ExecutorService createThreadPool() {
    int maxThreads = 256;
    int coreThreads = 128;
    long keepAliveTime = 60;
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(toString() + "-shared"));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}
Also used : ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
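
The same construction works outside HBase: a bounded LinkedBlockingQueue in front of a ThreadPoolExecutor whose core threads are allowed to time out, built with a daemon thread factory. A minimal self-contained sketch of that pattern, assuming illustrative names (BoundedPoolSketch, the "bounded-pool-" thread prefix) that are not part of the HBase code:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class BoundedPoolSketch {

    static ThreadPoolExecutor newBoundedPool(int coreThreads, int maxThreads, int queueCapacity) {
        // Bounded work queue: the executor only grows past coreThreads once this queue
        // is full, and rejects submissions once both queue and maxThreads are exhausted.
        BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(queueCapacity);

        // Daemon threads so an idle pool does not keep the JVM alive.
        ThreadFactory daemonFactory = new ThreadFactory() {
            private final AtomicInteger counter = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "bounded-pool-" + counter.incrementAndGet());
                t.setDaemon(true);
                return t;
            }
        };

        ThreadPoolExecutor tpe = new ThreadPoolExecutor(
                coreThreads, maxThreads, 60L, TimeUnit.SECONDS, workQueue, daemonFactory);
        // As in the HBase snippet above, let even core threads die after the keep-alive.
        tpe.allowCoreThreadTimeOut(true);
        return tpe;
    }
}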

Example 57 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.

the class MultiHConnection method createBatchPool.

// Copied from ConnectionImplementation.getBatchPool()
// We should get rid of this when Connection.processBatchCallback is un-deprecated and provides
// an API to manage a batch pool
private void createBatchPool(Configuration conf) {
    // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
    int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    this.batchPool = tpe;
}
Also used : ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
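
On the consuming side, a pool configured like this is used as a plain ExecutorService. A short sketch of submitting a batch and shutting the pool down, assuming hypothetical names (BatchPoolUsageSketch, runBatch) that do not appear in the HBase code:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class BatchPoolUsageSketch {

    static void runBatch(ExecutorService batchPool, List<Runnable> operations)
            throws InterruptedException, ExecutionException {
        // Submissions queue up in the LinkedBlockingQueue whenever all core threads are busy.
        List<Future<?>> futures = new ArrayList<>();
        for (Runnable op : operations) {
            futures.add(batchPool.submit(op));
        }
        // Wait for every operation to finish; get() rethrows failures as ExecutionException.
        for (Future<?> f : futures) {
            f.get();
        }
        // Release the worker threads once the batch is done.
        batchPool.shutdown();
        batchPool.awaitTermination(1, TimeUnit.MINUTES);
    }
}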

Example 58 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.

the class MutableHashTable method prepareNextPartition.

protected boolean prepareNextPartition() throws IOException {
    // finalize and cleanup the partitions of the current table
    int buffersAvailable = 0;
    for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) {
        final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
        p.setFurtherPatitioning(this.furtherPartitioning);
        buffersAvailable += p.finalizeProbePhase(this.availableMemory, this.partitionsPending, this.buildSideOuterJoin);
    }
    this.partitionsBeingBuilt.clear();
    this.writeBehindBuffersAvailable += buffersAvailable;
    releaseTable();
    if (this.currentSpilledBuildSide != null) {
        this.currentSpilledBuildSide.closeAndDelete();
        this.currentSpilledBuildSide = null;
    }
    if (this.currentSpilledProbeSide != null) {
        this.currentSpilledProbeSide.closeAndDelete();
        this.currentSpilledProbeSide = null;
    }
    if (this.partitionsPending.isEmpty()) {
        // no more data
        return false;
    }
    // there are pending partitions
    final HashPartition<BT, PT> p = this.partitionsPending.get(0);
    if (p.probeSideRecordCounter == 0) {
        // unprobed spilled partitions are only re-processed for a build-side outer join;
        // there is no need to create a hash table since there are no probe-side records
        List<MemorySegment> memory = new ArrayList<MemorySegment>();
        MemorySegment seg1 = getNextBuffer();
        if (seg1 != null) {
            memory.add(seg1);
            MemorySegment seg2 = getNextBuffer();
            if (seg2 != null) {
                memory.add(seg2);
            }
        } else {
            throw new IllegalStateException("Attempting to begin reading spilled partition without any memory available");
        }
        this.currentSpilledBuildSide = this.ioManager.createBlockChannelReader(p.getBuildSideChannel().getChannelID());
        final ChannelReaderInputView inView = new HeaderlessChannelReaderInputView(currentSpilledBuildSide, memory, p.getBuildSideBlockCount(), p.getLastSegmentLimit(), false);
        final ChannelReaderInputViewIterator<BT> inIter = new ChannelReaderInputViewIterator<BT>(inView, this.availableMemory, this.buildSideSerializer);
        this.unmatchedBuildIterator = inIter;
        this.partitionsPending.remove(0);
        return true;
    }
    this.probeMatchedPhase = true;
    this.unmatchedBuildVisited = false;
    // build the next table; memory must be allocated after this call
    buildTableFromSpilledPartition(p);
    // set the probe side - gather memory segments for reading
    LinkedBlockingQueue<MemorySegment> returnQueue = new LinkedBlockingQueue<MemorySegment>();
    this.currentSpilledProbeSide = this.ioManager.createBlockChannelReader(p.getProbeSideChannel().getChannelID(), returnQueue);
    List<MemorySegment> memory = new ArrayList<MemorySegment>();
    MemorySegment seg1 = getNextBuffer();
    if (seg1 != null) {
        memory.add(seg1);
        MemorySegment seg2 = getNextBuffer();
        if (seg2 != null) {
            memory.add(seg2);
        }
    } else {
        throw new IllegalStateException("Attempting to begin probing of partition without any memory available");
    }
    ChannelReaderInputViewIterator<PT> probeReader = new ChannelReaderInputViewIterator<PT>(this.currentSpilledProbeSide, returnQueue, memory, this.availableMemory, this.probeSideSerializer, p.getProbeSideBlockCount());
    this.probeIterator.set(probeReader);
    // unregister the pending partition
    this.partitionsPending.remove(0);
    this.currentRecursionDepth = p.getRecursionLevel() + 1;
    // recursively get the next
    return nextRecord();
}
Also used : ArrayList(java.util.ArrayList) ChannelReaderInputViewIterator(org.apache.flink.runtime.io.disk.ChannelReaderInputViewIterator) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) MemorySegment(org.apache.flink.core.memory.MemorySegment) HeaderlessChannelReaderInputView(org.apache.flink.runtime.io.disk.iomanager.HeaderlessChannelReaderInputView) ChannelReaderInputView(org.apache.flink.runtime.io.disk.iomanager.ChannelReaderInputView)
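
Stripped of Flink's I/O classes, the probe-side setup above is a hand-off queue: an asynchronous reader returns filled buffers through a LinkedBlockingQueue and the consumer blocks until the next one arrives. A minimal sketch of that pattern, with byte[] buffers standing in for MemorySegment and illustrative names (ReturnQueueSketch, "async-reader") that are not from the Flink code:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class ReturnQueueSketch {

    public static void main(String[] args) throws InterruptedException {
        // Stands in for the queue of MemorySegments handed back by the block channel reader.
        BlockingQueue<byte[]> returnQueue = new LinkedBlockingQueue<>();

        // Hypothetical asynchronous reader: fills buffers and hands them over the queue.
        Thread reader = new Thread(() -> {
            for (int block = 0; block < 4; block++) {
                byte[] buffer = new byte[32 * 1024];
                // ... fill the buffer from disk or network ...
                returnQueue.offer(buffer); // never blocks: the queue is unbounded
            }
        }, "async-reader");
        reader.start();

        // Consumer side: take() blocks until the reader has produced the next buffer.
        for (int block = 0; block < 4; block++) {
            byte[] buffer = returnQueue.take();
            // ... process the buffer, then return it to a memory pool ...
            System.out.println("got buffer of " + buffer.length + " bytes");
        }
        reader.join();
    }
}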

Example 59 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.

the class KvStateServerTest method testSimpleRequest.

/**
 * Tests a simple successful query via a SocketChannel.
 */
@Test
public void testSimpleRequest() throws Exception {
    KvStateServer server = null;
    Bootstrap bootstrap = null;
    try {
        KvStateRegistry registry = new KvStateRegistry();
        KvStateRequestStats stats = new AtomicKvStateRequestStats();
        server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
        server.start();
        KvStateServerAddress serverAddress = server.getAddress();
        int numKeyGroups = 1;
        AbstractStateBackend abstractBackend = new MemoryStateBackend();
        DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
        dummyEnv.setKvStateRegistry(registry);
        AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(dummyEnv, new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups, new KeyGroupRange(0, 0), registry.createTaskRegistry(new JobID(), new JobVertexID()));
        final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();
        registry.registerListener(registryListener);
        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("vanilla");
        ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
        // Update KvState
        int expectedValue = 712828289;
        int key = 99812822;
        backend.setCurrentKey(key);
        state.update(expectedValue);
        // Request
        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
        // Connect to the server
        final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
        bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4), new ChannelInboundHandlerAdapter() {

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                responses.add((ByteBuf) msg);
            }
        });
        Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();
        long requestId = Integer.MAX_VALUE + 182828L;
        assertTrue(registryListener.registrationName.equals("vanilla"));
        ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId, registryListener.kvStateId, serializedKeyAndNamespace);
        channel.writeAndFlush(request);
        ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
        KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);
        assertEquals(requestId, response.getRequestId());
        int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(), IntSerializer.INSTANCE);
        assertEquals(expectedValue, actualValue);
    } finally {
        if (server != null) {
            server.shutDown();
        }
        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.group();
            if (group != null) {
                group.shutdownGracefully();
            }
        }
    }
}
Also used : KvStateRegistry(org.apache.flink.runtime.query.KvStateRegistry) KvStateRequestResult(org.apache.flink.runtime.query.netty.message.KvStateRequestResult) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ByteBuf(io.netty.buffer.ByteBuf) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Bootstrap(io.netty.bootstrap.Bootstrap) LengthFieldBasedFrameDecoder(io.netty.handler.codec.LengthFieldBasedFrameDecoder) AbstractStateBackend(org.apache.flink.runtime.state.AbstractStateBackend) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) SocketChannel(io.netty.channel.socket.SocketChannel) Channel(io.netty.channel.Channel) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) EventLoopGroup(io.netty.channel.EventLoopGroup) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) JobID(org.apache.flink.api.common.JobID) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.Test)
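
The test above uses a pattern worth noting on its own: the asynchronous handler only enqueues responses, and the test thread blocks on poll with a timeout so a lost response fails the test instead of hanging it. A stripped-down sketch of that pattern with a plain background task in place of the Netty channel (class name and string values are illustrative, not from the Flink test):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.junit.Test;

public class AsyncResponseQueueSketch {

    @Test
    public void testResponseArrivesWithinTimeout() throws Exception {
        BlockingQueue<String> responses = new LinkedBlockingQueue<>();

        // Stand-in for the channel handler: the async callback only enqueues the result.
        CompletableFuture
                .supplyAsync(() -> "REQUEST_RESULT")
                .thenAccept(responses::add);

        // The test thread blocks with a timeout; a lost response fails fast
        // instead of hanging the build.
        String response = responses.poll(10, TimeUnit.SECONDS);
        assertNotNull("No response received within the timeout", response);
        assertEquals("REQUEST_RESULT", response);
    }
}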

Example 60 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.

the class AkkaKvStateLocationLookupServiceTest method testLeaderSessionIdChange.

/**
 * Tests that messages are properly decorated with the leader session ID.
 */
@Test
public void testLeaderSessionIdChange() throws Exception {
    TestingLeaderRetrievalService leaderRetrievalService = new TestingLeaderRetrievalService();
    Queue<LookupKvStateLocation> received = new LinkedBlockingQueue<>();
    AkkaKvStateLocationLookupService lookupService = new AkkaKvStateLocationLookupService(leaderRetrievalService, testActorSystem, TIMEOUT, new AkkaKvStateLocationLookupService.DisabledLookupRetryStrategyFactory());
    lookupService.start();
    // Create test actors with random leader session IDs
    KvStateLocation expected1 = new KvStateLocation(new JobID(), new JobVertexID(), 8282, "salt");
    UUID leaderSessionId1 = UUID.randomUUID();
    ActorRef testActor1 = LookupResponseActor.create(received, leaderSessionId1, expected1);
    String testActorAddress1 = AkkaUtils.getAkkaURL(testActorSystem, testActor1);
    KvStateLocation expected2 = new KvStateLocation(new JobID(), new JobVertexID(), 22321, "pepper");
    UUID leaderSessionId2 = UUID.randomUUID();
    ActorRef testActor2 = LookupResponseActor.create(received, leaderSessionId2, expected2);
    String testActorAddress2 = AkkaUtils.getAkkaURL(testActorSystem, testActor2);
    JobID jobId = new JobID();
    //
    // Notify about first leader
    //
    leaderRetrievalService.notifyListener(testActorAddress1, leaderSessionId1);
    KvStateLocation location = Await.result(lookupService.getKvStateLookupInfo(jobId, "rock"), TIMEOUT);
    assertEquals(expected1, location);
    assertEquals(1, received.size());
    verifyLookupMsg(received.poll(), jobId, "rock");
    //
    // Notify about second leader
    //
    leaderRetrievalService.notifyListener(testActorAddress2, leaderSessionId2);
    location = Await.result(lookupService.getKvStateLookupInfo(jobId, "roll"), TIMEOUT);
    assertEquals(expected2, location);
    assertEquals(1, received.size());
    verifyLookupMsg(received.poll(), jobId, "roll");
}
Also used : TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) LookupKvStateLocation(org.apache.flink.runtime.query.KvStateMessage.LookupKvStateLocation) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) UUID(java.util.UUID) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
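
Here the LinkedBlockingQueue is simply a thread-safe recorder: handlers running on other threads add what they received, and the test inspects the queue afterwards. A minimal sketch of that use, with hypothetical names (ReceivedMessagesSketch, the "lookup:..." strings) not taken from the Flink test:

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ReceivedMessagesSketch {

    public static void main(String[] args) throws InterruptedException {
        // Declared as Queue, as in the test above, but backed by LinkedBlockingQueue
        // so concurrent handlers can record what they received without extra locking.
        Queue<String> received = new LinkedBlockingQueue<>();

        ExecutorService handlers = Executors.newFixedThreadPool(2);
        handlers.execute(() -> received.add("lookup:rock"));
        handlers.execute(() -> received.add("lookup:roll"));
        handlers.shutdown();
        handlers.awaitTermination(10, TimeUnit.SECONDS);

        // Inspect what was recorded, in arrival order, once the producers are done.
        List<String> all = new ArrayList<>(received);
        System.out.println(all.size() + " messages recorded: " + all);
    }
}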

Aggregations

LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 259
Test (org.junit.Test): 91
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 64
IOException (java.io.IOException): 26
ArrayList (java.util.ArrayList): 23
Emitter (io.socket.emitter.Emitter): 19
JSONObject (org.json.JSONObject): 19
CountDownLatch (java.util.concurrent.CountDownLatch): 18
ThreadFactory (java.util.concurrent.ThreadFactory): 16
ExecutorService (java.util.concurrent.ExecutorService): 14
BlockingQueue (java.util.concurrent.BlockingQueue): 13
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 13
List (java.util.List): 12
URI (java.net.URI): 11
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 11
Intent (android.content.Intent): 9
HashMap (java.util.HashMap): 9
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 8
Map (java.util.Map): 8
UUID (java.util.UUID): 8