Use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.
Class TestAcidGuarantees, method createThreadPool():
private ExecutorService createThreadPool() {
  int maxThreads = 256;
  int coreThreads = 128;
  long keepAliveTime = 60;
  // Bounded work queue, sized as maxThreads * the default maximum number of concurrent client tasks.
  BlockingQueue<Runnable> workQueue =
    new LinkedBlockingQueue<Runnable>(maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
  ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime,
    TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(toString() + "-shared"));
  // Allow idle core threads to time out after keepAliveTime seconds.
  tpe.allowCoreThreadTimeOut(true);
  return tpe;
}
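For comparison, here is a minimal JDK-only sketch of the same pattern (the pool sizes, queue capacity, and class name are illustrative, not taken from HBase). A ThreadPoolExecutor over a bounded LinkedBlockingQueue only grows past its core size once the queue is full, and rejects further submissions once both the queue and the maximum thread count are exhausted:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
  public static void main(String[] args) {
    int coreThreads = 2;
    int maxThreads = 4;
    // Stand-in for maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS.
    int queueCapacity = 8;
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(queueCapacity);
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        coreThreads, maxThreads, 60, TimeUnit.SECONDS, workQueue);
    // As in the snippet above: idle threads (including core threads) time out.
    pool.allowCoreThreadTimeOut(true);
    try {
      for (int i = 0; i < 20; i++) {
        pool.execute(() -> {
          try {
            Thread.sleep(100); // simulate work
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        });
      }
    } catch (RejectedExecutionException e) {
      // Thrown once all 4 threads are busy and the 8-slot queue is full.
      System.out.println("submission rejected: " + e);
    } finally {
      pool.shutdown();
    }
  }
}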
Use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.
Class MultiHConnection, method createBatchPool():
// Copied from ConnectionImplementation.getBatchPool()
// We should get rid of this when Connection.processBatchCallback is un-deprecated and provides
// an API to manage a batch pool
private void createBatchPool(Configuration conf) {
  // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
  int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
  if (maxThreads == 0) {
    maxThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
  LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(
    maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
      HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
  ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime,
    TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
  tpe.allowCoreThreadTimeOut(true);
  this.batchPool = tpe;
}
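One detail worth noting in both HBase snippets: because corePoolSize equals maximumPoolSize here, the bounded queue is the only thing limiting the backlog, and allowCoreThreadTimeOut(true) is what lets the otherwise fixed-size pool shrink to zero threads when idle. A JDK-only sketch of that behavior (sizes and names are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedPoolTimeoutSketch {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        4, 4, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>(64));
    pool.allowCoreThreadTimeOut(true);
    for (int i = 0; i < 8; i++) {
      pool.execute(() -> { /* short-lived task */ });
    }
    Thread.sleep(200);
    System.out.println("threads shortly after the burst: " + pool.getPoolSize());
    Thread.sleep(2000); // longer than the 1-second keep-alive
    System.out.println("threads after idling:            " + pool.getPoolSize()); // typically 0
    pool.shutdown();
  }
}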
Use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.
Class MutableHashTable, method prepareNextPartition():
protected boolean prepareNextPartition() throws IOException {
  // finalize and cleanup the partitions of the current table
  int buffersAvailable = 0;
  for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) {
    final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
    p.setFurtherPatitioning(this.furtherPartitioning);
    buffersAvailable += p.finalizeProbePhase(this.availableMemory, this.partitionsPending,
      this.buildSideOuterJoin);
  }
  this.partitionsBeingBuilt.clear();
  this.writeBehindBuffersAvailable += buffersAvailable;
  releaseTable();
  if (this.currentSpilledBuildSide != null) {
    this.currentSpilledBuildSide.closeAndDelete();
    this.currentSpilledBuildSide = null;
  }
  if (this.currentSpilledProbeSide != null) {
    this.currentSpilledProbeSide.closeAndDelete();
    this.currentSpilledProbeSide = null;
  }
  if (this.partitionsPending.isEmpty()) {
    // no more data
    return false;
  }
  // there are pending partitions
  final HashPartition<BT, PT> p = this.partitionsPending.get(0);
  if (p.probeSideRecordCounter == 0) {
    // unprobed spilled partitions are only re-processed for a build-side outer join;
    // there is no need to create a hash table since there are no probe-side records
    List<MemorySegment> memory = new ArrayList<MemorySegment>();
    MemorySegment seg1 = getNextBuffer();
    if (seg1 != null) {
      memory.add(seg1);
      MemorySegment seg2 = getNextBuffer();
      if (seg2 != null) {
        memory.add(seg2);
      }
    } else {
      throw new IllegalStateException(
        "Attempting to begin reading spilled partition without any memory available");
    }
    this.currentSpilledBuildSide =
      this.ioManager.createBlockChannelReader(p.getBuildSideChannel().getChannelID());
    final ChannelReaderInputView inView = new HeaderlessChannelReaderInputView(
      currentSpilledBuildSide, memory, p.getBuildSideBlockCount(), p.getLastSegmentLimit(), false);
    final ChannelReaderInputViewIterator<BT> inIter = new ChannelReaderInputViewIterator<BT>(
      inView, this.availableMemory, this.buildSideSerializer);
    this.unmatchedBuildIterator = inIter;
    this.partitionsPending.remove(0);
    return true;
  }
  this.probeMatchedPhase = true;
  this.unmatchedBuildVisited = false;
  // build the next table; memory must be allocated after this call
  buildTableFromSpilledPartition(p);
  // set the probe side - gather memory segments for reading
  LinkedBlockingQueue<MemorySegment> returnQueue = new LinkedBlockingQueue<MemorySegment>();
  this.currentSpilledProbeSide = this.ioManager.createBlockChannelReader(
    p.getProbeSideChannel().getChannelID(), returnQueue);
  List<MemorySegment> memory = new ArrayList<MemorySegment>();
  MemorySegment seg1 = getNextBuffer();
  if (seg1 != null) {
    memory.add(seg1);
    MemorySegment seg2 = getNextBuffer();
    if (seg2 != null) {
      memory.add(seg2);
    }
  } else {
    throw new IllegalStateException(
      "Attempting to begin probing of partition without any memory available");
  }
  ChannelReaderInputViewIterator<PT> probeReader = new ChannelReaderInputViewIterator<PT>(
    this.currentSpilledProbeSide, returnQueue, memory, this.availableMemory,
    this.probeSideSerializer, p.getProbeSideBlockCount());
  this.probeIterator.set(probeReader);
  // unregister the pending partition
  this.partitionsPending.remove(0);
  this.currentRecursionDepth = p.getRecursionLevel() + 1;
  // recursively get the next
  return nextRecord();
}
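The returnQueue in this method is the hand-off point between Flink's asynchronous block reader, which pushes the memory segments it has filled, and the iterator that consumes them. A simplified, JDK-only illustration of that hand-off (this is not the IOManager API; ByteBuffer stands in for MemorySegment, and all names here are illustrative):

import java.nio.ByteBuffer;
import java.util.concurrent.LinkedBlockingQueue;

public class ReturnQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    LinkedBlockingQueue<ByteBuffer> returnQueue = new LinkedBlockingQueue<>();
    final int blocks = 5;

    // Stand-in for the async block reader: fills buffers and hands them back.
    Thread reader = new Thread(() -> {
      for (int i = 0; i < blocks; i++) {
        ByteBuffer segment = ByteBuffer.allocate(32 * 1024); // stand-in for a MemorySegment
        segment.putInt(0, i);                                // pretend we read block i
        returnQueue.add(segment);                            // non-blocking hand-off
      }
    });
    reader.start();

    // Stand-in for the consuming iterator: blocks until the next buffer arrives.
    for (int i = 0; i < blocks; i++) {
      ByteBuffer segment = returnQueue.take();
      System.out.println("consumed block " + segment.getInt(0));
    }
    reader.join();
  }
}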
Use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.
Class KvStateServerTest, method testSimpleRequest():
/**
 * Tests a simple successful query via a SocketChannel.
 */
@Test
public void testSimpleRequest() throws Exception {
  KvStateServer server = null;
  Bootstrap bootstrap = null;
  try {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();
    server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
    server.start();
    KvStateServerAddress serverAddress = server.getAddress();
    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
      dummyEnv, new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups,
      new KeyGroupRange(0, 0), registry.createTaskRegistry(new JobID(), new JobVertexID()));
    final KvStateServerHandlerTest.TestRegistryListener registryListener =
      new KvStateServerHandlerTest.TestRegistryListener();
    registry.registerListener(registryListener);
    ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    desc.setQueryable("vanilla");
    ValueState<Integer> state = backend.getPartitionedState(
      VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
    // Update KvState
    int expectedValue = 712828289;
    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(expectedValue);
    // Request
    byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(
      key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
    // Connect to the server
    final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
    bootstrap = createBootstrap(
      new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4),
      new ChannelInboundHandlerAdapter() {
        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
          responses.add((ByteBuf) msg);
        }
      });
    Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort())
      .sync().channel();
    long requestId = Integer.MAX_VALUE + 182828L;
    assertTrue(registryListener.registrationName.equals("vanilla"));
    ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(
      channel.alloc(), requestId, registryListener.kvStateId, serializedKeyAndNamespace);
    channel.writeAndFlush(request);
    ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
    KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);
    assertEquals(requestId, response.getRequestId());
    int actualValue = KvStateRequestSerializer.deserializeValue(
      response.getSerializedResult(), IntSerializer.INSTANCE);
    assertEquals(expectedValue, actualValue);
  } finally {
    if (server != null) {
      server.shutDown();
    }
    if (bootstrap != null) {
      EventLoopGroup group = bootstrap.group();
      if (group != null) {
        group.shutdownGracefully();
      }
    }
  }
}
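The LinkedBlockingQueue here bridges Netty's event loop thread, where channelRead() add()s each response, and the JUnit thread, which poll()s with a timeout so a missing response fails the test instead of hanging it. A JDK-only sketch of that hand-off (the names and timeout are illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class AsyncResponseQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> responses = new LinkedBlockingQueue<>();

    // Stand-in for the Netty event loop invoking channelRead(...) on its own thread.
    CompletableFuture.runAsync(() -> responses.add("REQUEST_RESULT"));

    // Stand-in for the test thread: a bounded wait instead of an unbounded take().
    String response = responses.poll(10_000, TimeUnit.MILLISECONDS);
    if (response == null) {
      throw new AssertionError("No response within timeout");
    }
    System.out.println("got response: " + response);
  }
}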
Use of java.util.concurrent.LinkedBlockingQueue in project flink by apache.
Class AkkaKvStateLocationLookupServiceTest, method testLeaderSessionIdChange():
/**
 * Tests that messages are properly decorated with the leader session ID.
 */
@Test
public void testLeaderSessionIdChange() throws Exception {
  TestingLeaderRetrievalService leaderRetrievalService = new TestingLeaderRetrievalService();
  Queue<LookupKvStateLocation> received = new LinkedBlockingQueue<>();
  AkkaKvStateLocationLookupService lookupService = new AkkaKvStateLocationLookupService(
    leaderRetrievalService, testActorSystem, TIMEOUT,
    new AkkaKvStateLocationLookupService.DisabledLookupRetryStrategyFactory());
  lookupService.start();
  // Create test actors with random leader session IDs
  KvStateLocation expected1 = new KvStateLocation(new JobID(), new JobVertexID(), 8282, "salt");
  UUID leaderSessionId1 = UUID.randomUUID();
  ActorRef testActor1 = LookupResponseActor.create(received, leaderSessionId1, expected1);
  String testActorAddress1 = AkkaUtils.getAkkaURL(testActorSystem, testActor1);
  KvStateLocation expected2 = new KvStateLocation(new JobID(), new JobVertexID(), 22321, "pepper");
  UUID leaderSessionId2 = UUID.randomUUID();
  ActorRef testActor2 = LookupResponseActor.create(received, leaderSessionId2, expected2);
  String testActorAddress2 = AkkaUtils.getAkkaURL(testActorSystem, testActor2);
  JobID jobId = new JobID();
  //
  // Notify about first leader
  //
  leaderRetrievalService.notifyListener(testActorAddress1, leaderSessionId1);
  KvStateLocation location = Await.result(lookupService.getKvStateLookupInfo(jobId, "rock"), TIMEOUT);
  assertEquals(expected1, location);
  assertEquals(1, received.size());
  verifyLookupMsg(received.poll(), jobId, "rock");
  //
  // Notify about second leader
  //
  leaderRetrievalService.notifyListener(testActorAddress2, leaderSessionId2);
  location = Await.result(lookupService.getKvStateLookupInfo(jobId, "roll"), TIMEOUT);
  assertEquals(expected2, location);
  assertEquals(1, received.size());
  verifyLookupMsg(received.poll(), jobId, "roll");
}