Use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.
Class ThriftServerRunner, method setupServer.
/**
* Setting up the thrift TServer
*/
private void setupServer() throws Exception {
// Construct correct ProtocolFactory
TProtocolFactory protocolFactory;
if (conf.getBoolean(COMPACT_CONF_KEY, false)) {
LOG.debug("Using compact protocol");
protocolFactory = new TCompactProtocol.Factory();
} else {
LOG.debug("Using binary protocol");
protocolFactory = new TBinaryProtocol.Factory();
}
final TProcessor p = new Hbase.Processor<>(handler);
ImplType implType = ImplType.getServerImpl(conf);
TProcessor processor = p;
// Construct correct TransportFactory
TTransportFactory transportFactory;
if (conf.getBoolean(FRAMED_CONF_KEY, false) || implType.isAlwaysFramed) {
if (qop != null) {
throw new RuntimeException("Thrift server authentication" + " doesn't work with framed transport yet");
}
transportFactory = new TFramedTransport.Factory(conf.getInt(MAX_FRAME_SIZE_CONF_KEY, 2) * 1024 * 1024);
LOG.debug("Using framed transport");
} else if (qop == null) {
transportFactory = new TTransportFactory();
} else {
// Extract the name from the principal
String name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
Map<String, String> saslProperties = new HashMap<>();
saslProperties.put(Sasl.QOP, qop);
TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, new SaslGssCallbackHandler() {
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
}
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (!authid.equals(authzid)) {
ac.setAuthorized(false);
} else {
ac.setAuthorized(true);
String userName = SecurityUtil.getUserFromPrincipal(authzid);
LOG.info("Effective user: " + userName);
ac.setAuthorizedID(userName);
}
}
}
});
transportFactory = saslFactory;
// Create a processor wrapper, to get the caller
processor = new TProcessor() {
@Override
public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
SaslServer saslServer = saslServerTransport.getSaslServer();
String principal = saslServer.getAuthorizationID();
hbaseHandler.setEffectiveUser(principal);
return p.process(inProt, outProt);
}
};
}
if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
LOG.error("Server types " + Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP()) + " don't support IP " + "address binding at the moment. See " + "https://issues.apache.org/jira/browse/HBASE-2155 for details.");
throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
}
// Thrift's implementation uses '0' as a placeholder for 'use the default.'
int backlog = conf.getInt(BACKLOG_CONF_KEY, 0);
if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING || implType == ImplType.THREADED_SELECTOR) {
InetAddress listenAddress = getBindAddress(conf);
TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(new InetSocketAddress(listenAddress, listenPort));
if (implType == ImplType.NONBLOCKING) {
TNonblockingServer.Args serverArgs = new TNonblockingServer.Args(serverTransport);
serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new TNonblockingServer(serverArgs);
} else if (implType == ImplType.HS_HA) {
THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
ExecutorService executorService = createExecutor(callQueue, serverArgs.getMaxWorkerThreads(), serverArgs.getMaxWorkerThreads());
serverArgs.executorService(executorService).processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new THsHaServer(serverArgs);
} else {
// THREADED_SELECTOR
TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf);
CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
ExecutorService executorService = createExecutor(callQueue, serverArgs.getWorkerThreads(), serverArgs.getWorkerThreads());
serverArgs.executorService(executorService).processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new TThreadedSelectorServer(serverArgs);
}
LOG.info("starting HBase " + implType.simpleClassName() + " server on " + Integer.toString(listenPort));
} else if (implType == ImplType.THREAD_POOL) {
// Thread pool server. Get the IP address to bind to.
InetAddress listenAddress = getBindAddress(conf);
int readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY, THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
TServerTransport serverTransport = new TServerSocket(new TServerSocket.ServerSocketTransportArgs().bindAddr(new InetSocketAddress(listenAddress, listenPort)).backlog(backlog).clientTimeout(readTimeout));
TBoundedThreadPoolServer.Args serverArgs = new TBoundedThreadPoolServer.Args(serverTransport, conf);
serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
LOG.info("starting " + ImplType.THREAD_POOL.simpleClassName() + " on " + listenAddress + ":" + Integer.toString(listenPort) + " with readTimeout " + readTimeout + "ms; " + serverArgs);
TBoundedThreadPoolServer tserver = new TBoundedThreadPoolServer(serverArgs, metrics);
this.tserver = tserver;
} else {
throw new AssertionError("Unsupported Thrift server implementation: " + implType.simpleClassName());
}
// A sanity check that we instantiated the right type of server.
if (tserver.getClass() != implType.serverClass) {
throw new AssertionError("Expected to create Thrift server class " + implType.serverClass.getName() + " but got " + tserver.getClass().getName());
}
registerFilters(conf);
}
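For context, the HS_HA and THREADED_SELECTOR branches above hand Thrift an ExecutorService whose work queue is an unbounded LinkedBlockingQueue wrapped in HBase's CallQueue. A minimal standalone sketch of that wiring, using only java.util.concurrent (the pool size of 16 is a hypothetical stand-in for getMaxWorkerThreads(); the class name is made up):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class WorkerPoolSketch {
    public static void main(String[] args) {
        // Unbounded handoff queue, standing in for HBase's CallQueue wrapper.
        LinkedBlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>();
        // Fixed-size pool with an effectively infinite keep-alive, as in createExecutor().
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                16, 16, Long.MAX_VALUE, TimeUnit.SECONDS, callQueue);
        pool.prestartAllCoreThreads();
        pool.execute(() -> System.out.println("handling a Thrift call"));
        pool.shutdown();
    }
}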
Use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.
Class ThriftServer, method createExecutor.
private static ExecutorService createExecutor(int workerThreads, int maxCallQueueSize, ThriftMetrics metrics) {
CallQueue callQueue;
if (maxCallQueueSize > 0) {
callQueue = new CallQueue(new LinkedBlockingQueue<>(maxCallQueueSize), metrics);
} else {
callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
}
ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
tfb.setDaemon(true);
tfb.setNameFormat("thrift2-worker-%d");
ThreadPoolExecutor pool = new THBaseThreadPoolExecutor(workerThreads, workerThreads, Long.MAX_VALUE, TimeUnit.SECONDS, callQueue, tfb.build(), metrics);
pool.prestartAllCoreThreads();
return pool;
}
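The bounded branch above is what gives the server back-pressure: a LinkedBlockingQueue constructed with a capacity makes the executor reject work once the queue fills, instead of buffering without limit. A small hedged demonstration of that behavior (capacity, task count, and sleep time are made up):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueDemo {
    public static void main(String[] args) {
        // Capacity 2: once two tasks are queued behind the single busy worker,
        // further submissions are rejected rather than buffered indefinitely.
        BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(2);
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.SECONDS, queue);
        Runnable slow = () -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        for (int i = 0; i < 5; i++) {
            try {
                pool.execute(slow);
                System.out.println("accepted task " + i);
            } catch (RejectedExecutionException e) {
                System.out.println("rejected task " + i);  // tasks 3 and 4 land here
            }
        }
        pool.shutdownNow();
    }
}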
Use of java.util.concurrent.LinkedBlockingQueue in project tomcat by apache.
Class ContainerBase, method reconfigureStartStopExecutor.
/*
* Implementation note: If there is a demand for more control than this then
* it is likely that the best solution will be to reference an external
* executor.
*/
private void reconfigureStartStopExecutor(int threads) {
if (threads == 1) {
if (!(startStopExecutor instanceof InlineExecutorService)) {
startStopExecutor = new InlineExecutorService();
}
} else {
if (startStopExecutor instanceof ThreadPoolExecutor) {
((ThreadPoolExecutor) startStopExecutor).setMaximumPoolSize(threads);
((ThreadPoolExecutor) startStopExecutor).setCorePoolSize(threads);
} else {
BlockingQueue<Runnable> startStopQueue = new LinkedBlockingQueue<>();
ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, 10, TimeUnit.SECONDS, startStopQueue, new StartStopThreadFactory(getName() + "-startStop-"));
tpe.allowCoreThreadTimeOut(true);
startStopExecutor = tpe;
}
}
}
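The else branch above pairs an unbounded LinkedBlockingQueue with allowCoreThreadTimeOut(true), so the start/stop pool shrinks to zero threads once it has been idle past the 10-second keep-alive, rather than pinning core threads for the container's lifetime. A self-contained sketch of the same idea (a 1-second keep-alive is used here so the effect is quick to observe):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IdleShrinkDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(
                4, 4, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Without this call, the four core threads would never time out.
        tpe.allowCoreThreadTimeOut(true);
        for (int i = 0; i < 4; i++) {
            tpe.execute(() -> { /* short-lived start/stop work */ });
        }
        Thread.sleep(3000);  // let the 1-second keep-alive expire
        System.out.println("live threads after idle: " + tpe.getPoolSize());  // expect 0
        tpe.shutdown();
    }
}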
Use of java.util.concurrent.LinkedBlockingQueue in project weave by continuuity.
Class ZKClientTest, method testExpireRewatch.
@Test
public void testExpireRewatch() throws InterruptedException, IOException, ExecutionException {
InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
zkServer.startAndWait();
try {
final CountDownLatch expireReconnectLatch = new CountDownLatch(1);
final AtomicBoolean expired = new AtomicBoolean(false);
final ZKClientService client = ZKClientServices.delegate(ZKClients.reWatchOnExpire(ZKClientService.Builder.of(zkServer.getConnectionStr()).setSessionTimeout(2000).setConnectionWatcher(new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getState() == Event.KeeperState.Expired) {
expired.set(true);
} else if (event.getState() == Event.KeeperState.SyncConnected && expired.compareAndSet(true, true)) {
expireReconnectLatch.countDown();
}
}
}).build()));
client.startAndWait();
try {
final BlockingQueue<Watcher.Event.EventType> events = new LinkedBlockingQueue<Watcher.Event.EventType>();
client.exists("/expireRewatch", new Watcher() {
@Override
public void process(WatchedEvent event) {
client.exists("/expireRewatch", this);
events.add(event.getType());
}
});
client.create("/expireRewatch", null, CreateMode.PERSISTENT);
Assert.assertEquals(Watcher.Event.EventType.NodeCreated, events.poll(2, TimeUnit.SECONDS));
KillZKSession.kill(client.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 1000);
Assert.assertTrue(expireReconnectLatch.await(5, TimeUnit.SECONDS));
client.delete("/expireRewatch");
Assert.assertEquals(Watcher.Event.EventType.NodeDeleted, events.poll(4, TimeUnit.SECONDS));
} finally {
client.stopAndWait();
}
} finally {
zkServer.stopAndWait();
}
}
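The test above relies on a common pattern: asynchronous watcher callbacks push events into a LinkedBlockingQueue, and the test thread asserts on them with a timed poll(), which fails fast if an event never arrives instead of hanging the way take() would. Stripped of ZooKeeper, the pattern reduces to this sketch (the event name and producer thread are illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class AsyncEventAssertDemo {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> events = new LinkedBlockingQueue<>();
        // Stand-in for a watcher callback firing on another thread.
        new Thread(() -> events.add("NodeCreated")).start();
        // A timed poll() fails fast if the event never arrives.
        String event = events.poll(2, TimeUnit.SECONDS);
        if (!"NodeCreated".equals(event)) {
            throw new AssertionError("expected NodeCreated but got " + event);
        }
        System.out.println("got expected event: " + event);
    }
}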
Use of java.util.concurrent.LinkedBlockingQueue in project druid by druid-io.
Class DirectDruidClient, method run.
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
boolean isBySegment = BaseQuery.getContextBySegment(query, false);
Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
if (types == null) {
final TypeFactory typeFactory = objectMapper.getTypeFactory();
JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
JavaType bySegmentType = typeFactory.constructParametricType(Result.class, typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
types = Pair.of(baseType, bySegmentType);
typesMap.put(query.getClass(), types);
}
final JavaType typeRef;
if (isBySegment) {
typeRef = types.rhs;
} else {
typeRef = types.lhs;
}
final ListenableFuture<InputStream> future;
final String url = String.format("http://%s/druid/v2/", host);
final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());
try {
log.debug("Querying queryId[%s] url[%s]", query.getId(), url);
final long requestStartTime = System.currentTimeMillis();
final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
builder.setDimension("server", host);
final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
private long responseStartTime;
private final AtomicLong byteCount = new AtomicLong(0);
private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
private final AtomicBoolean done = new AtomicBoolean(false);
@Override
public ClientResponse<InputStream> handleResponse(HttpResponse response) {
log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
responseStartTime = System.currentTimeMillis();
emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));
try {
final String responseContext = response.headers().get("X-Druid-Response-Context");
// context may be null in case of error or query timeout
if (responseContext != null) {
context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext, new TypeReference<Map<String, Object>>() {
}));
}
queue.put(new ChannelBufferInputStream(response.getContent()));
} catch (final IOException e) {
log.error(e, "Error parsing response context from url [%s]", url);
return ClientResponse.<InputStream>finished(new InputStream() {
@Override
public int read() throws IOException {
throw e;
}
});
} catch (InterruptedException e) {
log.error(e, "Queue appending interrupted");
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(response.getContent().readableBytes());
return ClientResponse.<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {
@Override
public boolean hasMoreElements() {
// hasMoreElements() stays true until done is set and the queue has drained;
// until then the stream keeps spouting good InputStreams.
synchronized (done) {
return !done.get() || !queue.isEmpty();
}
}
@Override
public InputStream nextElement() {
try {
return queue.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
}
}));
}
@Override
public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk) {
final ChannelBuffer channelBuffer = chunk.getContent();
final int bytes = channelBuffer.readableBytes();
if (bytes > 0) {
try {
queue.put(new ChannelBufferInputStream(channelBuffer));
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
}
byteCount.addAndGet(bytes);
}
return clientResponse;
}
@Override
public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
long stopTime = System.currentTimeMillis();
log.debug("Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", query.getId(), url, byteCount.get(), stopTime - responseStartTime, byteCount.get() / (0.0001 * (stopTime - responseStartTime)));
emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
emitter.emit(builder.build("query/node/bytes", byteCount.get()));
synchronized (done) {
try {
// An empty byte array is put at the end to give SequenceInputStream.close() something to close out
// after done is set to true, regardless of the rest of the stream's state.
queue.put(ByteSource.empty().openStream());
} catch (InterruptedException e) {
log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
Thread.currentThread().interrupt();
throw Throwables.propagate(e);
} catch (IOException e) {
// This should never happen
throw Throwables.propagate(e);
} finally {
done.set(true);
}
}
return ClientResponse.<InputStream>finished(clientResponse.getObj());
}
@Override
public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
// Don't wait for lock in case the lock had something to do with the error
synchronized (done) {
done.set(true);
// Make a best effort to put an error-throwing stream into the queue in case something is waiting on the take()
// If nothing is waiting on take(), this will be closed out anyways.
queue.offer(new InputStream() {
@Override
public int read() throws IOException {
throw new IOException(e);
}
});
}
}
};
future = httpClient.go(new Request(HttpMethod.POST, new URL(url)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), responseHandler);
queryWatcher.registerQuery(query, future);
openConnections.getAndIncrement();
Futures.addCallback(future, new FutureCallback<InputStream>() {
@Override
public void onSuccess(InputStream result) {
openConnections.getAndDecrement();
}
@Override
public void onFailure(Throwable t) {
openConnections.getAndDecrement();
if (future.isCancelled()) {
// forward the cancellation to underlying queriable node
try {
StatusResponseHolder res = httpClient.go(new Request(HttpMethod.DELETE, new URL(cancelUrl)).setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE, isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON), new StatusResponseHandler(Charsets.UTF_8)).get();
if (res.getStatus().getCode() >= 500) {
throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].", query, res.getStatus().getCode(), res.getStatus().getReasonPhrase());
}
} catch (IOException | ExecutionException | InterruptedException e) {
Throwables.propagate(e);
}
}
}
});
} catch (IOException e) {
throw Throwables.propagate(e);
}
Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
@Override
public JsonParserIterator<T> make() {
return new JsonParserIterator<T>(typeRef, future, url);
}
@Override
public void cleanup(JsonParserIterator<T> iterFromMake) {
CloseQuietly.close(iterFromMake);
}
});
// avoid the cost of de-serializing and then re-serializing again when adding to cache
if (!isBySegment) {
retVal = Sequences.map(retVal, toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
}
return retVal;
}
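The response handler above bridges Netty's callback-driven chunks to a blocking reader: each chunk's InputStream is put into a LinkedBlockingQueue, and a SequenceInputStream drains the queue through a custom Enumeration, with an empty sentinel stream plus a done flag marking the end. A trimmed standalone sketch of that producer/consumer bridge (chunk contents are made up; note that, as in the code above, the sentinel and the flag are updated under the same lock that hasMoreElements() takes, so the consumer cannot block forever after the last chunk):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.Enumeration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class ChunkStreamSketch {
    public static void main(String[] args) throws IOException {
        final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
        final AtomicBoolean done = new AtomicBoolean(false);
        // Producer: push chunks, then an empty sentinel stream, then set done.
        new Thread(() -> {
            try {
                queue.put(new ByteArrayInputStream("hello ".getBytes()));
                queue.put(new ByteArrayInputStream("world".getBytes()));
                synchronized (done) {
                    queue.put(new ByteArrayInputStream(new byte[0]));  // sentinel
                    done.set(true);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();
        // Consumer: SequenceInputStream concatenates chunks as they arrive.
        InputStream in = new SequenceInputStream(new Enumeration<InputStream>() {
            @Override
            public boolean hasMoreElements() {
                synchronized (done) {
                    return !done.get() || !queue.isEmpty();
                }
            }

            @Override
            public InputStream nextElement() {
                try {
                    return queue.take();  // blocks until the next chunk is available
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
        });
        int b;
        while ((b = in.read()) != -1) {
            System.out.print((char) b);  // prints "hello world"
        }
        in.close();
    }
}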