Use of org.apache.accumulo.fate.util.LoggingRunnable in the Apache Accumulo project.
From the class TServerUtils, method startTServer.
/**
 * Start the appropriate Thrift server (SSL, SASL, blocking thread-pool, or the default custom
 * half-async server) for the given parameters. Non-null SSL parameters will cause an SSL server
 * to be started. Each candidate address is tried in order; the first successful bind wins, and
 * the server is started on a daemon thread before this method returns.
 *
 * @return A ServerAddress encapsulating the Thrift server created and the host/port which it is bound to.
 * @throws TTransportException
 *           if the server could not be bound to any of the given addresses
 */
public static ServerAddress startTServer(ThriftServerType serverType, TimedProcessor processor, TProtocolFactory protocolFactory, String serverName, String threadName, int numThreads, int numSTThreads, long timeBetweenThreadChecks, long maxMessageSize, SslConnectionParams sslParams, SaslServerConnectionParams saslParams, long serverSocketTimeout, HostAndPort... addresses) throws TTransportException {
    // Combining SSL and SASL is presently not supported. It's hypothetically possible, but it would
    // require changes in how the transports work at the Thrift layer to ensure that both the SSL and
    // SASL handshakes function; SASL's quality of protection already addresses privacy issues.
    checkArgument(!(sslParams != null && saslParams != null), "Cannot start a Thrift server using both SSL and SASL");
    ServerAddress serverAddress = null;
    // Try each candidate address in turn; stop at the first one we can bind.
    for (HostAndPort address : addresses) {
        try {
            switch (serverType) {
                case SSL:
                    log.debug("Instantiating SSL Thrift server");
                    serverAddress = createSslThreadPoolServer(address, processor, protocolFactory, serverSocketTimeout, sslParams, serverName, numThreads, numSTThreads, timeBetweenThreadChecks);
                    break;
                case SASL:
                    log.debug("Instantiating SASL Thrift server");
                    serverAddress = createSaslThreadPoolServer(address, processor, protocolFactory, serverSocketTimeout, saslParams, serverName, threadName, numThreads, numSTThreads, timeBetweenThreadChecks);
                    break;
                case THREADPOOL:
                    log.debug("Instantiating unsecure TThreadPool Thrift server");
                    serverAddress = createBlockingServer(address, processor, protocolFactory, maxMessageSize, serverName, numThreads, numSTThreads, timeBetweenThreadChecks);
                    break;
                // Intentional passthrough -- Our custom wrapper around HsHa is the default
                case CUSTOM_HS_HA:
                default:
                    log.debug("Instantiating default, unsecure custom half-async Thrift server");
                    serverAddress = createNonBlockingServer(address, processor, protocolFactory, serverName, threadName, numThreads, numSTThreads, timeBetweenThreadChecks, maxMessageSize);
            }
            // Successful bind: stop trying further addresses.
            break;
        } catch (TTransportException e) {
            // Pass the exception itself so the full cause/stack trace is retained in the log,
            // instead of only the (often uninformative) message text.
            log.warn("Error attempting to create server at {}", address, e);
        }
    }
    if (null == serverAddress) {
        throw new TTransportException("Unable to create server on addresses: " + Arrays.toString(addresses));
    }
    final TServer finalServer = serverAddress.server;
    Runnable serveTask = new Runnable() {
        @Override
        public void run() {
            try {
                finalServer.serve();
            } catch (Error e) {
                // An Error (not Exception) from the serve loop is unrecoverable; halt the process.
                Halt.halt("Unexpected error in TThreadPoolServer " + e + ", halting.", 1);
            }
        }
    };
    serveTask = new LoggingRunnable(TServerUtils.log, serveTask);
    Thread thread = new Daemon(serveTask, threadName);
    thread.start();
    // check for the special "bind to everything address"
    if (serverAddress.address.getHost().equals("0.0.0.0")) {
        // can't get the address from the bind, so we'll do our best to invent our hostname
        try {
            serverAddress = new ServerAddress(finalServer, HostAndPort.fromParts(InetAddress.getLocalHost().getHostName(), serverAddress.address.getPort()));
        } catch (UnknownHostException e) {
            throw new TTransportException(e);
        }
    }
    return serverAddress;
}
Use of org.apache.accumulo.fate.util.LoggingRunnable in the Apache Accumulo project.
From the class Fate, method startTransactionRunners.
/**
 * Launches the specified number of worker threads that execute FATE repo operations.
 *
 * @param numThreads
 *          how many "Repo runner" daemon threads to create
 */
public void startTransactionRunners(int numThreads) {
    // Gives each worker thread a unique sequential number in its name.
    final AtomicInteger workerNumber = new AtomicInteger(0);
    ThreadFactory daemonFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable task) {
            String name = "Repo runner " + workerNumber.getAndIncrement();
            // Wrap in LoggingRunnable so uncaught exceptions are logged rather than lost.
            Thread worker = new Thread(new LoggingRunnable(log, task), name);
            worker.setDaemon(true);
            return worker;
        }
    };
    executor = Executors.newFixedThreadPool(numThreads, daemonFactory);
    // Keep every thread in the fixed pool busy with a TransactionRunner.
    int submitted = 0;
    while (submitted < numThreads) {
        executor.execute(new TransactionRunner());
        submitted++;
    }
}
Use of org.apache.accumulo.fate.util.LoggingRunnable in the Apache Accumulo project.
From the class SecurityUtil, method startTicketRenewalThread.
/**
 * Start a daemon thread that periodically attempts to renew the current Kerberos user's ticket.
 *
 * @param ugi
 *          The current Kerberos user.
 * @param renewalPeriod
 *          The amount of time between attempting renewals.
 */
static void startTicketRenewalThread(final UserGroupInformation ugi, final long renewalPeriod) {
    Runnable renewalLoop = new Runnable() {
        @Override
        public void run() {
            for (;;) {
                try {
                    renewalLog.debug("Invoking renewal attempt for Kerberos ticket");
                    // Although we invoke this "frequently", the Hadoop implementation only
                    // performs the actual re-login at 80% of the ticket lifetime.
                    ugi.checkTGTAndReloginFromKeytab();
                } catch (IOException e) {
                    // Should failures to renew the ticket be retried more quickly?
                    renewalLog.error("Failed to renew Kerberos ticket", e);
                }
                // Pause before the next renewal check.
                try {
                    Thread.sleep(renewalPeriod);
                } catch (InterruptedException e) {
                    renewalLog.error("Renewal thread interrupted", e);
                    // Restore the interrupt flag and exit the loop.
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
    };
    Thread renewalThread = new Daemon(new LoggingRunnable(renewalLog, renewalLoop));
    renewalThread.setName("Kerberos Ticket Renewal");
    renewalThread.start();
}
Use of org.apache.accumulo.fate.util.LoggingRunnable in the Apache Accumulo project.
From the class ProblemReports, method deleteProblemReport.
/**
 * Asynchronously deletes a previously-filed problem report. Reports for metadata-level tables
 * live in ZooKeeper; all others live in the metadata table. Failures are logged, never thrown.
 *
 * @param table
 *          the table the report was filed against
 * @param pType
 *          the type of problem being cleared
 * @param resource
 *          the resource the report referenced
 */
public void deleteProblemReport(Table.ID table, ProblemType pType, String resource) {
    final ProblemReport report = new ProblemReport(table, pType, resource, null);
    Runnable deletionTask = new Runnable() {
        @Override
        public void run() {
            try {
                if (isMeta(report.getTableId())) {
                    // file report in zookeeper
                    report.removeFromZooKeeper();
                } else {
                    // file report in metadata table
                    report.removeFromMetadataTable(context);
                }
            } catch (Exception e) {
                log.error("Failed to delete problem report {} {} {}", report.getTableId(), report.getProblemType(), report.getResource(), e);
            }
        }
    };
    try {
        reportExecutor.execute(new LoggingRunnable(log, deletionTask));
    } catch (RejectedExecutionException ree) {
        // Executor refused the task (e.g. shutting down or saturated); log and move on.
        log.error("Failed to delete problem report {} {} {} {}", report.getTableId(), report.getProblemType(), report.getResource(), ree.getMessage());
    }
}
Use of org.apache.accumulo.fate.util.LoggingRunnable in the Apache Accumulo project.
From the class TabletServerLogger, method startLogMaker.
/**
 * Lazily starts the single background thread that pre-creates write-ahead logs and hands them
 * to consumers via the {@code nextLog} queue. Idempotent: once the log-maker pool exists,
 * subsequent calls return immediately. Synchronized so only one caller can create the pool.
 */
private synchronized void startLogMaker() {
    // Already running -- nothing to do.
    if (nextLogMaker != null) {
        return;
    }
    nextLogMaker = new SimpleThreadPool(1, "WALog creator");
    nextLogMaker.submit(new LoggingRunnable(log, new Runnable() {
        @Override
        public void run() {
            final ServerResources conf = tserver.getServerConfig();
            final VolumeManager fs = conf.getFileSystem();
            // Keep one WAL "on deck" until the pool is shut down.
            while (!nextLogMaker.isShutdown()) {
                DfsLogger alog = null;
                try {
                    log.debug("Creating next WAL");
                    alog = new DfsLogger(conf, syncCounter, flushCounter);
                    alog.open(tserver.getClientAddressString());
                    String fileName = alog.getFileName();
                    log.debug("Created next WAL " + fileName);
                    tserver.addNewLogMarker(alog);
                    // Block until a consumer takes the WAL; if it sits unused for 12 hours,
                    // log that fact and keep offering the same one.
                    while (!nextLog.offer(alog, 12, TimeUnit.HOURS)) {
                        log.info("Our WAL was not used for 12 hours: {}", fileName);
                    }
                } catch (Exception t) {
                    log.error("Failed to open WAL", t);
                    if (null != alog) {
                        // Close the partially-created log object before trying to create a new one.
                        try {
                            alog.close();
                        } catch (Exception e) {
                            log.error("Failed to close WAL after it failed to open", e);
                        }
                        // Try to avoid leaving a bunch of empty WALs lying around
                        try {
                            Path path = alog.getPath();
                            if (fs.exists(path)) {
                                fs.delete(path);
                            }
                        } catch (Exception e) {
                            log.warn("Failed to delete a WAL that failed to open", e);
                        }
                    }
                    // Hand the failure to the consumer by offering the exception itself into the
                    // queue -- presumably the reader distinguishes Throwable entries from
                    // DfsLogger entries (TODO confirm against the nextLog consumer).
                    try {
                        nextLog.offer(t, 12, TimeUnit.HOURS);
                    } catch (InterruptedException ex) {
                        // Best-effort handoff; deliberately ignored so the loop can retry.
                    }
                }
            }
        }
    }));
}
Aggregations