Example 1 with ServerAddress

use of org.apache.accumulo.server.rpc.ServerAddress in project accumulo by apache.

the class Proxy method execute.

@Override
public void execute(final String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(Proxy.class.getName(), args);
    Properties props = new Properties();
    if (opts.prop != null) {
        props = opts.prop;
    } else {
        try (InputStream is = this.getClass().getClassLoader().getResourceAsStream("proxy.properties")) {
            if (is != null) {
                props.load(is);
            } else {
                System.err.println("proxy.properties needs to be specified as argument (using -p) or on the classpath (by putting the file in conf/)");
                System.exit(-1);
            }
        }
    }
    boolean useMini = Boolean.parseBoolean(props.getProperty(USE_MINI_ACCUMULO_KEY, USE_MINI_ACCUMULO_DEFAULT));
    boolean useMock = Boolean.parseBoolean(props.getProperty(USE_MOCK_INSTANCE_KEY, USE_MOCK_INSTANCE_DEFAULT));
    String instance = props.getProperty(ACCUMULO_INSTANCE_NAME_KEY);
    String zookeepers = props.getProperty(ZOOKEEPERS_KEY);
    if (!useMini && !useMock && instance == null) {
        System.err.println("Properties file must contain one of : useMiniAccumulo=true, useMockInstance=true, or instance=<instance name>");
        System.exit(1);
    }
    if (instance != null && zookeepers == null) {
        System.err.println("When instance is set in properties file, zookeepers must also be set.");
        System.exit(1);
    }
    if (!props.containsKey("port")) {
        System.err.println("No port property");
        System.exit(1);
    }
    if (useMini) {
        log.info("Creating mini cluster");
        final File folder = Files.createTempDirectory(System.currentTimeMillis() + "").toFile();
        final MiniAccumuloCluster accumulo = new MiniAccumuloCluster(folder, "secret");
        accumulo.start();
        props.setProperty("instance", accumulo.getConfig().getInstanceName());
        props.setProperty("zookeepers", accumulo.getZooKeepers());
        Runtime.getRuntime().addShutdownHook(new Thread() {

            @Override
            public void start() {
                try {
                    accumulo.stop();
                } catch (Exception e) {
                    throw new RuntimeException();
                } finally {
                    if (!folder.delete())
                        log.warn("Unexpected error removing {}", folder);
                }
            }
        });
    }
    Class<? extends TProtocolFactory> protoFactoryClass = Class.forName(props.getProperty("protocolFactory", TCompactProtocol.Factory.class.getName())).asSubclass(TProtocolFactory.class);
    TProtocolFactory protoFactory = protoFactoryClass.newInstance();
    int port = Integer.parseInt(props.getProperty("port"));
    String hostname = props.getProperty(THRIFT_SERVER_HOSTNAME, THRIFT_SERVER_HOSTNAME_DEFAULT);
    HostAndPort address = HostAndPort.fromParts(hostname, port);
    ServerAddress server = createProxyServer(address, protoFactory, props);
    // Wait for the server to come up
    while (!server.server.isServing()) {
        Thread.sleep(100);
    }
    log.info("Proxy server started on {}", server.getAddress());
    while (server.server.isServing()) {
        Thread.sleep(1000);
    }
}
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) MiniAccumuloCluster(org.apache.accumulo.minicluster.MiniAccumuloCluster) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) MetricsFactory(org.apache.accumulo.server.metrics.MetricsFactory) LoggerFactory(org.slf4j.LoggerFactory) Properties(java.util.Properties) IOException(java.io.IOException) HostAndPort(org.apache.accumulo.core.util.HostAndPort) AccumuloProxy(org.apache.accumulo.proxy.thrift.AccumuloProxy) File(java.io.File)
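
Across the examples on this page, ServerAddress is consumed as a simple pairing of the started Thrift TServer with the HostAndPort it actually bound to (server.server, sa.address, address.getServer(), address.getAddress()). The sketch below captures only that implied shape; it is inferred from the call sites, not copied from the project's source.

import org.apache.accumulo.core.util.HostAndPort;
import org.apache.thrift.server.TServer;

// Sketch of the shape the call sites above rely on; inferred from usage, not from Accumulo itself.
public class ServerAddress {

    public final TServer server;      // the running Thrift server
    public final HostAndPort address; // the host/port the server bound to

    public ServerAddress(TServer server, HostAndPort address) {
        this.server = server;
        this.address = address;
    }

    public TServer getServer() {
        return server;
    }

    public HostAndPort getAddress() {
        return address;
    }
}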

Example 2 with ServerAddress

use of org.apache.accumulo.server.rpc.ServerAddress in project accumulo by apache.

the class Proxy method createProxyServer.

public static ServerAddress createProxyServer(HostAndPort address, TProtocolFactory protocolFactory, Properties properties, ClientConfiguration clientConf) throws Exception {
    final int numThreads = Integer.parseInt(properties.getProperty(THRIFT_THREAD_POOL_SIZE_KEY, THRIFT_THREAD_POOL_SIZE_DEFAULT));
    final long maxFrameSize = ConfigurationTypeHelper.getFixedMemoryAsBytes(properties.getProperty(THRIFT_MAX_FRAME_SIZE_KEY, THRIFT_MAX_FRAME_SIZE_DEFAULT));
    final int simpleTimerThreadpoolSize = Integer.parseInt(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE.getDefaultValue());
    // How frequently to try to resize the thread pool
    final long threadpoolResizeInterval = 1000l * 5;
    // No timeout
    final long serverSocketTimeout = 0l;
    // Use the new hadoop metrics2 support
    final MetricsFactory metricsFactory = new MetricsFactory(false);
    final String serverName = "Proxy", threadName = "Accumulo Thrift Proxy";
    // create the implementation of the proxy interface
    ProxyServer impl = new ProxyServer(properties);
    // Wrap the implementation -- translate some exceptions
    AccumuloProxy.Iface wrappedImpl = RpcWrapper.service(impl);
    // Create the processor from the implementation
    TProcessor processor = new AccumuloProxy.Processor<>(wrappedImpl);
    // Get the type of thrift server to instantiate
    final String serverTypeStr = properties.getProperty(THRIFT_SERVER_TYPE, THRIFT_SERVER_TYPE_DEFAULT);
    ThriftServerType serverType = DEFAULT_SERVER_TYPE;
    if (!THRIFT_SERVER_TYPE_DEFAULT.equals(serverTypeStr)) {
        serverType = ThriftServerType.get(serverTypeStr);
    }
    SslConnectionParams sslParams = null;
    SaslServerConnectionParams saslParams = null;
    switch(serverType) {
        case SSL:
            sslParams = SslConnectionParams.forClient(ClientContext.convertClientConfig(clientConf));
            break;
        case SASL:
            if (!clientConf.hasSasl()) {
                // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
                log.error("FATAL: SASL thrift server was requested but it is disabled in client configuration");
                throw new RuntimeException("SASL is not enabled in configuration");
            }
            // Kerberos needs to be enabled to use it
            if (!UserGroupInformation.isSecurityEnabled()) {
                // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
                log.error("FATAL: Hadoop security is not enabled");
                throw new RuntimeException();
            }
            // Login via principal and keytab
            final String kerberosPrincipal = properties.getProperty(KERBEROS_PRINCIPAL, ""), kerberosKeytab = properties.getProperty(KERBEROS_KEYTAB, "");
            if (StringUtils.isBlank(kerberosPrincipal) || StringUtils.isBlank(kerberosKeytab)) {
                // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
                log.error("FATAL: Kerberos principal and keytab must be provided");
                throw new RuntimeException();
            }
            UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, kerberosKeytab);
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            log.info("Logged in as {}", ugi.getUserName());
            // The kerberosPrimary set in the SASL server needs to match the principal we're logged in as.
            final String shortName = ugi.getShortUserName();
            log.info("Setting server primary to {}", shortName);
            clientConf.setProperty(ClientProperty.KERBEROS_SERVER_PRIMARY, shortName);
            KerberosToken token = new KerberosToken();
            saslParams = new SaslServerConnectionParams(clientConf, token, null);
            processor = new UGIAssumingProcessor(processor);
            break;
        default:
            // nothing to do -- no extra configuration necessary
            break;
    }
    // Hook up support for tracing for thrift calls
    TimedProcessor timedProcessor = new TimedProcessor(metricsFactory, processor, serverName, threadName);
    // Create the thrift server with our processor and properties
    ServerAddress serverAddr = TServerUtils.startTServer(serverType, timedProcessor, protocolFactory, serverName, threadName, numThreads, simpleTimerThreadpoolSize, threadpoolResizeInterval, maxFrameSize, sslParams, saslParams, serverSocketTimeout, address);
    return serverAddr;
}
Also used : SaslServerConnectionParams(org.apache.accumulo.server.rpc.SaslServerConnectionParams) AccumuloProxy(org.apache.accumulo.proxy.thrift.AccumuloProxy) UGIAssumingProcessor(org.apache.accumulo.server.rpc.UGIAssumingProcessor) TimedProcessor(org.apache.accumulo.server.rpc.TimedProcessor) TProcessor(org.apache.thrift.TProcessor) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) SslConnectionParams(org.apache.accumulo.core.rpc.SslConnectionParams) ThriftServerType(org.apache.accumulo.server.rpc.ThriftServerType) MetricsFactory(org.apache.accumulo.server.metrics.MetricsFactory) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
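
Both Proxy methods are driven by a java.util.Properties object loaded from proxy.properties (or passed with -p). Only a few keys appear literally in the code (port, protocolFactory, instance, zookeepers); the rest are read through constants whose string values are not visible in this excerpt. The snippet below is therefore only a hypothetical illustration of how a minimal configuration could be assembled in code, with assumed key names where the constant is not shown.

import java.util.Properties;
import org.apache.thrift.protocol.TCompactProtocol;

// Hypothetical helper building a minimal proxy configuration; key names other than
// the literal ones visible above ("port", "protocolFactory", "instance", "zookeepers")
// are assumptions for illustration.
static Properties minimalProxyProperties() {
    Properties props = new Properties();
    props.setProperty("port", "42424");                                             // required: execute() exits if missing
    props.setProperty("protocolFactory", TCompactProtocol.Factory.class.getName()); // optional: defaults to TCompactProtocol
    props.setProperty("instance", "myInstance");                                    // assumed to match ACCUMULO_INSTANCE_NAME_KEY
    props.setProperty("zookeepers", "zkhost:2181");                                 // required whenever instance is set
    return props;
}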

Example 3 with ServerAddress

use of org.apache.accumulo.server.rpc.ServerAddress in project accumulo by apache.

the class Master method run.

public void run() throws IOException, InterruptedException, KeeperException {
    final String zroot = ZooUtil.getRoot(getInstance());
    // ACCUMULO-4424 Put up the Thrift servers before getting the lock as a sign of process health when a hot-standby
    // 
    // Start the Master's Client service
    clientHandler = new MasterClientServiceHandler(this);
    // Ensure that calls before the master gets the lock fail
    Iface haProxy = HighlyAvailableServiceWrapper.service(clientHandler, this);
    Iface rpcProxy = RpcWrapper.service(haProxy);
    final Processor<Iface> processor;
    if (ThriftServerType.SASL == getThriftServerType()) {
        Iface tcredsProxy = TCredentialsUpdatingWrapper.service(rpcProxy, clientHandler.getClass(), getConfiguration());
        processor = new Processor<>(tcredsProxy);
    } else {
        processor = new Processor<>(rpcProxy);
    }
    ServerAddress sa = TServerUtils.startServer(this, hostname, Property.MASTER_CLIENTPORT, processor, "Master", "Master Client Service Handler", null, Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
    clientService = sa.server;
    log.info("Started Master client service at {}", sa.address);
    // Start the replication coordinator which assigns tservers to service replication requests
    MasterReplicationCoordinator impl = new MasterReplicationCoordinator(this);
    ReplicationCoordinator.Iface haReplicationProxy = HighlyAvailableServiceWrapper.service(impl, this);
    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor = new ReplicationCoordinator.Processor<>(RpcWrapper.service(haReplicationProxy));
    ServerAddress replAddress = TServerUtils.startServer(this, hostname, Property.MASTER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor, "Master Replication Coordinator", "Replication Coordinator", null, Property.MASTER_REPLICATION_COORDINATOR_MINTHREADS, Property.MASTER_REPLICATION_COORDINATOR_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
    log.info("Started replication coordinator service at " + replAddress.address);
    // block until we can obtain the ZK lock for the master
    getMasterLock(zroot + Constants.ZMASTER_LOCK);
    recoveryManager = new RecoveryManager(this);
    TableManager.getInstance().addObserver(this);
    StatusThread statusThread = new StatusThread();
    statusThread.start();
    MigrationCleanupThread migrationCleanupThread = new MigrationCleanupThread();
    migrationCleanupThread.start();
    tserverSet.startListeningForTabletServerChanges();
    ZooReaderWriter zReaderWriter = ZooReaderWriter.getInstance();
    zReaderWriter.getChildren(zroot + Constants.ZRECOVERY, new Watcher() {

        @Override
        public void process(WatchedEvent event) {
            nextEvent.event("Noticed recovery changes", event.getType());
            try {
                // watcher only fires once, add it back
                ZooReaderWriter.getInstance().getChildren(zroot + Constants.ZRECOVERY, this);
            } catch (Exception e) {
                log.error("Failed to add log recovery watcher back", e);
            }
        }
    });
    watchers.add(new TabletGroupWatcher(this, new MetaDataStateStore(this, this), null) {

        @Override
        boolean canSuspendTablets() {
            // Always allow user data tablets to enter suspended state.
            return true;
        }
    });
    watchers.add(new TabletGroupWatcher(this, new RootTabletStateStore(this, this), watchers.get(0)) {

        @Override
        boolean canSuspendTablets() {
            // Allow metadata tablets to enter suspended state only if configured to do so; otherwise they should
            // be immediately reassigned, even if there's a global table.suspension.duration setting.
            return getConfiguration().getBoolean(Property.MASTER_METADATA_SUSPENDABLE);
        }
    });
    watchers.add(new TabletGroupWatcher(this, new ZooTabletStateStore(new ZooStore(zroot)), watchers.get(1)) {

        @Override
        boolean canSuspendTablets() {
            // Never allow root tablet to enter suspended state.
            return false;
        }
    });
    for (TabletGroupWatcher watcher : watchers) {
        watcher.start();
    }
    // Once we are sure the upgrade is complete, we can safely allow fate use.
    waitForMetadataUpgrade.await();
    try {
        final AgeOffStore<Master> store = new AgeOffStore<>(new org.apache.accumulo.fate.ZooStore<Master>(ZooUtil.getRoot(getInstance()) + Constants.ZFATE, ZooReaderWriter.getInstance()), 1000 * 60 * 60 * 8);
        int threads = getConfiguration().getCount(Property.MASTER_FATE_THREADPOOL_SIZE);
        fate = new Fate<>(this, store);
        fate.startTransactionRunners(threads);
        SimpleTimer.getInstance(getConfiguration()).schedule(new Runnable() {

            @Override
            public void run() {
                store.ageOff();
            }
        }, 63000, 63000);
    } catch (KeeperException | InterruptedException e) {
        throw new IOException(e);
    }
    ZooKeeperInitialization.ensureZooKeeperInitialized(zReaderWriter, zroot);
    // Make sure a delegation-token secret key is available (newly generated or recovered from ZooKeeper) before advertising
    // the master client service.
    if (null != authenticationTokenKeyManager && null != keyDistributor) {
        log.info("Starting delegation-token key manager");
        keyDistributor.initialize();
        authenticationTokenKeyManager.start();
        boolean logged = false;
        while (!authenticationTokenKeyManager.isInitialized()) {
            // Print out a status message when we start waiting for the key manager to get initialized
            if (!logged) {
                log.info("Waiting for AuthenticationTokenKeyManager to be initialized");
                logged = true;
            }
            sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        }
        // And log when we are initialized
        log.info("AuthenticationTokenSecretManager is initialized");
    }
    String address = sa.address.toString();
    log.info("Setting master lock data to {}", address);
    masterLock.replaceLockData(address.getBytes());
    while (!clientService.isServing()) {
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
    // Start the daemon to scan the replication table and make units of work
    replicationWorkDriver = new ReplicationDriver(this);
    replicationWorkDriver.start();
    // Start the daemon to assign work to tservers to replicate to our peers
    try {
        replicationWorkAssigner = new WorkDriver(this);
    } catch (AccumuloException | AccumuloSecurityException e) {
        log.error("Caught exception trying to initialize replication WorkDriver", e);
        throw new RuntimeException(e);
    }
    replicationWorkAssigner.start();
    // Advertise that port we used so peers don't have to be told what it is
    ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(getInstance()) + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR, replAddress.address.toString().getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
    // Register replication metrics
    MasterMetricsFactory factory = new MasterMetricsFactory(getConfiguration(), this);
    Metrics replicationMetrics = factory.createReplicationMetrics();
    try {
        replicationMetrics.register();
    } catch (Exception e) {
        log.error("Failed to register replication metrics", e);
    }
    // The master is fully initialized. Clients are allowed to connect now.
    masterInitialized.set(true);
    while (clientService.isServing()) {
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    log.info("Shutting down fate.");
    fate.shutdown();
    log.info("Shutting down timekeeping.");
    timeKeeper.shutdown();
    final long deadline = System.currentTimeMillis() + MAX_CLEANUP_WAIT_TIME;
    statusThread.join(remaining(deadline));
    replicationWorkAssigner.join(remaining(deadline));
    replicationWorkDriver.join(remaining(deadline));
    replAddress.server.stop();
    // Signal that we want it to stop, and wait for it to do so.
    if (authenticationTokenKeyManager != null) {
        authenticationTokenKeyManager.gracefulStop();
        authenticationTokenKeyManager.join(remaining(deadline));
    }
    // Bound each join by the remaining deadline so shutdown completes even if the watchers
    // don't stop
    for (TabletGroupWatcher watcher : watchers) {
        watcher.join(remaining(deadline));
    }
    log.info("exiting");
}
Also used : Processor(org.apache.accumulo.core.master.thrift.MasterClientService.Processor) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) Watcher(org.apache.zookeeper.Watcher) MasterReplicationCoordinator(org.apache.accumulo.master.replication.MasterReplicationCoordinator) ReplicationCoordinator(org.apache.accumulo.core.replication.thrift.ReplicationCoordinator) WatchedEvent(org.apache.zookeeper.WatchedEvent) Iface(org.apache.accumulo.core.master.thrift.MasterClientService.Iface) Metrics(org.apache.accumulo.server.metrics.Metrics) RootTabletStateStore(org.apache.accumulo.server.master.state.RootTabletStateStore) WorkDriver(org.apache.accumulo.master.replication.WorkDriver) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) AgeOffStore(org.apache.accumulo.fate.AgeOffStore) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ZooReaderWriter(org.apache.accumulo.server.zookeeper.ZooReaderWriter) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) ZooStore(org.apache.accumulo.server.master.state.ZooStore) IOException(java.io.IOException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) NoAuthException(org.apache.zookeeper.KeeperException.NoAuthException) WalMarkerException(org.apache.accumulo.server.log.WalStateManager.WalMarkerException) TException(org.apache.thrift.TException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) TTransportException(org.apache.thrift.transport.TTransportException) KeeperException(org.apache.zookeeper.KeeperException) RecoveryManager(org.apache.accumulo.master.recovery.RecoveryManager) MetaDataStateStore(org.apache.accumulo.server.master.state.MetaDataStateStore) ReplicationDriver(org.apache.accumulo.master.replication.ReplicationDriver) MasterMetricsFactory(org.apache.accumulo.master.metrics.MasterMetricsFactory) ZooTabletStateStore(org.apache.accumulo.server.master.state.ZooTabletStateStore)
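
The join calls near the end of run() bound their wait with a remaining(deadline) helper that is not part of this excerpt. A plausible sketch, assuming it simply returns the time left until the cleanup deadline (floored at 1 ms so Thread.join(0), which waits forever, is never passed by accident):

// Hypothetical sketch of the remaining(deadline) helper used by the join calls above;
// the actual Master method may differ.
private static long remaining(long deadline) {
    return Math.max(1, deadline - System.currentTimeMillis());
}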

Example 4 with ServerAddress

use of org.apache.accumulo.server.rpc.ServerAddress in project accumulo by apache.

the class SimpleGarbageCollector method startStatsService.

private HostAndPort startStatsService() throws UnknownHostException {
    Iface rpcProxy = RpcWrapper.service(this);
    final Processor<Iface> processor;
    if (ThriftServerType.SASL == getThriftServerType()) {
        Iface tcProxy = TCredentialsUpdatingWrapper.service(rpcProxy, getClass(), getConfiguration());
        processor = new Processor<>(tcProxy);
    } else {
        processor = new Processor<>(rpcProxy);
    }
    int[] port = getConfiguration().getPort(Property.GC_PORT);
    HostAndPort[] addresses = TServerUtils.getHostAndPorts(this.opts.getAddress(), port);
    long maxMessageSize = getConfiguration().getAsBytes(Property.GENERAL_MAX_MESSAGE_SIZE);
    try {
        ServerAddress server = TServerUtils.startTServer(getConfiguration(), getThriftServerType(), processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000, maxMessageSize, getServerSslParams(), getSaslParams(), 0, addresses);
        log.debug("Starting garbage collector listening on " + server.address);
        return server.address;
    } catch (Exception ex) {
        // ACCUMULO-3651 Level changed to error and FATAL added to message for slf4j compatibility
        log.error("FATAL:", ex);
        throw new RuntimeException(ex);
    }
}
Also used : HostAndPort(org.apache.accumulo.core.util.HostAndPort) Iface(org.apache.accumulo.core.gc.thrift.GCMonitorService.Iface) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) FileNotFoundException(java.io.FileNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)

Example 5 with ServerAddress

use of org.apache.accumulo.server.rpc.ServerAddress in project accumulo by apache.

the class TServerUtilsTest method testStartServerUsedPortWithSearch.

@Test
public void testStartServerUsedPortWithSearch() throws Exception {
    TServer server = null;
    int[] port = findTwoFreeSequentialPorts(1024);
    // Bind to the port
    InetAddress addr = InetAddress.getByName("localhost");
    ((ConfigurationCopy) factory.getSystemConfiguration()).set(Property.TSERV_CLIENTPORT, Integer.toString(port[0]));
    ((ConfigurationCopy) factory.getSystemConfiguration()).set(Property.TSERV_PORTSEARCH, "true");
    try (ServerSocket s = new ServerSocket(port[0], 50, addr)) {
        ServerAddress address = startServer();
        assertNotNull(address);
        server = address.getServer();
        assertNotNull(server);
        assertEquals(port[1], address.getAddress().getPort());
    } finally {
        if (null != server) {
            TServerUtils.stopTServer(server);
        }
    }
}
Also used : ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) TServer(org.apache.thrift.server.TServer) ServerAddress(org.apache.accumulo.server.rpc.ServerAddress) ServerSocket(java.net.ServerSocket) TServerSocket(org.apache.thrift.transport.TServerSocket) InetAddress(java.net.InetAddress) Test(org.junit.Test)
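
The test relies on a findTwoFreeSequentialPorts helper that is not shown here. A hedged sketch of what such a helper might do, with names and details assumed rather than taken from TServerUtilsTest: scan upward from the starting port and return the first pair of adjacent ports that can both be bound on localhost.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;

// Hypothetical helper: find two consecutive free ports at or above 'start'.
// The real TServerUtilsTest helper may differ; this is only an illustration.
static int[] findTwoFreeSequentialPorts(int start) throws IOException {
    for (int port = start; port < 65534; port++) {
        if (isFree(port) && isFree(port + 1)) {
            return new int[] { port, port + 1 };
        }
    }
    throw new IOException("no two free sequential ports found at or above " + start);
}

// Try to bind the port on localhost; if the bind fails the port is considered in use.
static boolean isFree(int port) {
    try (ServerSocket s = new ServerSocket(port, 50, InetAddress.getByName("localhost"))) {
        return true;
    } catch (IOException bindFailed) {
        return false;
    }
}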

Aggregations

ServerAddress (org.apache.accumulo.server.rpc.ServerAddress): 12 usages
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 5 usages
TServer (org.apache.thrift.server.TServer): 5 usages
Test (org.junit.Test): 5 usages
IOException (java.io.IOException): 4 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 3 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 3 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 3 usages
TException (org.apache.thrift.TException): 3 usages
KeeperException (org.apache.zookeeper.KeeperException): 3 usages
InetAddress (java.net.InetAddress): 2 usages
ServerSocket (java.net.ServerSocket): 2 usages
UnknownHostException (java.net.UnknownHostException): 2 usages
ThriftSecurityException (org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException): 2 usages
ThriftTableOperationException (org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException): 2 usages
Property (org.apache.accumulo.core.conf.Property): 2 usages
Iface (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface): 2 usages
Processor (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor): 2 usages
HostAndPort (org.apache.accumulo.core.util.HostAndPort): 2 usages
AccumuloProxy (org.apache.accumulo.proxy.thrift.AccumuloProxy): 2 usages
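
Taken together, the examples share one pattern: start a Thrift service through TServerUtils, keep the returned ServerAddress, log or advertise its address, and poll the embedded TServer until it reports that it is serving. A condensed, hypothetical outline of that pattern (argument values vary per caller and are not meant as working configuration):

// Hypothetical outline of the common pattern above; the startTServer arguments
// differ for each service, so treat this as a shape, not working configuration.
ServerAddress sa = TServerUtils.startTServer(serverType, processor, protocolFactory,
    serverName, threadName, numThreads, simpleTimerThreads, resizeIntervalMillis,
    maxFrameSize, sslParams, saslParams, socketTimeoutMillis, address);

// Do not advertise the address until the embedded TServer is actually serving.
while (!sa.getServer().isServing()) {
    Thread.sleep(100);
}
log.info("Service available at {}", sa.getAddress());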