Example 1 with IZooReaderWriter

Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

The class BackupMasterIT, method test:

@Test
public void test() throws Exception {
    // wait for master
    UtilWaitThread.sleep(1000);
    // create a backup
    Process backup = exec(Master.class);
    try {
        String secret = getCluster().getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter writer = new ZooReaderWriterFactory().getZooReaderWriter(cluster.getZooKeepers(), 30 * 1000, secret);
        String root = "/accumulo/" + getConnector().getInstance().getInstanceID();
        List<String> children = Collections.emptyList();
        // wait for 2 lock entries
        do {
            UtilWaitThread.sleep(100);
            children = writer.getChildren(root + "/masters/lock");
        } while (children.size() != 2);
        Collections.sort(children);
        // wait for the backup master to learn to be the backup
        UtilWaitThread.sleep(1000);
        // generate a false zookeeper event
        String lockPath = root + "/masters/lock/" + children.get(0);
        byte[] data = writer.getData(lockPath, null);
        writer.getZooKeeper().setData(lockPath, data, -1);
        // let it propagate
        UtilWaitThread.sleep(500);
        // kill the master by removing its lock
        writer.recursiveDelete(lockPath, NodeMissingPolicy.FAIL);
        // ensure the backup becomes the master
        getConnector().tableOperations().create(getUniqueNames(1)[0]);
    } finally {
        backup.destroy();
    }
}
Also used: IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) Test(org.junit.Test)
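
The do/while loop above polls ZooKeeper indefinitely, so the test would hang forever if a second lock entry never appeared. A bounded variant of the same wait, using only the getChildren call shown above (the helper name and timeout are illustrative, not part of the Accumulo API; assumes java.util.List and java.util.Collections):

private static List<String> waitForLockEntries(IZooReaderWriter zrw, String lockPath, int expected, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        List<String> children = zrw.getChildren(lockPath);
        if (children.size() == expected) {
            // sequential ephemeral lock nodes sort by sequence number, so after
            // sorting, the first child belongs to the current lock holder
            Collections.sort(children);
            return children;
        }
        // don't spin absurdly fast
        Thread.sleep(100);
    }
    throw new IllegalStateException("Timed out waiting for " + expected + " lock entries at " + lockPath);
}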

Example 2 with IZooReaderWriter

Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

The class MiniAccumuloClusterImpl, method start:

/**
 * Starts Accumulo and Zookeeper processes. Can only be called once.
 */
@Override
public synchronized void start() throws IOException, InterruptedException {
    if (config.useMiniDFS() && miniDFS == null) {
        throw new IllegalStateException("Cannot restart mini when using miniDFS");
    }
    MiniAccumuloClusterControl control = getClusterControl();
    if (config.useExistingInstance()) {
        Configuration acuConf = config.getAccumuloConfiguration();
        Configuration hadoopConf = config.getHadoopConfiguration();
        ConfigurationCopy cc = new ConfigurationCopy(acuConf);
        VolumeManager fs;
        try {
            fs = VolumeManagerImpl.get(cc, hadoopConf);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(fs);
        String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, cc, hadoopConf);
        IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(cc.get(Property.INSTANCE_ZK_HOST), (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT), cc.get(Property.INSTANCE_SECRET));
        String rootPath = ZooUtil.getRoot(instanceIdFromFile);
        String instanceName = null;
        try {
            for (String name : zrw.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
                String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
                byte[] bytes = zrw.getData(instanceNamePath, new Stat());
                String iid = new String(bytes, UTF_8);
                if (iid.equals(instanceIdFromFile)) {
                    instanceName = name;
                }
            }
        } catch (KeeperException e) {
            throw new RuntimeException("Unable to read instance name from zookeeper.", e);
        }
        if (instanceName == null)
            throw new RuntimeException("Unable to read instance name from zookeeper.");
        config.setInstanceName(instanceName);
        if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath))
            throw new RuntimeException("The Accumulo instance being used is already running. Aborting.");
    } else {
        if (!initialized) {
            Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    try {
                        MiniAccumuloClusterImpl.this.stop();
                    } catch (IOException e) {
                        log.error("IOException while attempting to stop the MiniAccumuloCluster.", e);
                    } catch (InterruptedException e) {
                        log.error("The stopping of MiniAccumuloCluster was interrupted.", e);
                    }
                }
            });
        }
        if (!config.useExistingZooKeepers())
            control.start(ServerType.ZOOKEEPER);
        if (!initialized) {
            if (!config.useExistingZooKeepers()) {
                // sleep a little bit to let zookeeper come up before calling init, seems to work better
                long startTime = System.currentTimeMillis();
                while (true) {
                    Socket s = null;
                    try {
                        s = new Socket("localhost", config.getZooKeeperPort());
                        s.setReuseAddress(true);
                        s.getOutputStream().write("ruok\n".getBytes());
                        s.getOutputStream().flush();
                        byte[] buffer = new byte[100];
                        int n = s.getInputStream().read(buffer);
                        if (n >= 4 && new String(buffer, 0, 4).equals("imok"))
                            break;
                    } catch (Exception e) {
                        if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
                            throw new ZooKeeperBindException("Zookeeper did not start within " + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in " + config.getLogDir() + " for errors.  Last exception: " + e);
                        }
                        // Don't spin absurdly fast
                        Thread.sleep(250);
                    } finally {
                        if (s != null)
                            s.close();
                    }
                }
            }
            LinkedList<String> args = new LinkedList<>();
            args.add("--instance-name");
            args.add(config.getInstanceName());
            args.add("--user");
            args.add(config.getRootUserName());
            args.add("--clear-instance-name");
            // If we aren't using SASL, add in the root password
            final String saslEnabled = config.getSiteConfig().get(Property.INSTANCE_RPC_SASL_ENABLED.getKey());
            if (null == saslEnabled || !Boolean.parseBoolean(saslEnabled)) {
                args.add("--password");
                args.add(config.getRootPassword());
            }
            Process initProcess = exec(Initialize.class, args.toArray(new String[0]));
            int ret = initProcess.waitFor();
            if (ret != 0) {
                throw new RuntimeException("Initialize process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
            }
            initialized = true;
        }
    }
    log.info("Starting MAC against instance {} and zookeeper(s) {}.", config.getInstanceName(), config.getZooKeepers());
    control.start(ServerType.TABLET_SERVER);
    int ret = 0;
    for (int i = 0; i < 5; i++) {
        ret = exec(Main.class, SetGoalState.class.getName(), MasterGoalState.NORMAL.toString()).waitFor();
        if (ret == 0)
            break;
        sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
    if (ret != 0) {
        throw new RuntimeException("Could not set master goal state, process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
    }
    control.start(ServerType.MASTER);
    control.start(ServerType.GARBAGE_COLLECTOR);
    if (null == executor) {
        executor = Executors.newSingleThreadExecutor();
    }
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Configuration(org.apache.hadoop.conf.Configuration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) CachedConfiguration(org.apache.accumulo.core.util.CachedConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) Stat(org.apache.zookeeper.data.Stat) SetGoalState(org.apache.accumulo.master.state.SetGoalState) Path(org.apache.hadoop.fs.Path) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) TimeoutException(java.util.concurrent.TimeoutException) ThriftNotActiveServiceException(org.apache.accumulo.core.client.impl.thrift.ThriftNotActiveServiceException) ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ExecutionException(java.util.concurrent.ExecutionException) LinkedList(java.util.LinkedList) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Socket(java.net.Socket)
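
The inner socket loop in start() is an inline version of ZooKeeper's four-letter-word health check: write "ruok" and expect "imok" back. Extracted on its own, with try-with-resources in place of the manual finally block, the probe could look like the following sketch (the method name is illustrative; it assumes java.net.Socket and the UTF_8 constant used elsewhere in the class):

private static boolean zooKeeperResponding(String host, int port) {
    // send ZooKeeper's "ruok" four-letter command; a healthy server answers "imok"
    try (Socket s = new Socket(host, port)) {
        s.setReuseAddress(true);
        s.getOutputStream().write("ruok\n".getBytes(UTF_8));
        s.getOutputStream().flush();
        byte[] buffer = new byte[100];
        int n = s.getInputStream().read(buffer);
        return n >= 4 && "imok".equals(new String(buffer, 0, 4, UTF_8));
    } catch (IOException e) {
        // not listening yet, or connection refused
        return false;
    }
}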

Example 3 with IZooReaderWriter

Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

The class Master, method upgradeZookeeper:

private void upgradeZookeeper() {
    // 1.5.1 and 1.6.0 both do some state checking after obtaining the zoolock for the
    // monitor and before starting up. It's not tied to the data version at all (and would
    // introduce unnecessary complexity to try to make the master do it), but be aware
    // that the master is not the only thing that may alter zookeeper before starting.
    final int accumuloPersistentVersion = Accumulo.getAccumuloPersistentVersion(fs);
    if (Accumulo.persistentVersionNeedsUpgrade(accumuloPersistentVersion)) {
        // Change to Guava's Verify once we use Guava 17.
        if (null != fate) {
            throw new IllegalStateException("Access to Fate should not have been initialized prior to the Master transitioning to active. Please save all logs and file a bug.");
        }
        Accumulo.abortIfFateTransactions();
        try {
            log.info("Upgrading zookeeper");
            IZooReaderWriter zoo = ZooReaderWriter.getInstance();
            final String zooRoot = ZooUtil.getRoot(getInstance());
            log.debug("Handling updates for version {}", accumuloPersistentVersion);
            log.debug("Cleaning out remnants of logger role.");
            zoo.recursiveDelete(zooRoot + "/loggers", NodeMissingPolicy.SKIP);
            zoo.recursiveDelete(zooRoot + "/dead/loggers", NodeMissingPolicy.SKIP);
            final byte[] zero = new byte[] { '0' };
            log.debug("Initializing recovery area.");
            zoo.putPersistentData(zooRoot + Constants.ZRECOVERY, zero, NodeExistsPolicy.SKIP);
            for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
                log.debug("Prepping table {} for compaction cancellations.", id);
                zoo.putPersistentData(zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_COMPACT_CANCEL_ID, zero, NodeExistsPolicy.SKIP);
            }
            @SuppressWarnings("deprecation") String zpath = zooRoot + Constants.ZCONFIG + "/" + Property.TSERV_WAL_SYNC_METHOD.getKey();
            // is the entire instance set to use flushing vs sync?
            boolean flushDefault = false;
            try {
                byte[] data = zoo.getData(zpath, null);
                if (new String(data, UTF_8).endsWith("flush")) {
                    flushDefault = true;
                }
            } catch (KeeperException.NoNodeException ex) {
                // skip
            }
            for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
                log.debug("Converting table {} WALog setting to Durability", id);
                try {
                    @SuppressWarnings("deprecation") String path = zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_CONF + "/" + Property.TABLE_WALOG_ENABLED.getKey();
                    byte[] data = zoo.getData(path, null);
                    boolean useWAL = Boolean.parseBoolean(new String(data, UTF_8));
                    zoo.recursiveDelete(path, NodeMissingPolicy.FAIL);
                    path = zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_CONF + "/" + Property.TABLE_DURABILITY.getKey();
                    if (useWAL) {
                        if (flushDefault) {
                            zoo.putPersistentData(path, "flush".getBytes(), NodeExistsPolicy.SKIP);
                        } else {
                            zoo.putPersistentData(path, "sync".getBytes(), NodeExistsPolicy.SKIP);
                        }
                    } else {
                        zoo.putPersistentData(path, "none".getBytes(), NodeExistsPolicy.SKIP);
                    }
                } catch (KeeperException.NoNodeException ex) {
                    // skip it
                }
            }
            // create initial namespaces
            String namespaces = ZooUtil.getRoot(getInstance()) + Constants.ZNAMESPACES;
            zoo.putPersistentData(namespaces, new byte[0], NodeExistsPolicy.SKIP);
            for (Pair<String, Namespace.ID> namespace : Iterables.concat(Collections.singleton(new Pair<>(Namespace.ACCUMULO, Namespace.ID.ACCUMULO)), Collections.singleton(new Pair<>(Namespace.DEFAULT, Namespace.ID.DEFAULT)))) {
                String ns = namespace.getFirst();
                Namespace.ID id = namespace.getSecond();
                log.debug("Upgrade creating namespace \"{}\" (ID: {})", ns, id);
                if (!Namespaces.exists(getInstance(), id))
                    TableManager.prepareNewNamespaceState(getInstance().getInstanceID(), id, ns, NodeExistsPolicy.SKIP);
            }
            // create replication table in zk
            log.debug("Upgrade creating table {} (ID: {})", ReplicationTable.NAME, ReplicationTable.ID);
            TableManager.prepareNewTableState(getInstance().getInstanceID(), ReplicationTable.ID, Namespace.ID.ACCUMULO, ReplicationTable.NAME, TableState.OFFLINE, NodeExistsPolicy.SKIP);
            // create root table
            log.debug("Upgrade creating table {} (ID: {})", RootTable.NAME, RootTable.ID);
            TableManager.prepareNewTableState(getInstance().getInstanceID(), RootTable.ID, Namespace.ID.ACCUMULO, RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.SKIP);
            Initialize.initSystemTablesConfig();
            // ensure root user can flush root table
            security.grantTablePermission(rpcCreds(), security.getRootUsername(), RootTable.ID, TablePermission.ALTER_TABLE, Namespace.ID.ACCUMULO);
            // put existing tables in the correct namespaces
            String tables = ZooUtil.getRoot(getInstance()) + Constants.ZTABLES;
            for (String tableId : zoo.getChildren(tables)) {
                Namespace.ID targetNamespace = (MetadataTable.ID.canonicalID().equals(tableId) || RootTable.ID.canonicalID().equals(tableId)) ? Namespace.ID.ACCUMULO : Namespace.ID.DEFAULT;
                log.debug("Upgrade moving table {} (ID: {}) into namespace with ID {}", new String(zoo.getData(tables + "/" + tableId + Constants.ZTABLE_NAME, null), UTF_8), tableId, targetNamespace);
                zoo.putPersistentData(tables + "/" + tableId + Constants.ZTABLE_NAMESPACE, targetNamespace.getUtf8(), NodeExistsPolicy.SKIP);
            }
            // rename metadata table
            log.debug("Upgrade renaming table {} (ID: {}) to {}", MetadataTable.OLD_NAME, MetadataTable.ID, MetadataTable.NAME);
            zoo.putPersistentData(tables + "/" + MetadataTable.ID + Constants.ZTABLE_NAME, Tables.qualify(MetadataTable.NAME).getSecond().getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
            moveRootTabletToRootTable(zoo);
            // add system namespace permissions to existing users
            ZKPermHandler perm = new ZKPermHandler();
            perm.initialize(getInstance().getInstanceID(), true);
            String users = ZooUtil.getRoot(getInstance()) + "/users";
            for (String user : zoo.getChildren(users)) {
                zoo.putPersistentData(users + "/" + user + "/Namespaces", new byte[0], NodeExistsPolicy.SKIP);
                perm.grantNamespacePermission(user, Namespace.ID.ACCUMULO, NamespacePermission.READ);
            }
            perm.grantNamespacePermission("root", Namespace.ID.ACCUMULO, NamespacePermission.ALTER_TABLE);
            // add the currlog location for root tablet current logs
            zoo.putPersistentData(ZooUtil.getRoot(getInstance()) + RootTable.ZROOT_TABLET_CURRENT_LOGS, new byte[0], NodeExistsPolicy.SKIP);
            // create tablet server wal logs node in ZK
            zoo.putPersistentData(ZooUtil.getRoot(getInstance()) + WalStateManager.ZWALS, new byte[0], NodeExistsPolicy.SKIP);
            haveUpgradedZooKeeper = true;
        } catch (Exception ex) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
            log.error("FATAL: Error performing upgrade", ex);
            System.exit(1);
        }
    }
}
Also used: ZKPermHandler(org.apache.accumulo.server.security.handler.ZKPermHandler) Namespace(org.apache.accumulo.core.client.impl.Namespace) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) NoAuthException(org.apache.zookeeper.KeeperException.NoAuthException) WalMarkerException(org.apache.accumulo.server.log.WalStateManager.WalMarkerException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) TTransportException(org.apache.thrift.transport.TTransportException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Pair(org.apache.accumulo.core.util.Pair)
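
Almost every write in upgradeZookeeper() goes through putPersistentData with NodeExistsPolicy.SKIP, which makes the upgrade re-runnable: if the master dies mid-upgrade, repeating the method leaves already-created nodes untouched. A minimal sketch of that pattern, using only calls shown above (the marker path is illustrative, not one Accumulo actually uses):

IZooReaderWriter zoo = ZooReaderWriter.getInstance();
// illustrative path for demonstration only
String marker = ZooUtil.getRoot(getInstance()) + "/upgrade-marker";
// the first call creates the node; any repeat is a no-op because of SKIP
zoo.putPersistentData(marker, new byte[0], NodeExistsPolicy.SKIP);
zoo.putPersistentData(marker, new byte[0], NodeExistsPolicy.SKIP);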

Example 4 with IZooReaderWriter

Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

The class TableChangeStateIT, method findFate:

/**
 * Checks FATE transactions in ZooKeeper, looking for one associated with a compaction. This is a
 * double check that the test is valid, because a running compaction holds a fate transaction
 * lock.
 *
 * @return true if a corresponding fate transaction is found, false otherwise
 */
private boolean findFate(final String tableName) {
    Instance instance = connector.getInstance();
    AdminUtil<String> admin = new AdminUtil<>(false);
    try {
        Table.ID tableId = Tables.getTableId(instance, tableName);
        log.trace("tid: {}", tableId);
        String secret = cluster.getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter zk = new ZooReaderWriterFactory().getZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), secret);
        ZooStore<String> zs = new ZooStore<>(ZooUtil.getRoot(instance) + Constants.ZFATE, zk);
        AdminUtil.FateStatus fateStatus = admin.getStatus(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS + "/" + tableId, null, null);
        for (AdminUtil.TransactionStatus tx : fateStatus.getTransactions()) {
            if (tx.getTop().contains("CompactionDriver") && tx.getDebug().contains("CompactRange")) {
                return true;
            }
        }
    } catch (KeeperException | TableNotFoundException | InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
    // did not find an appropriate fate transaction for the compaction
    return false;
}
Also used: Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance) ZooStore(org.apache.accumulo.fate.ZooStore) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) AdminUtil(org.apache.accumulo.fate.AdminUtil) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) KeeperException(org.apache.zookeeper.KeeperException)
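
findFate() only reports whether one matching transaction exists; the same ZooStore/AdminUtil pair can enumerate everything it finds. A sketch reusing the objects constructed above and only the accessors the example already shows (getTop() and getDebug()):

AdminUtil.FateStatus fateStatus = admin.getStatus(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS + "/" + tableId, null, null);
for (AdminUtil.TransactionStatus tx : fateStatus.getTransactions()) {
    // log every transaction found, not just compaction-related ones
    log.trace("fate top: {} debug: {}", tx.getTop(), tx.getDebug());
}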

Example 5 with IZooReaderWriter

Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

The class SplitRecoveryIT, method run:

private void run() throws Exception {
    Instance inst = HdfsZooInstance.getInstance();
    AccumuloServerContext c = new AccumuloServerContext(inst, new ServerConfigurationFactory(inst));
    String zPath = ZooUtil.getRoot(inst) + "/testLock";
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
    ZooLock zl = new ZooLock(zPath);
    boolean gotLock = zl.tryLock(new LockWatcher() {

        @Override
        public void lostLock(LockLossReason reason) {
            System.exit(-1);
        }

        @Override
        public void unableToMonitorLockNode(Throwable e) {
            System.exit(-1);
        }
    }, "foo".getBytes(UTF_8));
    if (!gotLock) {
        System.err.println("Failed to get lock " + zPath);
    }
    // run test for a table with one tablet
    runSplitRecoveryTest(c, 0, "sp", 0, zl, nke("foo0", null, null));
    runSplitRecoveryTest(c, 1, "sp", 0, zl, nke("foo1", null, null));
    // run test for tables with two tablets, run test on first and last tablet
    runSplitRecoveryTest(c, 0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
    runSplitRecoveryTest(c, 1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
    runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
    runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
    // run test for table w/ three tablets, run test on middle tablet
    runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
    runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
    // run test for table w/ three tablets, run test on first
    runSplitRecoveryTest(c, 0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
    runSplitRecoveryTest(c, 1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
    // run test for table w/ three tablets, run test on last tablet
    runSplitRecoveryTest(c, 0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
    runSplitRecoveryTest(c, 1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
}
Also used: AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) HdfsZooInstance(org.apache.accumulo.server.client.HdfsZooInstance) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) LockWatcher(org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher) ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory) LockLossReason(org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason) ZooLock(org.apache.accumulo.server.zookeeper.ZooLock)
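
Note that run() only prints a warning when tryLock fails and then calls runSplitRecoveryTest anyway. A stricter variant, using nothing beyond what the example already shows, would abort instead:

if (!gotLock) {
    System.err.println("Failed to get lock " + zPath);
    // abort rather than run the recovery tests without holding the lock
    System.exit(-1);
}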

Aggregations

IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter) 57
KeeperException (org.apache.zookeeper.KeeperException) 25
IOException (java.io.IOException) 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 11
Instance (org.apache.accumulo.core.client.Instance) 11
AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException) 8
Mutator (org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator) 6
HdfsZooInstance (org.apache.accumulo.server.client.HdfsZooInstance) 6
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 5
TException (org.apache.thrift.TException) 5
NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException) 5
ArrayList (java.util.ArrayList) 4
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 4
ZooReaderWriterFactory (org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) 4
File (java.io.File) 3
Entry (java.util.Map.Entry) 3
Connector (org.apache.accumulo.core.client.Connector) 3
Scanner (org.apache.accumulo.core.client.Scanner) 3
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration) 3
Key (org.apache.accumulo.core.data.Key) 3