Search in sources :

Example 1 with ZooReaderWriterFactory

use of org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory in project accumulo by apache.

the class BackupMasterIT method test.

@Test
public void test() throws Exception {
    // wait for master
    UtilWaitThread.sleep(1000);
    // create a backup
    Process backup = exec(Master.class);
    try {
        String secret = getCluster().getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter writer = new ZooReaderWriterFactory().getZooReaderWriter(cluster.getZooKeepers(), 30 * 1000, secret);
        String root = "/accumulo/" + getConnector().getInstance().getInstanceID();
        List<String> children = Collections.emptyList();
        // wait for 2 lock entries
        do {
            UtilWaitThread.sleep(100);
            children = writer.getChildren(root + "/masters/lock");
        } while (children.size() != 2);
        Collections.sort(children);
        // wait for the backup master to learn to be the backup
        UtilWaitThread.sleep(1000);
        // generate a spurious zookeeper event: rewrite the lock data unchanged so watchers fire
        String lockPath = root + "/masters/lock/" + children.get(0);
        byte[] data = writer.getData(lockPath, null);
        writer.getZooKeeper().setData(lockPath, data, -1);
        // let it propagate
        UtilWaitThread.sleep(500);
        // kill the master by removing its lock
        writer.recursiveDelete(lockPath, NodeMissingPolicy.FAIL);
        // ensure the backup becomes the master
        getConnector().tableOperations().create(getUniqueNames(1)[0]);
    } finally {
        backup.destroy();
    }
}
Also used : IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) Test(org.junit.Test)
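
The test deletes children.get(0) after sorting because ZooKeeper's ephemeral sequential lock nodes carry zero-padded sequence numbers, so a lexicographic sort puts the current lock holder first. A minimal sketch of that lookup, reusing the IZooReaderWriter from the test above (the helper name is ours, not the project's):

// Hedged sketch: resolve the current holder under a ZooKeeper lock path.
// Assumes ephemeral sequential children, as under masters/lock above; needs
// java.util.List, java.util.Collections, and the IZooReaderWriter and
// KeeperException imports already listed for this example.
static String currentLockHolder(IZooReaderWriter writer, String lockRoot) throws KeeperException, InterruptedException {
    List<String> children = writer.getChildren(lockRoot);
    // zero-padded sequence numbers make lexicographic order match creation order
    Collections.sort(children);
    return children.isEmpty() ? null : lockRoot + "/" + children.get(0);
}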

Example 2 with ZooReaderWriterFactory

use of org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory in project accumulo by apache.

the class MiniAccumuloClusterImpl method start.

/**
 * Starts Accumulo and Zookeeper processes. Can only be called once.
 */
@Override
public synchronized void start() throws IOException, InterruptedException {
    if (config.useMiniDFS() && miniDFS == null) {
        throw new IllegalStateException("Cannot restart mini when using miniDFS");
    }
    MiniAccumuloClusterControl control = getClusterControl();
    if (config.useExistingInstance()) {
        Configuration acuConf = config.getAccumuloConfiguration();
        Configuration hadoopConf = config.getHadoopConfiguration();
        ConfigurationCopy cc = new ConfigurationCopy(acuConf);
        VolumeManager fs;
        try {
            fs = VolumeManagerImpl.get(cc, hadoopConf);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(fs);
        String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, cc, hadoopConf);
        IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(cc.get(Property.INSTANCE_ZK_HOST), (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT), cc.get(Property.INSTANCE_SECRET));
        String rootPath = ZooUtil.getRoot(instanceIdFromFile);
        String instanceName = null;
        try {
            for (String name : zrw.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
                String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
                byte[] bytes = zrw.getData(instanceNamePath, new Stat());
                String iid = new String(bytes, UTF_8);
                if (iid.equals(instanceIdFromFile)) {
                    instanceName = name;
                }
            }
        } catch (KeeperException e) {
            throw new RuntimeException("Unable to read instance name from zookeeper.", e);
        }
        if (instanceName == null)
            throw new RuntimeException("Unable to read instance name from zookeeper.");
        config.setInstanceName(instanceName);
        if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath))
            throw new RuntimeException("The Accumulo instance being used is already running. Aborting.");
    } else {
        if (!initialized) {
            Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    try {
                        MiniAccumuloClusterImpl.this.stop();
                    } catch (IOException e) {
                        log.error("IOException while attempting to stop the MiniAccumuloCluster.", e);
                    } catch (InterruptedException e) {
                        log.error("The stopping of MiniAccumuloCluster was interrupted.", e);
                    }
                }
            });
        }
        if (!config.useExistingZooKeepers())
            control.start(ServerType.ZOOKEEPER);
        if (!initialized) {
            if (!config.useExistingZooKeepers()) {
                // sleep a little bit to let zookeeper come up before calling init, seems to work better
                long startTime = System.currentTimeMillis();
                while (true) {
                    Socket s = null;
                    try {
                        s = new Socket("localhost", config.getZooKeeperPort());
                        s.setReuseAddress(true);
                        s.getOutputStream().write("ruok\n".getBytes());
                        s.getOutputStream().flush();
                        byte[] buffer = new byte[100];
                        int n = s.getInputStream().read(buffer);
                        if (n >= 4 && new String(buffer, 0, 4).equals("imok"))
                            break;
                    } catch (Exception e) {
                        if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
                            throw new ZooKeeperBindException("Zookeeper did not start within " + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in " + config.getLogDir() + " for errors.  Last exception: " + e);
                        }
                        // Don't spin absurdly fast
                        Thread.sleep(250);
                    } finally {
                        if (s != null)
                            s.close();
                    }
                }
            }
            LinkedList<String> args = new LinkedList<>();
            args.add("--instance-name");
            args.add(config.getInstanceName());
            args.add("--user");
            args.add(config.getRootUserName());
            args.add("--clear-instance-name");
            // If we aren't using SASL, add in the root password
            final String saslEnabled = config.getSiteConfig().get(Property.INSTANCE_RPC_SASL_ENABLED.getKey());
            if (null == saslEnabled || !Boolean.parseBoolean(saslEnabled)) {
                args.add("--password");
                args.add(config.getRootPassword());
            }
            Process initProcess = exec(Initialize.class, args.toArray(new String[0]));
            int ret = initProcess.waitFor();
            if (ret != 0) {
                throw new RuntimeException("Initialize process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
            }
            initialized = true;
        }
    }
    log.info("Starting MAC against instance {} and zookeeper(s) {}.", config.getInstanceName(), config.getZooKeepers());
    control.start(ServerType.TABLET_SERVER);
    int ret = 0;
    for (int i = 0; i < 5; i++) {
        ret = exec(Main.class, SetGoalState.class.getName(), MasterGoalState.NORMAL.toString()).waitFor();
        if (ret == 0)
            break;
        sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
    if (ret != 0) {
        throw new RuntimeException("Could not set master goal state, process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
    }
    control.start(ServerType.MASTER);
    control.start(ServerType.GARBAGE_COLLECTOR);
    if (null == executor) {
        executor = Executors.newSingleThreadExecutor();
    }
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Configuration(org.apache.hadoop.conf.Configuration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) CachedConfiguration(org.apache.accumulo.core.util.CachedConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) Stat(org.apache.zookeeper.data.Stat) SetGoalState(org.apache.accumulo.master.state.SetGoalState) Path(org.apache.hadoop.fs.Path) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) TimeoutException(java.util.concurrent.TimeoutException) ThriftNotActiveServiceException(org.apache.accumulo.core.client.impl.thrift.ThriftNotActiveServiceException) ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ExecutionException(java.util.concurrent.ExecutionException) LinkedList(java.util.LinkedList) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Socket(java.net.Socket)
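
The busy-wait loop in the middle of start() is ZooKeeper's standard four-letter-word health check: write ruok and expect imok back. Factored out of the loop, the probe looks roughly like this (a sketch under that assumption, not code from the project):

// Hedged sketch of the "ruok"/"imok" probe from the startup loop above.
// Needs java.net.Socket, java.io.IOException, and a static import of
// java.nio.charset.StandardCharsets.UTF_8.
static boolean zooKeeperResponding(String host, int port) {
    try (Socket s = new Socket(host, port)) {
        s.getOutputStream().write("ruok\n".getBytes(UTF_8));
        s.getOutputStream().flush();
        byte[] buffer = new byte[100];
        int n = s.getInputStream().read(buffer);
        // a healthy server answers "imok"
        return n >= 4 && "imok".equals(new String(buffer, 0, 4, UTF_8));
    } catch (IOException e) {
        return false; // not accepting connections yet; caller retries
    }
}

Polling a helper like this mirrors the inline while (true) loop without changing behavior: retry until it returns true or the configured startup timeout elapses.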

Example 3 with ZooReaderWriterFactory

use of org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory in project accumulo by apache.

the class TableChangeStateIT method findFate.

/**
 * Checks the FATE transactions in ZooKeeper for one associated with a compaction, as a double check that the test is valid: a running compaction
 * should hold a FATE transaction lock.
 *
 * @return true if a corresponding FATE transaction is found, false otherwise
 */
private boolean findFate(final String tableName) {
    Instance instance = connector.getInstance();
    AdminUtil<String> admin = new AdminUtil<>(false);
    try {
        Table.ID tableId = Tables.getTableId(instance, tableName);
        log.trace("tid: {}", tableId);
        String secret = cluster.getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter zk = new ZooReaderWriterFactory().getZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), secret);
        ZooStore<String> zs = new ZooStore<>(ZooUtil.getRoot(instance) + Constants.ZFATE, zk);
        AdminUtil.FateStatus fateStatus = admin.getStatus(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS + "/" + tableId, null, null);
        for (AdminUtil.TransactionStatus tx : fateStatus.getTransactions()) {
            if (tx.getTop().contains("CompactionDriver") && tx.getDebug().contains("CompactRange")) {
                return true;
            }
        }
    } catch (KeeperException | TableNotFoundException | InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
    // did not find appropriate fate transaction for compaction.
    return false;
}
Also used : Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance) ZooStore(org.apache.accumulo.fate.ZooStore) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) AdminUtil(org.apache.accumulo.fate.AdminUtil) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) KeeperException(org.apache.zookeeper.KeeperException)
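
Because the compaction's FATE transaction may not be visible immediately, a caller would typically poll this check rather than invoke it once. A hypothetical usage sketch (the polling loop is illustrative, not taken from the project):

// Hedged usage sketch: wait until the compaction's FATE transaction is visible.
while (!findFate(tableName)) {
    Thread.sleep(250); // the transaction may not be registered yet
}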

Example 4 with ZooReaderWriterFactory

use of org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory in project accumulo by apache.

the class ExistingMacIT method testExistingInstance.

@Test
public void testExistingInstance() throws Exception {
    Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
    conn.tableOperations().create("table1");
    BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
    Mutation m1 = new Mutation("00081");
    m1.put("math", "sqroot", "9");
    m1.put("math", "sq", "6560");
    bw.addMutation(m1);
    bw.close();
    conn.tableOperations().flush("table1", null, null, true);
    // TODO use constants
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);
    Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
    for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
        if (entry.getKey() == ServerType.ZOOKEEPER)
            continue;
        for (ProcessReference pr : entry.getValue()) getCluster().killProcess(entry.getKey(), pr);
    }
    final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
    final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
    IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
    final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
    while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
        log.debug("Accumulo services still have their ZK locks held");
        Thread.sleep(1000);
    }
    File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
    FileUtils.deleteQuietly(hadoopConfDir);
    assertTrue(hadoopConfDir.mkdirs());
    createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
    createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
    File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
    FileUtils.deleteQuietly(testDir2);
    MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
    macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
    MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
    accumulo2.start();
    conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
        int sum = 0;
        for (Entry<Key, Value> entry : scanner) {
            sum += Integer.parseInt(entry.getValue().toString());
        }
        Assert.assertEquals(6569, sum);
    }
    accumulo2.stop();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ServerType(org.apache.accumulo.minicluster.ServerType) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Collection(java.util.Collection) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
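
The createEmptyConfig helper is referenced above but its body is not part of this example. One plausible implementation, offered as an assumption rather than the project's actual code, writes a Hadoop configuration file containing no properties:

// Hypothetical body for the createEmptyConfig helper referenced above; needs
// org.apache.hadoop.conf.Configuration, java.io.File, java.io.FileOutputStream,
// java.io.IOException, and java.io.OutputStream.
private static void createEmptyConfig(File file) throws IOException {
    Configuration conf = new Configuration(false); // skip default resources
    try (OutputStream out = new FileOutputStream(file)) {
        conf.writeXml(out); // emits an empty <configuration/> document
    }
}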

Example 5 with ZooReaderWriterFactory

use of org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory in project accumulo by apache.

the class FunctionalTestUtils method getFateStatus.

private static FateStatus getFateStatus(Instance instance, AccumuloCluster cluster) {
    try {
        AdminUtil<String> admin = new AdminUtil<>(false);
        String secret = cluster.getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter zk = new ZooReaderWriterFactory().getZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), secret);
        ZooStore<String> zs = new ZooStore<>(ZooUtil.getRoot(instance) + Constants.ZFATE, zk);
        FateStatus fateStatus = admin.getStatus(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, null, null);
        return fateStatus;
    } catch (KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
Also used : AdminUtil(org.apache.accumulo.fate.AdminUtil) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) ZooStore(org.apache.accumulo.fate.ZooStore) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) FateStatus(org.apache.accumulo.fate.AdminUtil.FateStatus) KeeperException(org.apache.zookeeper.KeeperException)
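
All five examples build on the same call, new ZooReaderWriterFactory().getZooReaderWriter(zooKeepers, sessionTimeoutMillis, instanceSecret), and differ only in what they do with the handle. A hedged usage sketch for the helper above (the printing is illustrative, not project code):

// Hedged usage sketch: inspect the FATE transactions returned above.
// getTop() and getDebug() are the same accessors used in Example 3.
FateStatus status = getFateStatus(instance, cluster);
for (AdminUtil.TransactionStatus tx : status.getTransactions()) {
    System.out.println(tx.getTop() + " / " + tx.getDebug());
}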

Aggregations

IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter): 5
ZooReaderWriterFactory (org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory): 5
KeeperException (org.apache.zookeeper.KeeperException): 3
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration): 2
AdminUtil (org.apache.accumulo.fate.AdminUtil): 2
ZooStore (org.apache.accumulo.fate.ZooStore): 2
Test (org.junit.Test): 2
File (java.io.File): 1
IOException (java.io.IOException): 1
Socket (java.net.Socket): 1
URISyntaxException (java.net.URISyntaxException): 1
Collection (java.util.Collection): 1
LinkedList (java.util.LinkedList): 1
Entry (java.util.Map.Entry): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
TimeoutException (java.util.concurrent.TimeoutException): 1
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 1
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 1
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 1
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 1