Search in sources:

Example 6 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class TableIT, method test().

@Test
public void test() throws Exception {
    // End-to-end check that deleting a table removes both its metadata entries
    // and its files on disk, and that the table name is immediately reusable.
    // Requires a MINI cluster so we can inspect the local file system layout.
    assumeTrue(getClusterType() == ClusterType.MINI);
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    String rootPath = mac.getConfig().getDir().getAbsolutePath();
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        TableOperations to = c.tableOperations();
        String tableName = getUniqueNames(1)[0];
        to.create(tableName);
        // Ingest and flush so the table has files referenced from the metadata table
        VerifyParams params = new VerifyParams(getClientProps(), tableName);
        TestIngest.ingest(c, params);
        to.flush(tableName, null, null, true);
        VerifyIngest.verifyIngest(c, params);
        TableId id = TableId.of(to.tableIdMap().get(tableName));
        try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            // Restrict the scan to this table's metadata rows, file-reference column family
            s.setRange(new KeyExtent(id, null, null).toMetaRange());
            s.fetchColumnFamily(DataFileColumnFamily.NAME);
            assertTrue(Iterators.size(s.iterator()) > 0);
            FileSystem fs = getCluster().getFileSystem();
            assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
            to.delete(tableName);
            // After delete: no file entries should remain in the metadata table
            assertEquals(0, Iterators.size(s.iterator()));
            try {
                // The table directory should be empty — or gone entirely (see catch)
                assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
            } catch (FileNotFoundException ex) {
            // that's fine, too
            }
            assertNull(to.tableIdMap().get(tableName));
            // The same name should be usable right away for a brand-new table
            to.create(tableName);
            TestIngest.ingest(c, params);
            VerifyIngest.verifyIngest(c, params);
            to.delete(tableName);
        }
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) TableId(org.apache.accumulo.core.data.TableId) Path(org.apache.hadoop.fs.Path) Scanner(org.apache.accumulo.core.client.Scanner) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) FileSystem(org.apache.hadoop.fs.FileSystem) VerifyParams(org.apache.accumulo.test.VerifyIngest.VerifyParams) FileNotFoundException(java.io.FileNotFoundException) MiniAccumuloClusterImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Test(org.junit.Test)

Example 7 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class WALSunnyDayIT, method test().

@Test
public void test() throws Exception {
    // Exercises the write-ahead-log lifecycle on a healthy cluster: WAL markers
    // appear lazily, roll on write volume, become unused after flush, are
    // reclaimed by the GC, and survive a tserver restart/recovery cycle.
    // NOTE: statement order matters throughout — GC and tserver are stopped and
    // started at precise points to observe each WAL state transition.
    MiniAccumuloClusterImpl mac = getCluster();
    MiniAccumuloClusterControl control = mac.getClusterControl();
    // GC is kept off so unused WALs are not collected until we want them to be
    control.stop(GARBAGE_COLLECTOR);
    ServerContext context = getServerContext();
    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
        String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);
        writeSomeData(c, tableName, 1, 1);
        // wal markers are added lazily
        Map<String, WalState> wals = getWALsAndAssertCount(context, 2);
        assertEquals("all WALs should be in use", 2, countInUse(wals.values()));
        // roll log, get a new next
        writeSomeData(c, tableName, 1001, 50);
        Map<String, WalState> walsAfterRoll = getWALsAndAssertCount(context, 3);
        assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet()));
        assertEquals("all WALs should be in use", 3, countInUse(walsAfterRoll.values()));
        // flush the tables
        for (String table : new String[] { tableName, MetadataTable.NAME, RootTable.NAME }) {
            c.tableOperations().flush(table, null, null, true);
        }
        sleepUninterruptibly(1, TimeUnit.SECONDS);
        // rolled WAL is no longer in use, but needs to be GC'd
        Map<String, WalState> walsAfterflush = getWALsAndAssertCount(context, 3);
        assertEquals("inUse should be 2", 2, countInUse(walsAfterflush.values()));
        // let the GC run for a little bit
        control.start(GARBAGE_COLLECTOR);
        sleepUninterruptibly(5, TimeUnit.SECONDS);
        // make sure the unused WAL goes away
        getWALsAndAssertCount(context, 2);
        control.stop(GARBAGE_COLLECTOR);
        // restart the tserver, but don't run recovery on all tablets
        control.stop(TABLET_SERVER);
        // this delays recovery on the normal tables
        assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").getProcess().waitFor());
        control.start(TABLET_SERVER);
        // wait for the metadata table to go back online
        getRecoveryMarkers(c);
        // allow a little time for the manager to notice ASSIGNED_TO_DEAD_SERVER tablets
        sleepUninterruptibly(5, TimeUnit.SECONDS);
        Map<KeyExtent, List<String>> markers = getRecoveryMarkers(c);
        // log.debug("markers " + markers);
        assertEquals("one tablet should have markers", 1, markers.size());
        assertEquals("tableId of the keyExtent should be 1", "1", markers.keySet().iterator().next().tableId().canonical());
        // put some data in the WAL
        assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").getProcess().waitFor());
        verifySomeData(c, tableName, 1001 * 50 + 1);
        writeSomeData(c, tableName, 100, 100);
        Map<String, WalState> walsAfterRestart = getWALsAndAssertCount(context, 4);
        // log.debug("wals after " + walsAfterRestart);
        assertEquals("used WALs after restart should be 4", 4, countInUse(walsAfterRestart.values()));
        // GC again: logs recovered and flushed after restart are now collectable
        control.start(GARBAGE_COLLECTOR);
        sleepUninterruptibly(5, TimeUnit.SECONDS);
        Map<String, WalState> walsAfterRestartAndGC = getWALsAndAssertCount(context, 2);
        assertEquals("logs in use should be 2", 2, countInUse(walsAfterRestartAndGC.values()));
    }
}
Also used : MiniAccumuloClusterControl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterControl) AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) ServerContext(org.apache.accumulo.server.ServerContext) WalState(org.apache.accumulo.server.log.WalStateManager.WalState) List(java.util.List) ArrayList(java.util.ArrayList) SetGoalState(org.apache.accumulo.manager.state.SetGoalState) MiniAccumuloClusterImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Test(org.junit.Test)

Example 8 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class MiniClusterHarness, method create().

/**
 * Creates (but does not start) a MiniAccumuloClusterImpl for the given test, configured for
 * native maps and, when requested via system properties, SSL/Kerberos.
 *
 * @param testClassName used (with the method name) to name the cluster's base directory
 * @param testMethodName see above
 * @param token root credential; must be a PasswordToken or KerberosToken
 * @param configCallback hook for tests to adjust the MAC config before the cluster is built
 * @param kdc test KDC, or null when Kerberos is not in play
 * @return the configured, unstarted mini cluster
 */
public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
    requireNonNull(token);
    checkArgument(token instanceof PasswordToken || token instanceof KerberosToken, "A PasswordToken or KerberosToken is required");
    String rootPasswd;
    if (token instanceof PasswordToken) {
        rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8);
    } else {
        // Kerberos logins carry no password; generate a throwaway root password
        rootPasswd = UUID.randomUUID().toString();
    }
    File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);
    // Enable native maps by default
    cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
    Configuration coreSite = new Configuration(false);
    // Setup SSL and credential providers if the properties request such
    configureForEnvironment(cfg, AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);
    // Invoke the callback for tests to configure MAC before it starts
    configCallback.configureMiniCluster(cfg, coreSite);
    MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);
    // Write any accumulated Hadoop settings to a core-site.xml in the MAC conf dir
    // (picked up via the cluster's classpath)
    if (coreSite.size() > 0) {
        File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists())
            throw new RuntimeException(csFile + " already exist");
        // try-with-resources: the stream is closed even if writeXml throws
        try (OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile))) {
            coreSite.writeXml(out);
        }
    }
    return miniCluster;
}
Also used : PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Configuration(org.apache.hadoop.conf.Configuration) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) FileOutputStream(java.io.FileOutputStream) MiniAccumuloClusterImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl) File(java.io.File) BufferedOutputStream(java.io.BufferedOutputStream) MiniAccumuloConfigImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl)

Example 9 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class CloneTestIT, method testDeleteClone().

@Test
public void testDeleteClone() throws Exception {
    // Verifies that (1) deleting a table removes its files from disk, and
    // (2) deleting the source of a clone does not affect the clone's data,
    // even across a full compaction of the clone.
    String[] tableNames = getUniqueNames(3);
    String table1 = tableNames[0];
    String table2 = tableNames[1];
    String table3 = tableNames[2];
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        // Direct file-system inspection only works against a mini cluster
        AccumuloCluster cluster = getCluster();
        assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
        MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
        String rootPath = mac.getConfig().getDir().getAbsolutePath();
        // verify that deleting a new table removes the files
        c.tableOperations().create(table3);
        writeData(table3, c).close();
        c.tableOperations().flush(table3, null, null, true);
        // check for files
        FileSystem fs = getCluster().getFileSystem();
        String id = c.tableOperations().tableIdMap().get(table3);
        FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
        assertTrue(status.length > 0);
        // verify disk usage
        List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
        assertEquals(1, diskUsage.size());
        assertTrue(diskUsage.get(0).getUsage() > 100);
        // delete the table
        c.tableOperations().delete(table3);
        // verify its gone from the file system
        Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
        if (fs.exists(tablePath)) {
            status = fs.listStatus(tablePath);
            assertTrue(status == null || status.length == 0);
        }
        c.tableOperations().create(table1);
        writeDataAndClone(c, table1, table2);
        // delete source table, should not affect clone
        c.tableOperations().delete(table1);
        checkData(table2, c);
        // compaction rewrites the clone's files; data must still match afterwards
        c.tableOperations().compact(table2, null, null, true, true);
        checkData(table2, c);
        c.tableOperations().delete(table2);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) FileSystem(org.apache.hadoop.fs.FileSystem) DiskUsage(org.apache.accumulo.core.client.admin.DiskUsage) MiniAccumuloClusterImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl) Test(org.junit.Test)

Example 10 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class ExistingMacIT, method testExistingInstance().

@Test
public void testExistingInstance() throws Exception {
    // Verifies that a second MiniAccumuloCluster can attach to the on-disk data
    // and ZooKeeper state of an existing (stopped) instance and read its tables.
    // Fix over previous version: both AccumuloClient instances are AutoCloseable
    // and were leaked; accumulo2.stop() could be skipped if an assertion failed.
    try (AccumuloClient client = getCluster().createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD))) {
        client.tableOperations().create("table1");
        try (BatchWriter bw = client.createBatchWriter("table1")) {
            Mutation m1 = new Mutation("00081");
            m1.put("math", "sqroot", "9");
            m1.put("math", "sq", "6560");
            bw.addMutation(m1);
        }
        client.tableOperations().flush("table1", null, null, true);
        // TODO use constants
        client.tableOperations().flush(MetadataTable.NAME, null, null, true);
        client.tableOperations().flush(RootTable.NAME, null, null, true);
        // Kill every server process except ZooKeeper so the instance goes offline
        // but its files and ZK state remain intact
        Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
        for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
            if (entry.getKey() == ServerType.ZOOKEEPER)
                continue;
            for (ProcessReference pr : entry.getValue()) getCluster().killProcess(entry.getKey(), pr);
        }
        final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
        final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
        ZooReaderWriter zrw = new ZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
        final String zInstanceRoot = Constants.ZROOT + "/" + client.instanceOperations().getInstanceId();
        // Wait until every killed server has released its ZooKeeper lock
        while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
            log.debug("Accumulo services still have their ZK locks held");
            Thread.sleep(1000);
        }
    }
    // Empty Hadoop conf dir so the second cluster picks up no stray settings
    File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
    FileUtils.deleteQuietly(hadoopConfDir);
    assertTrue(hadoopConfDir.mkdirs());
    createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
    createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
    File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
    FileUtils.deleteQuietly(testDir2);
    // Second cluster reuses the first one's instance rather than initializing anew
    MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
    macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo.properties"), hadoopConfDir);
    MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
    accumulo2.start();
    try {
        try (AccumuloClient client2 = accumulo2.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
            Scanner scanner = client2.createScanner("table1", Authorizations.EMPTY)) {
            int sum = 0;
            for (Entry<Key, Value> entry : scanner) {
                sum += Integer.parseInt(entry.getValue().toString());
            }
            // 9 + 6560: both cells written before the shutdown are visible
            assertEquals(6569, sum);
        }
    } finally {
        // Stop the second cluster even if the scan assertions fail
        accumulo2.stop();
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) ServerType(org.apache.accumulo.minicluster.ServerType) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.miniclusterImpl.ProcessReference) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) MiniAccumuloConfigImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) Value(org.apache.accumulo.core.data.Value) Collection(java.util.Collection) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Aggregations

MiniAccumuloClusterImpl (org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl)25 Test (org.junit.Test)22 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)20 BatchWriter (org.apache.accumulo.core.client.BatchWriter)13 Mutation (org.apache.accumulo.core.data.Mutation)13 MiniAccumuloConfigImpl (org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl)13 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)12 ProcessReference (org.apache.accumulo.miniclusterImpl.ProcessReference)12 Scanner (org.apache.accumulo.core.client.Scanner)11 Key (org.apache.accumulo.core.data.Key)10 Value (org.apache.accumulo.core.data.Value)10 HashMap (java.util.HashMap)9 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)9 AccumuloReplicaSystem (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem)9 File (java.io.File)8 PartialKey (org.apache.accumulo.core.data.PartialKey)8 Entry (java.util.Map.Entry)5 SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings)4 FileSystem (org.apache.hadoop.fs.FileSystem)4 Path (org.apache.hadoop.fs.Path)4