
Example 16 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.

From the class ConditionalWriterIT, method testTrace.

@Test
public void testTrace() throws Exception {
    // Need to add a getClientConfig() to AccumuloCluster
    Assume.assumeTrue(getClusterType() == ClusterType.MINI);
    Process tracer = null;
    Connector conn = getConnector();
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    if (!conn.tableOperations().exists("trace")) {
        tracer = mac.exec(TraceServer.class);
        while (!conn.tableOperations().exists("trace")) {
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    }
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    DistributedTrace.enable("localhost", "testTrace", mac.getClientConfig());
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    Span root = Trace.on("traceTest");
    try (ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig())) {
        // mutation conditional on column tx:seq not existing
        ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
        cm0.put("name", "last", "doe");
        cm0.put("name", "first", "john");
        cm0.put("tx", "seq", "1");
        Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
        root.stop();
    }
    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
        scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
        loop: while (true) {
            final StringBuilder finalBuffer = new StringBuilder();
            int traceCount = TraceDump.printTrace(scanner, new Printer() {

                @Override
                public void print(final String line) {
                    try {
                        finalBuffer.append(line).append("\n");
                    } catch (Exception ex) {
                        throw new RuntimeException(ex);
                    }
                }
            });
            String traceOutput = finalBuffer.toString();
            log.info("Trace output:" + traceOutput);
            if (traceCount > 0) {
                int lastPos = 0;
                for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations".split(",")) {
                    log.info("Looking in trace output for '" + part + "'");
                    int pos = traceOutput.indexOf(part);
                    if (-1 == pos) {
                        log.info("Trace output doesn't contain '" + part + "'");
                        Thread.sleep(1000);
                        break loop;
                    }
                    assertTrue("Did not find '" + part + "' in output", pos > 0);
                    assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
                    lastPos = pos;
                }
                break;
            } else {
                log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
                Thread.sleep(1000);
            }
        }
        if (tracer != null) {
            tracer.destroy();
        }
    }
}
Also used : Condition(org.apache.accumulo.core.data.Condition) Connector(org.apache.accumulo.core.client.Connector) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) Printer(org.apache.accumulo.tracer.TraceDump.Printer) Span(org.apache.accumulo.core.trace.Span) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableExistsException(org.apache.accumulo.core.client.TableExistsException) TableDeletedException(org.apache.accumulo.core.client.TableDeletedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ConditionalWriter(org.apache.accumulo.core.client.ConditionalWriter) ConditionalMutation(org.apache.accumulo.core.data.ConditionalMutation) TraceServer(org.apache.accumulo.tracer.TraceServer) ConditionalWriterConfig(org.apache.accumulo.core.client.ConditionalWriterConfig) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) Test(org.junit.Test)
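
A minimal sketch (not from the Accumulo sources) of the tracing pattern this test exercises: enable distributed tracing against the mini cluster, wrap the traced work between Trace.on() and Span.stop(), and keep the trace id for looking the spans up in the "trace" table afterwards. The service name and the traced call are placeholders, and DistributedTrace.disable() is assumed to be the counterpart of enable() in this API version.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.trace.DistributedTrace;
import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;
import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;

class TraceSketch {

    static long tracedWork(MiniAccumuloClusterImpl mac, Connector conn) throws Exception {
        // Register this process with the tracer under a service name.
        DistributedTrace.enable("localhost", "myService", mac.getClientConfig());
        Span root = Trace.on("myOperation");
        try {
            // Accumulo RPCs issued here are recorded under root's trace id.
            conn.tableOperations().list();
        } finally {
            root.stop();
            DistributedTrace.disable();
        }
        // The hex form of this id is the row key in the "trace" table.
        return root.traceId();
    }
}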

Example 17 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.

From the class MiniClusterHarness, method create.

public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
    requireNonNull(token);
    checkArgument(token instanceof PasswordToken || token instanceof KerberosToken, "A PasswordToken or KerberosToken is required");
    String rootPasswd;
    if (token instanceof PasswordToken) {
        rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8);
    } else {
        rootPasswd = UUID.randomUUID().toString();
    }
    File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);
    // Enable native maps by default
    cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
    Configuration coreSite = new Configuration(false);
    // Setup SSL and credential providers if the properties request such
    configureForEnvironment(cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);
    // Invoke the callback for tests to configure MAC before it starts
    configCallback.configureMiniCluster(cfg, coreSite);
    MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);
    // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath)
    if (coreSite.size() > 0) {
        File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists())
            throw new RuntimeException(csFile + " already exists");
        try (OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile))) {
            coreSite.writeXml(out);
        }
    }
    return miniCluster;
}
Also used : PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Configuration(org.apache.hadoop.conf.Configuration) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) BufferedOutputStream(java.io.BufferedOutputStream) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl)
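
For orientation, here is a minimal sketch, not taken from the harness, of the bare lifecycle the method above wraps: config, cluster, start, connect, stop. The temp directory and password are placeholders; real ITs should go through MiniClusterHarness.create(...) so the SSL/Kerberos/native-map setup above is applied consistently.

import java.io.File;
import java.nio.file.Files;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;

class MacLifecycleSketch {

    public static void main(String[] args) throws Exception {
        File baseDir = Files.createTempDirectory("mac").toFile();
        MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, "secret");
        MiniAccumuloClusterImpl mac = new MiniAccumuloClusterImpl(cfg);
        mac.start();
        try {
            Connector conn = mac.getConnector("root", new PasswordToken("secret"));
            conn.tableOperations().create("demo");
            System.out.println("tables: " + conn.tableOperations().list());
        } finally {
            // Stop the cluster so the child ZooKeeper/master/tserver processes exit.
            mac.stop();
        }
    }
}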

Example 18 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.

From the class ExistingMacIT, method testExistingInstance.

@Test
public void testExistingInstance() throws Exception {
    Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
    conn.tableOperations().create("table1");
    BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
    Mutation m1 = new Mutation("00081");
    m1.put("math", "sqroot", "9");
    m1.put("math", "sq", "6560");
    bw.addMutation(m1);
    bw.close();
    conn.tableOperations().flush("table1", null, null, true);
    // TODO use constants
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);
    Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
    for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
        if (entry.getKey() == ServerType.ZOOKEEPER)
            continue;
        for (ProcessReference pr : entry.getValue()) {
            getCluster().killProcess(entry.getKey(), pr);
        }
    }
    final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
    final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
    IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
    final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
    while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
        log.debug("Accumulo services still have their ZK locks held");
        Thread.sleep(1000);
    }
    File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
    FileUtils.deleteQuietly(hadoopConfDir);
    assertTrue(hadoopConfDir.mkdirs());
    createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
    createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
    File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
    FileUtils.deleteQuietly(testDir2);
    MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
    macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
    MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
    accumulo2.start();
    conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
        int sum = 0;
        for (Entry<Key, Value> entry : scanner) {
            sum += Integer.parseInt(entry.getValue().toString());
        }
        Assert.assertEquals(6569, sum);
    }
    accumulo2.stop();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ServerType(org.apache.accumulo.minicluster.ServerType) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Collection(java.util.Collection) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
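
Condensed from the test above, the reuse pattern is three calls: a throwaway config, useExistingInstance() pointed at the first cluster's accumulo-site.xml plus a directory of Hadoop config files, and start(). A sketch with placeholder paths; as the test shows, the first cluster's servers must have released their ZooKeeper locks before the second MAC can attach.

import java.io.File;

import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;

class ExistingInstanceSketch {

    static MiniAccumuloClusterImpl attach(File newBaseDir, File firstClusterConfDir, File hadoopConfDir) throws Exception {
        // The root password is unused when attaching to an existing instance.
        MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(newBaseDir, "notused");
        cfg.useExistingInstance(new File(firstClusterConfDir, "accumulo-site.xml"), hadoopConfDir);
        MiniAccumuloClusterImpl mac = new MiniAccumuloClusterImpl(cfg);
        // Fails if the original cluster's processes still hold their ZK locks.
        mac.start();
        return mac;
    }
}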

Example 19 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.

From the class ExistingMacIT, method testExistingRunningInstance.

@Test
public void testExistingRunningInstance() throws Exception {
    final String table = getUniqueNames(1)[0];
    Connector conn = getConnector();
    // Ensure that a master and tserver are up so the existing instance check won't fail.
    conn.tableOperations().create(table);
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation("foo");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();
    File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf_2");
    FileUtils.deleteQuietly(hadoopConfDir);
    assertTrue(hadoopConfDir.mkdirs());
    createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
    createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
    File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_3");
    FileUtils.deleteQuietly(testDir2);
    MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
    macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
    System.out.println("conf " + new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"));
    MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
    try {
        accumulo2.start();
        Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance");
    } catch (RuntimeException e) {
    // TODO check message or throw more explicit exception
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) Test(org.junit.Test)
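
If the test classpath has JUnit 4.13 or newer, the try/fail/catch block at the end of the test can be written more compactly with assertThrows; a sketch of the drop-in replacement, using the accumulo2 variable from the test above (checking the exception message is still a TODO either way):

import static org.junit.Assert.assertThrows;

// Equivalent to the try/fail/catch above: starting a second MAC over a
// still-running instance is expected to throw.
assertThrows(RuntimeException.class, accumulo2::start);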

Example 20 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.

From the class MissingWalHeaderCompletesRecoveryIT, method testEmptyWalRecoveryCompletes.

@Test
public void testEmptyWalRecoveryCompletes() throws Exception {
    Connector conn = getConnector();
    MiniAccumuloClusterImpl cluster = getCluster();
    FileSystem fs = cluster.getFileSystem();
    // Fake out something that looks like host:port; the actual value is irrelevant.
    String fakeServer = "127.0.0.1:12345";
    File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
    File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
    File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
    log.info("Created empty WAL at {}", emptyWalog.toURI());
    fs.create(new Path(emptyWalog.toURI())).close();
    Assert.assertTrue("root user did not have write permission to metadata table", conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Table ID was null", tableId);
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId, null, null), 0, fakeServer, emptyWalog.toURI().toString());
    log.info("Taking {} offline", tableName);
    conn.tableOperations().offline(tableName, true);
    log.info("{} is offline", tableName);
    Text row = MetadataSchema.TabletsSection.getRow(tableId, null);
    Mutation m = new Mutation(row);
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    bw.addMutation(m);
    bw.close();
    log.info("Bringing {} online", tableName);
    conn.tableOperations().online(tableName, true);
    log.info("{} is online", tableName);
    // Reading the table implies recovery of the empty WAL completed; otherwise the tablet will never come online and we won't be able to read it.
    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
        Assert.assertEquals(0, Iterables.size(s));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) FileSystem(org.apache.hadoop.fs.FileSystem) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) BatchWriter(org.apache.accumulo.core.client.BatchWriter) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Test(org.junit.Test)
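
The reusable piece of this test is the metadata surgery: pointing a tablet at a WAL by writing a LogEntry column into the tablet's row in accumulo.metadata. A sketch factoring that out, under the same 1.8-era internal APIs the test uses (impl packages, so no stability guarantees); as above, the table should be offline while its metadata row is edited.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
import org.apache.hadoop.io.Text;

class WalMetadataSketch {

    // Records a WAL against the table's single default tablet by writing the
    // LogEntry's column into the tablet's metadata row.
    static void registerWal(Connector conn, Table.ID tableId, String server, String walUri) throws Exception {
        LogEntry logEntry = new LogEntry(new KeyExtent(tableId, null, null), 0, server, walUri);
        Text row = MetadataSchema.TabletsSection.getRow(tableId, null);
        Mutation m = new Mutation(row);
        m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        try {
            bw.addMutation(m);
        } finally {
            bw.close();
        }
    }
}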

Aggregations

MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl): 34 usages
Test (org.junit.Test): 29 usages
Connector (org.apache.accumulo.core.client.Connector): 19 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 15 usages
Mutation (org.apache.accumulo.core.data.Mutation): 15 usages
MiniAccumuloConfigImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl): 14 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 13 usages
Scanner (org.apache.accumulo.core.client.Scanner): 13 usages
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 12 usages
Value (org.apache.accumulo.core.data.Value): 11 usages
ProcessReference (org.apache.accumulo.minicluster.impl.ProcessReference): 11 usages
File (java.io.File): 10 usages
Key (org.apache.accumulo.core.data.Key): 10 usages
AccumuloReplicaSystem (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem): 9 usages
Path (org.apache.hadoop.fs.Path): 9 usages
PartialKey (org.apache.accumulo.core.data.PartialKey): 8 usages
IOException (java.io.IOException): 5 usages
Entry (java.util.Map.Entry): 5 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
Text (org.apache.hadoop.io.Text): 4 usages