Search in sources:

Example 26 with NewTableConfiguration

use of org.apache.accumulo.core.client.admin.NewTableConfiguration in project accumulo by apache.

The class NewTableConfigurationIT, method testNtcChaining.

/**
 * Test NewTableConfiguration chaining: a single fluent NewTableConfiguration that
 * disables the default iterators, attaches a scan-scope iterator and sets locality
 * groups should apply all three settings to the created table.
 */
@Test
public void testNtcChaining() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
    Connector conn = getConnector();
    String tableName = getUniqueNames(2)[0];
    IteratorSetting setting = new IteratorSetting(10, "anIterator", "it.class", Collections.emptyMap());
    Map<String, Set<Text>> lgroups = new HashMap<>();
    lgroups.put("lgp", ImmutableSet.of(new Text("col")));
    NewTableConfiguration ntc = new NewTableConfiguration().withoutDefaultIterators().attachIterator(setting, EnumSet.of(IteratorScope.scan)).setLocalityGroups(lgroups);
    conn.tableOperations().create(tableName, ntc);
    // Only the explicitly attached iterator should be present, since the default
    // iterators were disabled.
    Map<String, EnumSet<IteratorScope>> iteratorList = conn.tableOperations().listIterators(tableName);
    assertEquals(1, iteratorList.size());
    verifyIterators(conn, tableName, new String[] { "table.iterator.scan.anIterator=10,it.class" }, false);
    conn.tableOperations().removeIterator(tableName, "anIterator", EnumSet.of(IteratorScope.scan));
    verifyIterators(conn, tableName, new String[] {}, false);
    iteratorList = conn.tableOperations().listIterators(tableName);
    assertEquals(0, iteratorList.size());
    // Verify that both locality-group properties were written to the table.
    int count = 0;
    for (Entry<String, String> property : conn.tableOperations().getProperties(tableName)) {
        if (property.getKey().equals("table.group.lgp")) {
            // Fixed argument order: JUnit's contract is assertEquals(expected, actual),
            // otherwise failure messages report the values backwards.
            assertEquals("col", property.getValue());
            count++;
        }
        if (property.getKey().equals("table.groups.enabled")) {
            assertEquals("lgp", property.getValue());
            count++;
        }
    }
    assertEquals(2, count);
    Map<String, Set<Text>> createdLocalityGroups = conn.tableOperations().getLocalityGroups(tableName);
    assertEquals(1, createdLocalityGroups.size());
    assertEquals(ImmutableSet.of(new Text("col")), createdLocalityGroups.get("lgp"));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) HashMap(java.util.HashMap) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) EnumSet(java.util.EnumSet) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)

Example 27 with NewTableConfiguration

use of org.apache.accumulo.core.client.admin.NewTableConfiguration in project vertexium by visallo.

The class AccumuloGraphTestUtils, method createTable.

/**
 * Creates the named table without the default iterators, using millisecond
 * timestamps. Any failure is wrapped in a VertexiumException.
 */
private static void createTable(Connector connector, String tableName) {
    try {
        NewTableConfiguration config = new NewTableConfiguration().withoutDefaultIterators().setTimeType(TimeType.MILLIS);
        connector.tableOperations().create(tableName, config);
    } catch (Exception e) {
        // Chain the underlying exception as the cause instead of discarding it,
        // so callers can see why table creation actually failed.
        throw new VertexiumException("Unable to create table " + tableName, e);
    }
}
Also used : NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) VertexiumException(org.vertexium.VertexiumException) VertexiumException(org.vertexium.VertexiumException)

Example 28 with NewTableConfiguration

use of org.apache.accumulo.core.client.admin.NewTableConfiguration in project accumulo by apache.

The class CreateTableCommand, method execute.

/**
 * Creates a new table from the shell, applying any optional split points, initial
 * properties, iterator settings, locality groups, copied configuration, visibility
 * constraint and custom formatter supplied on the command line.
 *
 * @return 0 on success; exceptions signal failure
 */
@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException, ClassNotFoundException {
    // The table name is the single positional argument. (It was previously read
    // twice into two distinct variables that were always identical.)
    final String tableName = cl.getArgs()[0];
    final HashMap<String, String> props = new HashMap<>();
    NewTableConfiguration ntc = new NewTableConfiguration();
    if (!tableName.matches(Tables.VALID_NAME_REGEX)) {
        shellState.getReader().println("Only letters, numbers and underscores are allowed for use in table names.");
        throw new IllegalArgumentException();
    }
    if (shellState.getConnector().tableOperations().exists(tableName)) {
        throw new TableExistsException(null, tableName, null);
    }
    // Gather split points either from a file or by copying another table's splits.
    final SortedSet<Text> partitions = new TreeSet<>();
    final boolean decode = cl.hasOption(base64Opt.getOpt());
    if (cl.hasOption(createTableOptSplit.getOpt())) {
        partitions.addAll(ShellUtil.scanFile(cl.getOptionValue(createTableOptSplit.getOpt()), decode));
    } else if (cl.hasOption(createTableOptCopySplits.getOpt())) {
        final String oldTable = cl.getOptionValue(createTableOptCopySplits.getOpt());
        if (!shellState.getConnector().tableOperations().exists(oldTable)) {
            throw new TableNotFoundException(null, oldTable, null);
        }
        partitions.addAll(shellState.getConnector().tableOperations().listSplits(oldTable));
    }
    // Fail fast (before creating the table) if the table whose configuration we
    // are asked to copy does not exist; the actual copy happens after creation.
    if (cl.hasOption(createTableOptCopyConfig.getOpt())) {
        final String oldTable = cl.getOptionValue(createTableOptCopyConfig.getOpt());
        if (!shellState.getConnector().tableOperations().exists(oldTable)) {
            throw new TableNotFoundException(null, oldTable, null);
        }
    }
    TimeType timeType = TimeType.MILLIS;
    if (cl.hasOption(createTableOptTimeLogical.getOpt())) {
        timeType = TimeType.LOGICAL;
    }
    // Parse comma-separated key=value pairs for initial table properties.
    if (cl.hasOption(createTableOptInitProp.getOpt())) {
        String[] keyVals = StringUtils.split(cl.getOptionValue(createTableOptInitProp.getOpt()), ',');
        for (String keyVal : keyVals) {
            String[] sa = StringUtils.split(keyVal, '=');
            if (sa.length < 2) {
                // Previously a malformed pair caused a raw ArrayIndexOutOfBoundsException.
                throw new IllegalArgumentException("Invalid initial property, expected key=value but got: " + keyVal);
            }
            props.put(sa[0], sa[1]);
        }
    }
    // Set iterator if supplied
    if (cl.hasOption(createTableOptIteratorProps.getOpt())) {
        ntc = attachIteratorToNewTable(cl, shellState, ntc);
    }
    // Set up locality groups, if supplied
    if (cl.hasOption(createTableOptLocalityProps.getOpt())) {
        ntc = setLocalityForNewTable(cl, ntc);
    }
    // create table
    shellState.getConnector().tableOperations().create(tableName, ntc.setTimeType(timeType).setProperties(props));
    if (partitions.size() > 0) {
        shellState.getConnector().tableOperations().addSplits(tableName, partitions);
    }
    // switch shell to new table context
    shellState.setTableName(tableName);
    if (cl.hasOption(createTableNoDefaultIters.getOpt())) {
        for (String key : IteratorUtil.generateInitialTableProperties(true).keySet()) {
            shellState.getConnector().tableOperations().removeProperty(tableName, key);
        }
    }
    // Copy options if flag was set
    if (cl.hasOption(createTableOptCopyConfig.getOpt())) {
        if (shellState.getConnector().tableOperations().exists(tableName)) {
            final Iterable<Entry<String, String>> configuration = shellState.getConnector().tableOperations().getProperties(cl.getOptionValue(createTableOptCopyConfig.getOpt()));
            for (Entry<String, String> entry : configuration) {
                if (Property.isValidTablePropertyKey(entry.getKey())) {
                    shellState.getConnector().tableOperations().setProperty(tableName, entry.getKey(), entry.getValue());
                }
            }
        }
    }
    if (cl.hasOption(createTableOptEVC.getOpt())) {
        try {
            shellState.getConnector().tableOperations().addConstraint(tableName, VisibilityConstraint.class.getName());
        } catch (AccumuloException e) {
            // Table creation succeeded; only the constraint failed, so warn rather than fail.
            Shell.log.warn(e.getMessage() + " while setting visibility constraint, but table was created");
        }
    }
    // Load custom formatter if set
    if (cl.hasOption(createTableOptFormatter.getOpt())) {
        final String formatterClass = cl.getOptionValue(createTableOptFormatter.getOpt());
        shellState.getConnector().tableOperations().setProperty(tableName, Property.TABLE_FORMATTER_CLASS.toString(), formatterClass);
    }
    return 0;
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) HashMap(java.util.HashMap) VisibilityConstraint(org.apache.accumulo.core.constraints.VisibilityConstraint) Text(org.apache.hadoop.io.Text) TimeType(org.apache.accumulo.core.client.admin.TimeType) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Entry(java.util.Map.Entry) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) TreeSet(java.util.TreeSet) TableExistsException(org.apache.accumulo.core.client.TableExistsException)

Example 29 with NewTableConfiguration

use of org.apache.accumulo.core.client.admin.NewTableConfiguration in project accumulo by apache.

The class CyclicReplicationIT, method dataIsNotOverReplicated.

/**
 * Verifies that with cyclic replication configured between two mini clusters
 * (master1 -> master2 and master2 -> master1), a single mutation is replicated
 * exactly once in each direction and never echoed back — a summing combiner on
 * both tables would show a value greater than "1" if any entry were replicated twice.
 */
@Test
public void dataIsNotOverReplicated() throws Exception {
    File master1Dir = createTestDir("master1"), master2Dir = createTestDir("master2");
    String password = "password";
    MiniAccumuloConfigImpl master1Cfg;
    MiniAccumuloClusterImpl master1Cluster;
    // Retry cluster startup until ZooKeeper binds successfully (port clashes are possible).
    while (true) {
        master1Cfg = new MiniAccumuloConfigImpl(master1Dir, password);
        master1Cfg.setNumTservers(1);
        master1Cfg.setInstanceName("master1");
        // Set up SSL if needed
        ConfigurableMacBase.configureForEnvironment(master1Cfg, this.getClass(), ConfigurableMacBase.getSslDir(master1Dir));
        master1Cfg.setProperty(Property.REPLICATION_NAME, master1Cfg.getInstanceName());
        master1Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
        master1Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
        master1Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
        master1Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
        master1Cluster = new MiniAccumuloClusterImpl(master1Cfg);
        setCoreSite(master1Cluster);
        try {
            master1Cluster.start();
            break;
        } catch (ZooKeeperBindException e) {
            log.warn("Failed to start ZooKeeper on {}, will retry", master1Cfg.getZooKeeperPort());
        }
    }
    MiniAccumuloConfigImpl master2Cfg;
    MiniAccumuloClusterImpl master2Cluster;
    // Same retry loop for the second cluster.
    while (true) {
        master2Cfg = new MiniAccumuloConfigImpl(master2Dir, password);
        master2Cfg.setNumTservers(1);
        master2Cfg.setInstanceName("master2");
        // Set up SSL if needed. Need to share the same SSL truststore as master1
        this.updatePeerConfigFromPrimary(master1Cfg, master2Cfg);
        master2Cfg.setProperty(Property.REPLICATION_NAME, master2Cfg.getInstanceName());
        master2Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
        master2Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
        master2Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
        master2Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
        master2Cluster = new MiniAccumuloClusterImpl(master2Cfg);
        setCoreSite(master2Cluster);
        try {
            master2Cluster.start();
            break;
        } catch (ZooKeeperBindException e) {
            log.warn("Failed to start ZooKeeper on {}, will retry", master2Cfg.getZooKeeperPort());
        }
    }
    try {
        Connector connMaster1 = master1Cluster.getConnector("root", new PasswordToken(password)), connMaster2 = master2Cluster.getConnector("root", new PasswordToken(password));
        String master1UserName = "master1", master1Password = "foo";
        String master2UserName = "master2", master2Password = "bar";
        String master1Table = master1Cluster.getInstanceName(), master2Table = master2Cluster.getInstanceName();
        connMaster1.securityOperations().createLocalUser(master1UserName, new PasswordToken(master1Password));
        connMaster2.securityOperations().createLocalUser(master2UserName, new PasswordToken(master2Password));
        // Configure the credentials we should use to authenticate ourselves to the peer for replication
        connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master2Cluster.getInstanceName(), master2UserName);
        connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master2Cluster.getInstanceName(), master2Password);
        connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master1Cluster.getInstanceName(), master1UserName);
        connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master1Cluster.getInstanceName(), master1Password);
        // Register each cluster as the other's replication peer.
        connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + master2Cluster.getInstanceName(), ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(master2Cluster.getInstanceName(), master2Cluster.getZooKeepers())));
        connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + master1Cluster.getInstanceName(), ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(master1Cluster.getInstanceName(), master1Cluster.getZooKeepers())));
        connMaster1.tableOperations().create(master1Table, new NewTableConfiguration().withoutDefaultIterators());
        String master1TableId = connMaster1.tableOperations().tableIdMap().get(master1Table);
        Assert.assertNotNull(master1TableId);
        connMaster2.tableOperations().create(master2Table, new NewTableConfiguration().withoutDefaultIterators());
        String master2TableId = connMaster2.tableOperations().tableIdMap().get(master2Table);
        Assert.assertNotNull(master2TableId);
        // Replicate master1 in the master1 cluster to master2 in the master2 cluster
        connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION_TARGET.getKey() + master2Cluster.getInstanceName(), master2TableId);
        // Replicate master2 in the master2 cluster to master1 in the master1 cluster
        connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION_TARGET.getKey() + master1Cluster.getInstanceName(), master1TableId);
        // Give our replication user the ability to write to the respective table
        connMaster1.securityOperations().grantTablePermission(master1UserName, master1Table, TablePermission.WRITE);
        connMaster2.securityOperations().grantTablePermission(master2UserName, master2Table, TablePermission.WRITE);
        IteratorSetting summingCombiner = new IteratorSetting(50, SummingCombiner.class);
        SummingCombiner.setEncodingType(summingCombiner, Type.STRING);
        SummingCombiner.setCombineAllColumns(summingCombiner, true);
        // Set a combiner on both instances that will sum multiple values
        // We can use this to verify that the mutation was not sent multiple times
        connMaster1.tableOperations().attachIterator(master1Table, summingCombiner);
        connMaster2.tableOperations().attachIterator(master2Table, summingCombiner);
        // Write a single entry
        BatchWriter bw = connMaster1.createBatchWriter(master1Table, new BatchWriterConfig());
        Mutation m = new Mutation("row");
        m.put("count", "", "1");
        bw.addMutation(m);
        bw.close();
        Set<String> files = connMaster1.replicationOperations().referencedFiles(master1Table);
        log.info("Found {} that need replication from master1", files);
        // Kill and restart the tserver to close the WAL on master1
        for (ProcessReference proc : master1Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
            master1Cluster.killProcess(ServerType.TABLET_SERVER, proc);
        }
        master1Cluster.exec(TabletServer.class);
        log.info("Restarted tserver on master1");
        // Try to avoid ACCUMULO-2964
        Thread.sleep(1000);
        // Sanity check that the element is there on master1
        Entry<Key, Value> entry;
        try (Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
            entry = Iterables.getOnlyElement(s);
            Assert.assertEquals("1", entry.getValue().toString());
            // Wait for this table to replicate
            connMaster1.replicationOperations().drain(master1Table, files);
            Thread.sleep(5000);
        }
        // Check that the element made it to master2 only once
        try (Scanner s = connMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
            entry = Iterables.getOnlyElement(s);
            // A value other than "1" would mean the summing combiner saw duplicates.
            Assert.assertEquals("1", entry.getValue().toString());
            // Wait for master2 to finish replicating it back
            files = connMaster2.replicationOperations().referencedFiles(master2Table);
            // Kill and restart the tserver to close the WAL on master2
            for (ProcessReference proc : master2Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
                master2Cluster.killProcess(ServerType.TABLET_SERVER, proc);
            }
            master2Cluster.exec(TabletServer.class);
            // Try to avoid ACCUMULO-2964
            Thread.sleep(1000);
        }
        // Check that the element made it to master2 only once
        try (Scanner s = connMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
            entry = Iterables.getOnlyElement(s);
            Assert.assertEquals("1", entry.getValue().toString());
            connMaster2.replicationOperations().drain(master2Table, files);
            Thread.sleep(5000);
        }
        // Verify that the entry wasn't sent back to master1
        try (Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
            entry = Iterables.getOnlyElement(s);
            Assert.assertEquals("1", entry.getValue().toString());
        }
    } finally {
        // Always tear down both mini clusters, even on assertion failure.
        master1Cluster.stop();
        master2Cluster.stop();
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) ZooKeeperBindException(org.apache.accumulo.minicluster.impl.ZooKeeperBindException) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Value(org.apache.accumulo.core.data.Value) AccumuloReplicaSystem(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 30 with NewTableConfiguration

use of org.apache.accumulo.core.client.admin.NewTableConfiguration in project accumulo by apache.

The class RFileTest, method testScannerTableProperties.

/**
 * Verifies that passing a NewTableConfiguration's default properties (which include
 * the versioning iterator) to an RFile scanner suppresses older versions of a key,
 * while a scanner without those properties returns all versions.
 */
@Test
public void testScannerTableProperties() throws Exception {
    NewTableConfiguration ntc = new NewTableConfiguration();
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    String testFile = createTmpTestFile();
    RFileWriter writer = RFile.newWriter().to(testFile).withFileSystem(localFs).build();
    // Two versions of the same key, distinguished only by timestamp.
    Key k1 = new Key("r1", "f1", "q1");
    k1.setTimestamp(3);
    Key k2 = new Key("r1", "f1", "q1");
    k2.setTimestamp(6);
    // Use an explicit charset instead of the platform default when encoding the values.
    Value v1 = new Value("p".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    Value v2 = new Value("q".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    writer.append(k2, v2);
    writer.append(k1, v1);
    writer.close();
    // pass in table config that has versioning iterator configured: only the
    // newest version (k2) should be visible
    Scanner scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).withTableProperties(ntc.getProperties()).build();
    Assert.assertEquals(ImmutableMap.of(k2, v2), toMap(scanner));
    scanner.close();
    // without the table properties, both versions are returned
    scanner = RFile.newScanner().from(testFile).withFileSystem(localFs).build();
    Assert.assertEquals(ImmutableMap.of(k2, v2, k1, v1), toMap(scanner));
    scanner.close();
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Aggregations

NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)54 Test (org.junit.Test)47 Connector (org.apache.accumulo.core.client.Connector)40 HashMap (java.util.HashMap)22 Text (org.apache.hadoop.io.Text)22 BatchWriter (org.apache.accumulo.core.client.BatchWriter)20 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)18 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)17 EnumSet (java.util.EnumSet)13 Mutation (org.apache.accumulo.core.data.Mutation)13 Value (org.apache.accumulo.core.data.Value)13 Set (java.util.Set)12 ImmutableSet (com.google.common.collect.ImmutableSet)11 Scanner (org.apache.accumulo.core.client.Scanner)10 Key (org.apache.accumulo.core.data.Key)10 SummarizerConfiguration (org.apache.accumulo.core.client.summary.SummarizerConfiguration)9 Summary (org.apache.accumulo.core.client.summary.Summary)8 TreeSet (java.util.TreeSet)7 CounterSummary (org.apache.accumulo.core.client.summary.CounterSummary)7 Range (org.apache.accumulo.core.data.Range)5