Search in sources :

Example 6 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project.

From the class KerberosIT, method testUserPrivilegesForTable:

/**
 * Verifies that a freshly-created Kerberos user starts with no system permissions, and that
 * after being granted table-level permissions by root, the user can write, compact, alter,
 * and read (with the proper authorizations) a table — without ever holding system privileges.
 */
@Test
public void testUserPrivilegesForTable() throws Exception {
    String user1 = testName.getMethodName();
    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
    if (user1Keytab.exists() && !user1Keytab.delete()) {
        log.warn("Unable to delete {}", user1Keytab);
    }
    // Create some new users -- cannot contain realm
    kdc.createPrincipal(user1Keytab, user1);
    final String qualifiedUser1 = kdc.qualifyUser(user1);
    // Log in as user1
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
    log.info("Logged in as {}", user1);
    // The cast picks the doAs overload whose action may throw checked exceptions.
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // Indirectly creates this user when we use it
        Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken());
        log.info("Created connector as {}", qualifiedUser1);
        // The new user should have no system permissions
        for (SystemPermission perm : SystemPermission.values()) {
            assertFalse(conn.securityOperations().hasSystemPermission(qualifiedUser1, perm));
        }
        return null;
    });
    final String table = testName.getMethodName() + "_user_table";
    final String viz = "viz";
    // Switch to root to create the table and grant user1 table-level (not system) privileges
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
        conn.tableOperations().create(table);
        // Give our unprivileged user permission on the table we made for them
        conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ);
        conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.WRITE);
        conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.ALTER_TABLE);
        conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.DROP_TABLE);
        conn.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz));
        return null;
    });
    // Switch back to the original user
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1, user1Keytab.getAbsolutePath());
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        Connector conn = mac.getConnector(qualifiedUser1, new KerberosToken());
        // Make sure we can actually use the table we made
        // Write data ('L' suffix: lowercase 'l' is easily misread as '1')
        final long ts = 1000L;
        // try-with-resources: the writer is closed (flushing its data) even if addMutation fails
        try (BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig())) {
            Mutation m = new Mutation("a");
            // NOTE(review): getBytes() uses the platform default charset -- confirm UTF-8 is intended
            m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
            bw.addMutation(m);
        }
        // Compact
        conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
        // Alter
        conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
        // Read (and proper authorizations)
        try (Scanner s = conn.createScanner(table, new Authorizations(viz))) {
            Iterator<Entry<Key, Value>> iter = s.iterator();
            assertTrue("No results from iterator", iter.hasNext());
            Entry<Key, Value> entry = iter.next();
            assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
            assertEquals(new Value("d".getBytes()), entry.getValue());
            assertFalse("Had more results from iterator", iter.hasNext());
            return null;
        }
    });
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Scanner(org.apache.accumulo.core.client.Scanner) Authorizations(org.apache.accumulo.core.security.Authorizations) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableExistsException(org.apache.accumulo.core.client.TableExistsException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) UndeclaredThrowableException(java.lang.reflect.UndeclaredThrowableException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) SystemPermission(org.apache.accumulo.core.security.SystemPermission) Entry(java.util.Map.Entry) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Iterator(java.util.Iterator) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) File(java.io.File) Key(org.apache.accumulo.core.data.Key) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Example 7 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project.

From the class KerberosIT, method createTableWithDataAndCompact:

/**
 * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
 * the tserver which will create the system user if it doesn't already exist).
 *
 * @param conn connector used to create, write to, and compact the per-test-method table
 * @throws TableExistsException if the table (named after the test method) already exists
 */
private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
    final String table = testName.getMethodName() + "_table";
    conn.tableOperations().create(table);
    // try-with-resources guarantees the writer is closed (flushing its data) even if
    // addMutation throws; the previous version leaked the writer on that path.
    try (BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig())) {
        Mutation m = new Mutation("a");
        m.put("b", "c", "d");
        bw.addMutation(m);
    }
    // The compaction triggers the master->tserver RPC described in the javadoc
    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
}
Also used : CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)

Example 8 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project.

From the class KerberosRenewalIT, method createReadWriteDrop:

/**
 * Creates a table, adds a record to it, compacts the table, reads the record back, and
 * finally drops the table. A simple way to make sure that the system user exists (since the
 * master does an RPC to the tserver which will create the system user if it doesn't already
 * exist).
 *
 * @param conn connector used to create, write, compact, read, and delete the table
 * @throws TableExistsException if the table (named after the test method) already exists
 */
private void createReadWriteDrop(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
    final String table = testName.getMethodName() + "_table";
    conn.tableOperations().create(table);
    // try-with-resources guarantees the writer is closed (flushing its data) even if
    // addMutation throws; the previous version leaked the writer on that path.
    try (BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig())) {
        Mutation m = new Mutation("a");
        m.put("b", "c", "d");
        bw.addMutation(m);
    }
    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
    // Read the single entry back: compare row/family/qualifier only (timestamp was server-assigned)
    try (Scanner s = conn.createScanner(table, Authorizations.EMPTY)) {
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        assertEquals("Did not find the expected key", 0, new Key("a", "b", "c").compareTo(entry.getKey(), PartialKey.ROW_COLFAM_COLQUAL));
        assertEquals("d", entry.getValue().toString());
        conn.tableOperations().delete(table);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 9 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project.

From the class SummaryIT, method basicSummaryTest:

// Exercises the summary lifecycle on one table: no summaries before a flush, summary
// updates on write/delete, regeneration on compaction, per-tablet summaries after a
// split, ranged retrieval, and dynamic summarizer removal/re-addition.
@Test
public void basicSummaryTest() throws Exception {
    final String table = getUniqueNames(1)[0];
    Connector c = getConnector();
    // Create the table with the BasicSummarizer enabled from the start
    NewTableConfiguration ntc = new NewTableConfiguration();
    SummarizerConfiguration sc1 = SummarizerConfiguration.builder(BasicSummarizer.class.getName()).build();
    ntc.enableSummarization(sc1);
    c.tableOperations().create(table, ntc);
    // presumably writes the 100,000 entries implied by the TOTAL_STAT checks below -- see writeData
    BatchWriter bw = writeData(table, c);
    // With flush(false), data still only in memory has no file, hence no summaries yet
    Collection<Summary> summaries = c.tableOperations().summaries(table).flush(false).retrieve();
    Assert.assertEquals(0, summaries.size());
    LongSummaryStatistics stats = getTimestampStats(table, c);
    // flush(true) forces in-memory data into a file, which produces summaries
    summaries = c.tableOperations().summaries(table).flush(true).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // Add one put and one delete; after flushing, totals rise by 2 and deletes by 1
    Mutation m = new Mutation(String.format("r%09x", 999));
    m.put("f1", "q1", "999-0");
    m.putDelete("f1", "q2");
    bw.addMutation(m);
    bw.flush();
    c.tableOperations().flush(table, null, null, true);
    stats = getTimestampStats(table, c);
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_002l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 1l);
    bw.close();
    // Compaction collapses the files and applies the delete: back to 100,000 entries, 0 deletes
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // split tablet into two
    String sp1 = String.format("r%09x", 50_000);
    addSplits(table, c, sp1);
    // Splitting alone does not rewrite files, so summaries are unchanged
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // compact 2nd tablet
    c.tableOperations().compact(table, new CompactionConfig().setStartRow(new Text(sp1)).setWait(true));
    // Now one file per tablet exists; the shared pre-split file makes counts overlap (extra=1)
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 1, TOTAL_STAT, 113_999l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // get summaries for first tablet
    stats = getTimestampStats(table, c, sp1, null);
    summaries = c.tableOperations().summaries(table).startRow(sp1).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 49_999l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // compact all tablets and regenerate all summaries
    c.tableOperations().compact(table, new CompactionConfig());
    summaries = c.tableOperations().summaries(table).retrieve();
    stats = getTimestampStats(table, c);
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_000l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
    // A range inside one tablet returns one file, flagged "extra" since the file spans past the range
    summaries = c.tableOperations().summaries(table).startRow(String.format("r%09x", 75_000)).endRow(String.format("r%09x", 80_000)).retrieve();
    Summary summary = Iterables.getOnlyElement(summaries);
    Assert.assertEquals(1, summary.getFileStatistics().getTotal());
    Assert.assertEquals(1, summary.getFileStatistics().getExtra());
    long total = summary.getStatistics().get(TOTAL_STAT);
    Assert.assertTrue("Total " + total + " out of expected range", total > 0 && total <= 10_000);
    // test adding and removing
    // Removing by a predicate that matches nothing ("foo") must leave the summarizer in place
    c.tableOperations().removeSummarizers(table, sc -> sc.getClassName().contains("foo"));
    List<SummarizerConfiguration> summarizers = c.tableOperations().listSummarizers(table);
    Assert.assertEquals(1, summarizers.size());
    Assert.assertTrue(summarizers.contains(sc1));
    // Removing by the actual class name drops it; a compaction then strips all summary data
    c.tableOperations().removeSummarizers(table, sc -> sc.getClassName().equals(BasicSummarizer.class.getName()));
    summarizers = c.tableOperations().listSummarizers(table);
    Assert.assertEquals(0, summarizers.size());
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    Assert.assertEquals(0, summaries.size());
    // Re-adding the summarizer and compacting regenerates the summaries from scratch
    c.tableOperations().addSummarizers(table, sc1);
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_000l, MIN_TIMESTAMP_STAT, stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0l);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Text(org.apache.hadoop.io.Text) LongSummaryStatistics(java.util.LongSummaryStatistics) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) CounterSummary(org.apache.accumulo.core.client.summary.CounterSummary) Summary(org.apache.accumulo.core.client.summary.Summary) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) Test(org.junit.Test)

Example 10 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project.

From the class SummaryIT, method tooLargeTest:

/**
 * Verifies summary bookkeeping when a summarizer emits more data than can be stored:
 * the file is flagged large/inaccurate and its statistics are omitted, until a
 * post-split compaction produces a small enough tablet for real statistics to appear.
 */
@Test
public void tooLargeTest() throws Exception {
    final String table = getUniqueNames(1)[0];
    Connector conn = getConnector();
    // Fresh table with the intentionally-oversized summarizer enabled
    NewTableConfiguration tableConfig = new NewTableConfiguration();
    SummarizerConfiguration bigSummarizer = SummarizerConfiguration.builder(BigSummarizer.class).build();
    tableConfig.enableSummarization(bigSummarizer);
    conn.tableOperations().create(table, tableConfig);
    try (BatchWriter writer = conn.createBatchWriter(table, new BatchWriterConfig())) {
        write(writer, "a_large", "f1", "q1", "v1");
        write(writer, "v_small", "f1", "q1", "v2");
    }
    conn.tableOperations().flush(table, null, null, true);
    // One file whose summary overflowed: flagged large + inaccurate, no statistics kept
    Summary summary = conn.tableOperations().summaries(table).retrieve().get(0);
    assertOversizedFileStats(summary, 1);
    Assert.assertEquals(Collections.emptyMap(), summary.getStatistics());
    // create situation where one tablet has summary data and one does not because the summary data was too large
    conn.tableOperations().addSplits(table, new TreeSet<>(Collections.singleton(new Text("m"))));
    conn.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summary = conn.tableOperations().summaries(table).retrieve().get(0);
    assertOversizedFileStats(summary, 2);
    // The small tablet's file now carries the expected per-key counts
    HashMap<String, Long> expected = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        expected.put(String.format("%09x", i), i * 19l);
    }
    Assert.assertEquals(expected, summary.getStatistics());
}

/** Asserts the fixed file-statistics shape for an oversized summary: exactly one large, inaccurate file among {@code expectedTotal}. */
private static void assertOversizedFileStats(Summary summary, int expectedTotal) {
    Assert.assertEquals(1, summary.getFileStatistics().getLarge());
    Assert.assertEquals(0, summary.getFileStatistics().getMissing());
    Assert.assertEquals(0, summary.getFileStatistics().getExtra());
    Assert.assertEquals(0, summary.getFileStatistics().getDeleted());
    Assert.assertEquals(1, summary.getFileStatistics().getInaccurate());
    Assert.assertEquals(expectedTotal, summary.getFileStatistics().getTotal());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) CounterSummary(org.apache.accumulo.core.client.summary.CounterSummary) Summary(org.apache.accumulo.core.client.summary.Summary) BatchWriter(org.apache.accumulo.core.client.BatchWriter) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) Test(org.junit.Test)

Aggregations

CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)18 Connector (org.apache.accumulo.core.client.Connector)14 Test (org.junit.Test)12 BatchWriter (org.apache.accumulo.core.client.BatchWriter)9 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)7 CompactionStrategyConfig (org.apache.accumulo.core.client.admin.CompactionStrategyConfig)7 AccumuloException (org.apache.accumulo.core.client.AccumuloException)6 Mutation (org.apache.accumulo.core.data.Mutation)6 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)5 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)5 Text (org.apache.hadoop.io.Text)5 Scanner (org.apache.accumulo.core.client.Scanner)4 File (java.io.File)3 HashMap (java.util.HashMap)3 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)3 Key (org.apache.accumulo.core.data.Key)3 Value (org.apache.accumulo.core.data.Value)3 UndeclaredThrowableException (java.lang.reflect.UndeclaredThrowableException)2 LongSummaryStatistics (java.util.LongSummaryStatistics)2 Entry (java.util.Map.Entry)2