Search in sources :

Example 31 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

From the class Tablet, the method getCompactionID:

/**
 * Reads this tablet's table compaction id (and any associated user compaction config) from
 * ZooKeeper.
 *
 * @return a pair of the compaction id and the {@link CompactionConfig} whose row range overlaps
 *         this tablet's extent; a default {@code CompactionConfig} when none applies
 * @throws NoNodeException if the compaction-id node does not exist in ZooKeeper
 */
public Pair<Long, CompactionConfig> getCompactionID() throws NoNodeException {
    try {
        // ZK node format: "<compactId>" optionally followed by ",<key>=<hex-encoded config>".
        String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstanceID() + Constants.ZTABLES + "/" + extent.tableId() + Constants.ZTABLE_COMPACT_ID;
        String[] tokens = new String(context.getZooReaderWriter().getData(zTablePath), UTF_8).split(",");
        long compactID = Long.parseLong(tokens[0]);
        CompactionConfig overlappingConfig = null;
        if (tokens.length > 1) {
            // A user compaction config was stored with the id; hex-decode and deserialize it.
            Hex hex = new Hex();
            ByteArrayInputStream bais = new ByteArrayInputStream(hex.decode(tokens[1].split("=")[1].getBytes(UTF_8)));
            DataInputStream dis = new DataInputStream(bais);
            var compactionConfig = UserCompactionUtils.decodeCompactionConfig(dis);
            // Only honor the config if its row range overlaps this tablet's extent.
            KeyExtent ke = new KeyExtent(extent.tableId(), compactionConfig.getEndRow(), compactionConfig.getStartRow());
            if (ke.overlaps(extent)) {
                overlappingConfig = compactionConfig;
            }
        }
        if (overlappingConfig == null) {
            // no config present, set to default
            overlappingConfig = new CompactionConfig();
        }
        return new Pair<>(compactID, overlappingConfig);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Exception on " + extent + " getting compaction ID", e);
    } catch (DecoderException | NumberFormatException e) {
        throw new RuntimeException("Exception on " + extent + " getting compaction ID", e);
    } catch (NoNodeException nne) {
        // Node absent: propagate as the declared checked exception (replaces instanceof check).
        throw nne;
    } catch (KeeperException ke) {
        throw new RuntimeException("Exception on " + extent + " getting compaction ID", ke);
    }
}
Also used : NoNodeException(org.apache.zookeeper.KeeperException.NoNodeException) DataInputStream(java.io.DataInputStream) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) IterationInterruptedException(org.apache.accumulo.core.iteratorsImpl.system.IterationInterruptedException) DecoderException(org.apache.commons.codec.DecoderException) ByteArrayInputStream(java.io.ByteArrayInputStream) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Hex(org.apache.commons.codec.binary.Hex) KeeperException(org.apache.zookeeper.KeeperException) Pair(org.apache.accumulo.core.util.Pair)

Example 32 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

From the class SampleIT, the method testSampleNotPresent:

@Test
public void testSampleNotPresent() throws Exception {
    // Verifies that scans requesting sample data fail with "sample not present" until the
    // table's sampler is configured AND all rfiles have been rewritten with sample data.
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        String tableName = getUniqueNames(1)[0];
        String clone = tableName + "_clone";
        client.tableOperations().create(tableName);
        TreeMap<Key, Value> expected = new TreeMap<>();
        try (BatchWriter bw = client.createBatchWriter(tableName)) {
            writeData(bw, SC1, expected);
        }
        // All scanner variants participate in the checks; keep them in try-with-resources so
        // they are closed on exit (previously only bScanner was closed — resource leak).
        try (Scanner scanner = client.createScanner(tableName);
                Scanner isoScanner = new IsolatedScanner(client.createScanner(tableName));
                Scanner csiScanner = new ClientSideIteratorScanner(client.createScanner(tableName));
                BatchScanner bScanner = client.createBatchScanner(tableName)) {
            isoScanner.setBatchSize(10);
            bScanner.setRanges(Arrays.asList(new Range()));
            // ensure sample not present exception occurs when sampling is not configured
            assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner);
            client.tableOperations().flush(tableName, null, null, true);
            Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
            assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
            // configure sampling, however there exists an rfile w/o sample data... so should still
            // see sample not present exception
            updateSamplingConfig(client, tableName, SC1);
            // create clone with new config
            oScanner = newOfflineScanner(client, tableName, clone, SC1);
            assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
            // create rfile with sample data present
            client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
            // should be able to scan sample now
            oScanner = newOfflineScanner(client, tableName, clone, SC1);
            setSamplerConfig(SC1, scanner, csiScanner, isoScanner, bScanner, oScanner);
            check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
            // change sampling config
            updateSamplingConfig(client, tableName, SC2);
            // create clone with new config
            oScanner = newOfflineScanner(client, tableName, clone, SC2);
            // rfile should have different sample config than table, and scan should not work
            assertSampleNotPresent(SC2, scanner, isoScanner, bScanner, csiScanner, oScanner);
            // create rfile that has same sample data as table config
            client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
            // should be able to scan sample now
            updateExpected(SC2, expected);
            oScanner = newOfflineScanner(client, tableName, clone, SC2);
            setSamplerConfig(SC2, scanner, csiScanner, isoScanner, bScanner, oScanner);
            check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
        }
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) ClientSideIteratorScanner(org.apache.accumulo.core.client.ClientSideIteratorScanner) BatchScanner(org.apache.accumulo.core.client.BatchScanner) OfflineScanner(org.apache.accumulo.core.clientImpl.OfflineScanner) ClientSideIteratorScanner(org.apache.accumulo.core.client.ClientSideIteratorScanner) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) BatchScanner(org.apache.accumulo.core.client.BatchScanner) TreeMap(java.util.TreeMap) Range(org.apache.accumulo.core.data.Range) Value(org.apache.accumulo.core.data.Value) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 33 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

From the class VolumeIT, the method testRemoveVolumes:

@Test
public void testRemoveVolumes() throws Exception {
    // Verify that after a volume is removed from the instance configuration, compactions
    // migrate every tablet's files — including the root tablet's — onto the remaining volume.
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tableNames = getUniqueNames(2);
        verifyVolumesUsed(client, tableNames[0], false, v1, v2);
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
        cluster.stop();
        // Drop v1 from the instance volumes, leaving only v2 configured.
        updateConfig(config -> config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString()));
        // Restart and confirm the removed volume was decommissioned.
        cluster.start();
        client.tableOperations().compact(tableNames[0], null, null, true, true);
        verifyVolumesUsed(client, tableNames[0], true, v2);
        client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
        // The root tablet must have at least one file, and none of them on volume 1.
        int rootFileCount = 0;
        for (StoredTabletFile rootFile : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT).getFiles()) {
            assertTrue(rootFile.getMetaUpdateDelete().startsWith(v2.toString()));
            rootFileCount++;
        }
        assertTrue(rootFileCount > 0);
        client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(), new HashSet<>());
        client.tableOperations().flush(MetadataTable.NAME, null, null, true);
        client.tableOperations().flush(RootTable.NAME, null, null, true);
        // Both the original table and its clone should reference only v2.
        verifyVolumesUsed(client, tableNames[0], true, v2);
        verifyVolumesUsed(client, tableNames[1], true, v2);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) Admin(org.apache.accumulo.server.util.Admin) Test(org.junit.Test)

Example 34 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

From the class IteratorEnvIT, the method testCompact:

/**
 * Seeds the table with data, then runs a user compaction configured with the given iterator,
 * passing the table id along as an option so the iterator can validate its environment.
 */
public void testCompact(String tableName, Class<? extends SortedKeyValueIterator<Key, Value>> iteratorClass) throws Exception {
    writeData(tableName);
    IteratorSetting iterSetting = new IteratorSetting(1, iteratorClass);
    iterSetting.addOption("expected.table.id", client.tableOperations().tableIdMap().get(tableName));
    CompactionConfig compactionConfig = new CompactionConfig();
    compactionConfig.setIterators(Collections.singletonList(iterSetting));
    client.tableOperations().compact(tableName, compactionConfig);
}
Also used : IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig)

Example 35 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

From the class UserCompactionStrategyIT, the method testDropA:

@Test
public void testDropA() throws Exception {
    // Exercise a compaction strategy that drops files whose names start with "A" while
    // selecting only files whose names start with "F" as compaction input.
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    writeFlush(conn, table, "a");
    writeFlush(conn, table, "b");
    // Full compaction produces an A-prefixed file containing rows 'a' and 'b'.
    conn.tableOperations().compact(table, new CompactionConfig().setWait(true));
    writeFlush(conn, table, "c");
    writeFlush(conn, table, "d");
    // Configure the strategy to drop A-prefixed files and take F-prefixed files as input.
    CompactionStrategyConfig strategy = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
    strategy.setOptions(ImmutableMap.of("dropPrefix", "A", "inputPrefix", "F"));
    conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setCompactionStrategy(strategy));
    Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(conn, table));
    // Subsequent default compactions must not drop files starting with A.
    conn.tableOperations().compact(table, new CompactionConfig().setWait(true));
    conn.tableOperations().compact(table, new CompactionConfig().setWait(true));
    Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(conn, table));
}
Also used : CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) Connector(org.apache.accumulo.core.client.Connector) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Test(org.junit.Test)

Aggregations

CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)57 Test (org.junit.Test)36 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)32 Mutation (org.apache.accumulo.core.data.Mutation)21 BatchWriter (org.apache.accumulo.core.client.BatchWriter)20 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)17 Value (org.apache.accumulo.core.data.Value)14 PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig)12 Scanner (org.apache.accumulo.core.client.Scanner)11 CompactionStrategyConfig (org.apache.accumulo.core.client.admin.CompactionStrategyConfig)11 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)11 Text (org.apache.hadoop.io.Text)10 AccumuloException (org.apache.accumulo.core.client.AccumuloException)9 Connector (org.apache.accumulo.core.client.Connector)9 Key (org.apache.accumulo.core.data.Key)9 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)6 File (java.io.File)5 IOException (java.io.IOException)5 HashMap (java.util.HashMap)5 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)5