Example 41 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class ThriftClientHandler, method compact.

@Override
public void compact(TInfo tinfo, TCredentials credentials, String lock, String tableId, ByteBuffer startRow, ByteBuffer endRow) {
    try {
        checkPermission(credentials, lock, "compact");
    } catch (ThriftSecurityException e) {
        log.error("Caller doesn't have permission to compact a table", e);
        throw new RuntimeException(e);
    }
    KeyExtent ke = new KeyExtent(TableId.of(tableId), ByteBufferUtil.toText(endRow), ByteBufferUtil.toText(startRow));
    Pair<Long, CompactionConfig> compactionInfo = null;
    for (Tablet tablet : server.getOnlineTablets().values()) {
        if (ke.overlaps(tablet.getExtent())) {
            // all tablets are in the same table, so the compaction id only needs to be read once
            if (compactionInfo == null) {
                try {
                    compactionInfo = tablet.getCompactionID();
                } catch (NoNodeException e) {
                    log.info("Asked to compact table with no compaction id {} {}", ke, e.getMessage());
                    return;
                }
            }
            tablet.compactAll(compactionInfo.getFirst(), compactionInfo.getSecond());
        }
    }
}
Also used: NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), Tablet (org.apache.accumulo.tserver.tablet.Tablet), ThriftSecurityException (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException), TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)
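
A hedged client-side sketch of the public API path that ultimately reaches this handler: TableOperations.compact with a CompactionConfig carrying the row range. The table name and properties path below are hypothetical, not taken from the Accumulo sources.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.hadoop.io.Text;

static void compactRowRange() throws Exception {
    // the row range here corresponds to the startRow/endRow ByteBuffers the handler receives
    try (AccumuloClient client = Accumulo.newClient().from("/path/to/accumulo-client.properties").build()) {
        CompactionConfig cfg = new CompactionConfig()
            .setStartRow(new Text("a"))  // restrict the compaction to a row range
            .setEndRow(new Text("m"))
            .setWait(true);              // block until all overlapping tablets have compacted
        client.tableOperations().compact("mytable", cfg);
    }
}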

Example 42 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class VolumeIT, method testReplaceVolume.

private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);
    verifyVolumesUsed(client, tableNames[0], false, v1, v2);
    // write to 2nd table, but do not flush data to disk before shutdown
    try (AccumuloClient c2 = cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD))) {
        writeData(tableNames[1], c2);
    }
    if (cleanShutdown)
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    cluster.stop();
    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());
    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());
    updateConfig(config -> {
        config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
        config.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    });
    // start cluster and verify that volumes were replaced
    cluster.start();
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    // verify writes to new dir
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    client.tableOperations().compact(tableNames[1], null, null, true, true);
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
    // check that root tablet is not on volume 1 or 2
    int count = 0;
    for (StoredTabletFile file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT).getFiles()) {
        assertTrue(file.getMetaUpdateDelete().startsWith(v8.toString()) || file.getMetaUpdateDelete().startsWith(v9.toString()));
        count++;
    }
    assertTrue(count > 0);
    client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());
    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), Path (org.apache.hadoop.fs.Path), PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken), ClientContext (org.apache.accumulo.core.clientImpl.ClientContext), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), File (java.io.File)
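
The idiom this test relies on is a flushing, waiting compaction that forces every file to be rewritten onto the currently configured volumes. A minimal sketch of that idiom, assuming the caller supplies the client and table name:

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.metadata.RootTable;

static void rewriteFilesOntoCurrentVolumes(AccumuloClient client, String table) throws Exception {
    // compact(table, start, end, flush, wait): null rows cover the whole table,
    // flush=true persists in-memory data first, wait=true blocks until the compaction finishes
    client.tableOperations().compact(table, null, null, true, true);
    // the root table takes the CompactionConfig overload, as in the test above
    client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
}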

Example 43 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class ExternalCompaction_1_IT, method testPartialCompaction.

@Test
public void testPartialCompaction() throws Exception {
    String tableName = getUniqueNames(1)[0];
    try (final AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
        getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE8);
        getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
        createTable(client, tableName, "cs8");
        writeData(client, tableName);
        // This should create an A file
        compact(client, tableName, 17, QUEUE8, true);
        verify(client, tableName, 17);
        try (BatchWriter bw = client.createBatchWriter(tableName)) {
            for (int i = MAX_DATA; i < MAX_DATA * 2; i++) {
                Mutation m = new Mutation(row(i));
                m.put("", "", "" + i);
                bw.addMutation(m);
            }
        }
        // this should create an F file
        client.tableOperations().flush(tableName);
        // run a compaction that only compacts F files
        IteratorSetting iterSetting = new IteratorSetting(100, TestFilter.class);
        // make sure iterator options make it to compactor process
        iterSetting.addOption("expectedQ", QUEUE8);
        // compact the F file with a different modulus, using the pmodulus option, for a partial compaction
        iterSetting.addOption("pmodulus", 19 + "");
        CompactionConfig config = new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true).setSelector(new PluginConfig(FSelector.class.getName()));
        client.tableOperations().compact(tableName, config);
        try (Scanner scanner = client.createScanner(tableName)) {
            int count = 0;
            for (Entry<Key, Value> entry : scanner) {
                int v = Integer.parseInt(entry.getValue().toString());
                int modulus = v < MAX_DATA ? 17 : 19;
                assertTrue(String.format("%s %% %d != 0", entry.getValue(), modulus), v % modulus == 0);
                count++;
            }
            int expectedCount = 0;
            for (int i = 0; i < MAX_DATA * 2; i++) {
                int modulus = i < MAX_DATA ? 17 : 19;
                if (i % modulus == 0) {
                    expectedCount++;
                }
            }
            assertEquals(expectedCount, count);
        }
        // We need to cancel the compaction or delete the table here because we initiated a user
        // compaction above in the test; otherwise FaTE will continue to queue up compaction work
        client.tableOperations().cancelCompaction(tableName);
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig), Scanner (org.apache.accumulo.core.client.Scanner), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
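
A hedged sketch of the selector-plus-iterators pattern this test exercises; com.example.MyFilter and com.example.MyFileSelector are hypothetical stand-ins for the test's TestFilter and FSelector classes:

import java.util.List;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.PluginConfig;

static void partialCompaction(AccumuloClient client, String table) throws Exception {
    // this iterator runs only for this compaction; its options reach the compactor process
    IteratorSetting iter = new IteratorSetting(100, "myFilter", "com.example.MyFilter");
    iter.addOption("modulus", "19");
    CompactionConfig cfg = new CompactionConfig()
        .setIterators(List.of(iter))
        // the selector decides which of the table's files participate in the compaction
        .setSelector(new PluginConfig("com.example.MyFileSelector"))
        .setWait(true);
    client.tableOperations().compact(table, cfg);
}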

Example 44 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class ExternalCompaction_1_IT, method testConfigurer.

@Test
public void testConfigurer() throws Exception {
    String tableName = this.getUniqueNames(1)[0];
    getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE5);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
        Map<String, String> props = Map.of("table.compaction.dispatcher", SimpleCompactionDispatcher.class.getName(), "table.compaction.dispatcher.opts.service", "cs5", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
        NewTableConfiguration ntc = new NewTableConfiguration().setProperties(props);
        client.tableOperations().create(tableName, ntc);
        byte[] data = new byte[100000];
        Arrays.fill(data, (byte) 65);
        try (var writer = client.createBatchWriter(tableName)) {
            for (int row = 0; row < 10; row++) {
                Mutation m = new Mutation(row + "");
                m.at().family("big").qualifier("stuff").put(data);
                writer.addMutation(m);
            }
        }
        client.tableOperations().flush(tableName, null, null, true);
        // without compression, expect file to be large
        long sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setConfigurer(new PluginConfig(CompressionConfigurer.class.getName(), Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz", CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, data.length + ""))));
        // after compacting with compression, expect small file
        sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes: data: " + data.length + ", file:" + sizes, sizes < data.length);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
        // after compacting without compression, expect big files again
        sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
        // We need to cancel the compaction or delete the table here because we initiated a user
        // compaction above in the test; otherwise FaTE will continue to queue up compaction work
        client.tableOperations().cancelCompaction(tableName);
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), CompressionConfigurer (org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer), PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig), NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), Mutation (org.apache.accumulo.core.data.Mutation), SimpleCompactionDispatcher (org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher), Test (org.junit.Test)
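
The configurer shown above changes output settings for a single compaction without touching table properties. A minimal sketch of the same CompressionConfigurer usage, with the threshold given as a byte-count string in the style the test uses:

import java.util.Map;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.PluginConfig;
import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;

static void compactWithGzip(AccumuloClient client, String table) throws Exception {
    // gzip any output file whose input data exceeds ~1 MiB, for this compaction only
    PluginConfig configurer = new PluginConfig(CompressionConfigurer.class.getName(),
        Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz",
            CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, "1048576"));
    client.tableOperations().compact(table, new CompactionConfig().setWait(true).setConfigurer(configurer));
}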

Example 45 with CompactionConfig

Use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class UserCompactionStrategyIT, method testConcurrent.

@Test
public void testConcurrent() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);
        // write random data because it's very unlikely to compress
        writeRandomValue(c, tableName, 1 << 16);
        writeRandomValue(c, tableName, 1 << 16);
        c.tableOperations().compact(tableName, new CompactionConfig().setWait(false));
        c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
        assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
        writeRandomValue(c, tableName, 1 << 16);
        IteratorSetting iterConfig = new IteratorSetting(30, SlowIterator.class);
        SlowIterator.setSleepTime(iterConfig, 1000);
        long t1 = System.currentTimeMillis();
        c.tableOperations().compact(tableName, new CompactionConfig().setWait(false).setIterators(Arrays.asList(iterConfig)));
        try {
            // this compaction should fail because the previous one set iterators
            c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
            if (System.currentTimeMillis() - t1 < 2000) {
                fail("Expected compaction to fail because another concurrent compaction set iterators");
            }
        } catch (AccumuloException e) {
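            // expected: the concurrent compaction above set iterators, so this request fails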
        }
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), AccumuloException (org.apache.accumulo.core.client.AccumuloException), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig), Test (org.junit.Test)
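
A hedged sketch of handling that failure mode in application code; as the test shows, an overlapping user compaction with different settings can cause a second request to fail:

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.admin.CompactionConfig;

static void compactOrSkip(AccumuloClient client, String table) throws Exception {
    try {
        client.tableOperations().compact(table, new CompactionConfig().setWait(true));
    } catch (AccumuloException e) {
        // an overlapping compaction with conflicting settings (e.g. iterators) was already running
    }
}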

Aggregations

CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig): 57
Test (org.junit.Test): 36
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 32
Mutation (org.apache.accumulo.core.data.Mutation): 21
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 20
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 17
Value (org.apache.accumulo.core.data.Value): 14
PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig): 12
Scanner (org.apache.accumulo.core.client.Scanner): 11
CompactionStrategyConfig (org.apache.accumulo.core.client.admin.CompactionStrategyConfig): 11
NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration): 11
Text (org.apache.hadoop.io.Text): 10
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 9
Connector (org.apache.accumulo.core.client.Connector): 9
Key (org.apache.accumulo.core.data.Key): 9
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 6
File (java.io.File): 5
IOException (java.io.IOException): 5
HashMap (java.util.HashMap): 5
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 5