
Example 11 with PluginConfig

Use of org.apache.accumulo.core.client.admin.PluginConfig in project accumulo by apache.

From the class ExternalCompaction_1_IT, method testPartialCompaction.

@Test
public void testPartialCompaction() throws Exception {
    String tableName = getUniqueNames(1)[0];
    try (final AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
        getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE8);
        getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
        createTable(client, tableName, "cs8");
        writeData(client, tableName);
        // This should create an A file
        compact(client, tableName, 17, QUEUE8, true);
        verify(client, tableName, 17);
        try (BatchWriter bw = client.createBatchWriter(tableName)) {
            for (int i = MAX_DATA; i < MAX_DATA * 2; i++) {
                Mutation m = new Mutation(row(i));
                m.put("", "", "" + i);
                bw.addMutation(m);
            }
        }
        // this should create an F file
        client.tableOperations().flush(tableName);
        // run a compaction that only compacts F files
        IteratorSetting iterSetting = new IteratorSetting(100, TestFilter.class);
        // make sure iterator options make it to compactor process
        iterSetting.addOption("expectedQ", QUEUE8);
        // compact the F file with a different modulus, using the pmodulus option for partial compaction
        iterSetting.addOption("pmodulus", 19 + "");
        CompactionConfig config = new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true).setSelector(new PluginConfig(FSelector.class.getName()));
        client.tableOperations().compact(tableName, config);
        try (Scanner scanner = client.createScanner(tableName)) {
            int count = 0;
            for (Entry<Key, Value> entry : scanner) {
                int v = Integer.parseInt(entry.getValue().toString());
                int modulus = v < MAX_DATA ? 17 : 19;
                assertTrue(String.format("%s %% %d != 0", entry.getValue(), modulus), v % modulus == 0);
                count++;
            }
            int expectedCount = 0;
            for (int i = 0; i < MAX_DATA * 2; i++) {
                int modulus = i < MAX_DATA ? 17 : 19;
                if (i % modulus == 0) {
                    expectedCount++;
                }
            }
            assertEquals(expectedCount, count);
        }
        // We need to cancel the compaction or delete the table here because we initiate a user
        // compaction above in the test. Even though the external compaction was cancelled
        // because we split the table, FaTE will continue to queue up a compaction
        client.tableOperations().cancelCompaction(tableName);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) PluginConfig(org.apache.accumulo.core.client.admin.PluginConfig) Scanner(org.apache.accumulo.core.client.Scanner) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
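
The selector passed to setSelector above, FSelector, is a nested class of the test whose source is not shown here. A minimal sketch of what such a selector might look like, assuming the CompactionSelector SPI in org.apache.accumulo.core.client.admin.compaction and that flushed files have names prefixed with "F":

import java.util.List;
import java.util.stream.Collectors;

import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;

// Hypothetical sketch of FSelector: selects only flushed files (names starting
// with "F"), so the user compaction above rewrites the F file but not the A file.
public class FSelector implements CompactionSelector {

    @Override
    public void init(InitParameters iparams) {
        // no options needed for this sketch
    }

    @Override
    public Selection select(SelectionParameters sparams) {
        List<CompactableFile> toCompact = sparams.getAvailableFiles().stream()
            // derive the file name from the URI; assumes flush files are prefixed with "F"
            .filter(cf -> {
                String path = cf.getUri().getPath();
                return path.substring(path.lastIndexOf('/') + 1).startsWith("F");
            })
            .collect(Collectors.toList());
        return new Selection(toCompact);
    }
}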

Example 12 with PluginConfig

Use of org.apache.accumulo.core.client.admin.PluginConfig in project accumulo by apache.

From the class ExternalCompaction_1_IT, method testConfigurer.

@Test
public void testConfigurer() throws Exception {
    String tableName = this.getUniqueNames(1)[0];
    getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE5);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
        Map<String, String> props = Map.of("table.compaction.dispatcher", SimpleCompactionDispatcher.class.getName(), "table.compaction.dispatcher.opts.service", "cs5", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
        NewTableConfiguration ntc = new NewTableConfiguration().setProperties(props);
        client.tableOperations().create(tableName, ntc);
        byte[] data = new byte[100000];
        Arrays.fill(data, (byte) 65);
        try (var writer = client.createBatchWriter(tableName)) {
            for (int row = 0; row < 10; row++) {
                Mutation m = new Mutation(row + "");
                m.at().family("big").qualifier("stuff").put(data);
                writer.addMutation(m);
            }
        }
        client.tableOperations().flush(tableName, null, null, true);
        // without compression, expect file to be large
        long sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setConfigurer(new PluginConfig(CompressionConfigurer.class.getName(), Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz", CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, data.length + ""))));
        // after compacting with compression, expect small file
        sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes: data: " + data.length + ", file:" + sizes, sizes < data.length);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
        // after compacting without compression, expect big files again
        sizes = CompactionExecutorIT.getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
        // We need to cancel the compaction or delete the table here because we initiate a user
        // compaction above in the test. Even though the external compaction was cancelled
        // because we split the table, FaTE will continue to queue up a compaction
        client.tableOperations().cancelCompaction(tableName);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) CompressionConfigurer(org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer) PluginConfig(org.apache.accumulo.core.client.admin.PluginConfig) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Mutation(org.apache.accumulo.core.data.Mutation) SimpleCompactionDispatcher(org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher) Test(org.junit.Test)
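
The same compression behavior can also be wired in as a table default instead of being passed per compaction. A minimal sketch under the assumption that the table.compaction.configurer property and its opts. prefix from the 2.1 compaction SPI are available; the threshold value is illustrative:

// Sketch: make CompressionConfigurer the table's default compaction configurer
// instead of passing it per compaction. The table.compaction.configurer property
// and its opts prefix are assumptions based on the 2.1 compaction SPI.
Map<String, String> configurerProps = Map.of(
    "table.compaction.configurer", CompressionConfigurer.class.getName(),
    "table.compaction.configurer.opts." + CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz",
    "table.compaction.configurer.opts." + CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, "100000");
client.tableOperations().create(tableName, new NewTableConfiguration().setProperties(configurerProps));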

Example 13 with PluginConfig

Use of org.apache.accumulo.core.client.admin.PluginConfig in project accumulo by apache.

From the class CompactionExecutorIT, method testConfigurer.

@Test
public void testConfigurer() throws Exception {
    String tableName = "tcc";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        var ntc = new NewTableConfiguration().setProperties(Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none"));
        client.tableOperations().create(tableName, ntc);
        byte[] data = new byte[100000];
        Arrays.fill(data, (byte) 65);
        try (var writer = client.createBatchWriter(tableName)) {
            for (int row = 0; row < 10; row++) {
                Mutation m = new Mutation(row + "");
                m.at().family("big").qualifier("stuff").put(data);
                writer.addMutation(m);
            }
        }
        client.tableOperations().flush(tableName, null, null, true);
        // without compression, expect file to be large
        long sizes = getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setConfigurer(new PluginConfig(CompressionConfigurer.class.getName(), Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz", CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, data.length + ""))));
        // after compacting with compression, expect small file
        sizes = getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes < data.length);
        client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
        // after compacting without compression, expect big files again
        sizes = getFileSizes(client, tableName);
        assertTrue("Unexpected files sizes : " + sizes, sizes > data.length * 10 && sizes < data.length * 11);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) PluginConfig(org.apache.accumulo.core.client.admin.PluginConfig) CompressionConfigurer(org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
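
The two configurer examples above use PluginConfig's two-argument constructor to pass an options map to the plugin; the same constructor drives selectors. A hedged sketch using the TooManyDeletesSelector listed under Aggregations (the option name and value below are assumptions based on that selector's documented behavior, not taken from these tests):

// Sketch: PluginConfig's options map also configures compaction selectors.
// "threshold" (fraction of delete entries that triggers selection) is an
// assumption based on TooManyDeletesSelector's documented options.
CompactionConfig selectorConfig = new CompactionConfig()
    .setWait(true)
    .setSelector(new PluginConfig(TooManyDeletesSelector.class.getName(),
        Map.of("threshold", ".25")));
client.tableOperations().compact(tableName, selectorConfig);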

Example 14 with PluginConfig

Use of org.apache.accumulo.core.client.admin.PluginConfig in project accumulo by apache.

From the class CompactionIT, method testPartialCompaction.

@Test
public void testPartialCompaction() throws Exception {
    String tableName = getUniqueNames(1)[0];
    try (final AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        client.tableOperations().create(tableName);
        // Insert MAX_DATA rows
        try (BatchWriter bw = client.createBatchWriter(tableName)) {
            for (int i = 0; i < MAX_DATA; i++) {
                Mutation m = new Mutation(String.format("r:%04d", i));
                m.put("", "", "" + i);
                bw.addMutation(m);
            }
        }
        client.tableOperations().flush(tableName);
        IteratorSetting iterSetting = new IteratorSetting(100, TestFilter.class);
        // make sure iterator options make it to compactor process
        iterSetting.addOption("modulus", 17 + "");
        CompactionConfig config = new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true);
        client.tableOperations().compact(tableName, config);
        // Insert another MAX_DATA rows, for 2 * MAX_DATA total
        try (BatchWriter bw = client.createBatchWriter(tableName)) {
            for (int i = MAX_DATA; i < MAX_DATA * 2; i++) {
                Mutation m = new Mutation(String.format("r:%04d", i));
                m.put("", "", "" + i);
                bw.addMutation(m);
            }
        }
        // this should create an F file
        client.tableOperations().flush(tableName);
        // run a compaction that only compacts F files
        iterSetting = new IteratorSetting(100, TestFilter.class);
        // compact the F file with a different modulus, using the pmodulus option for partial compaction
        iterSetting.addOption("pmodulus", 19 + "");
        config = new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true).setSelector(new PluginConfig(FSelector.class.getName()));
        client.tableOperations().compact(tableName, config);
        try (Scanner scanner = client.createScanner(tableName)) {
            int count = 0;
            for (Entry<Key, Value> entry : scanner) {
                int v = Integer.parseInt(entry.getValue().toString());
                int modulus = v < MAX_DATA ? 17 : 19;
                assertTrue(String.format("%s %% %d != 0", entry.getValue(), modulus), v % modulus == 0);
                count++;
            }
            // Verify
            int expectedCount = 0;
            for (int i = 0; i < MAX_DATA * 2; i++) {
                int modulus = i < MAX_DATA ? 17 : 19;
                if (i % modulus == 0) {
                    expectedCount++;
                }
            }
            assertEquals(expectedCount, count);
        }
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Scanner(org.apache.accumulo.core.client.Scanner) PluginConfig(org.apache.accumulo.core.client.admin.PluginConfig) FSelector(org.apache.accumulo.test.compaction.ExternalCompaction_1_IT.FSelector) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
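
Both partial-compaction tests above depend on a TestFilter iterator whose source is not shown. A minimal sketch of what it plausibly looks like, assuming it simply drops values not divisible by the configured modulus; the real test class additionally validates options such as expectedQ and asserts which compaction scope it runs in:

import java.io.IOException;
import java.util.Map;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

// Hypothetical sketch of TestFilter: keeps only entries whose integer value is
// divisible by the configured modulus. "pmodulus" is the option the tests pass
// for user-initiated (partial) compactions, "modulus" for the full compaction.
public class TestFilter extends Filter {

    private int modulus = 1;

    @Override
    public void init(SortedKeyValueIterator<Key, Value> source, Map<String, String> options,
            IteratorEnvironment env) throws IOException {
        super.init(source, options, env);
        if (options.containsKey("pmodulus")) {
            modulus = Integer.parseInt(options.get("pmodulus"));
        } else if (options.containsKey("modulus")) {
            modulus = Integer.parseInt(options.get("modulus"));
        }
    }

    @Override
    public boolean accept(Key k, Value v) {
        return Integer.parseInt(v.toString()) % modulus == 0;
    }
}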

Aggregations

PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig): 14
CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig): 12
Test (org.junit.Test): 10
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 9
Mutation (org.apache.accumulo.core.data.Mutation): 6
NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration): 5
CompressionConfigurer (org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer): 4
Value (org.apache.accumulo.core.data.Value): 4
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 3
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 3
HashMap (java.util.HashMap): 2
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 2
Scanner (org.apache.accumulo.core.client.Scanner): 2
TooManyDeletesSelector (org.apache.accumulo.core.client.admin.compaction.TooManyDeletesSelector): 2
Key (org.apache.accumulo.core.data.Key): 2
Preconditions (com.google.common.base.Preconditions): 1
Cache (com.google.common.cache.Cache): 1
CacheBuilder (com.google.common.cache.CacheBuilder): 1
Collections2 (com.google.common.collect.Collections2): 1
IOException (java.io.IOException): 1