Example use of org.apache.accumulo.core.client.admin.PluginConfig in the Apache Accumulo project: class ExternalCompaction_1_IT, method testPartialCompaction.
/**
 * Verifies partial compaction driven by an external compaction queue: rows written before the
 * first full compaction survive a modulus-17 filter, then newly flushed rows are compacted with
 * a selector that picks only F (flush) files and a modulus-19 filter, so the two row ranges end
 * up filtered by different moduli.
 *
 * @throws Exception if any client or cluster operation fails
 */
@Test
public void testPartialCompaction() throws Exception {
  String tableName = getUniqueNames(1)[0];
  try (final AccumuloClient client =
      Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE8);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    createTable(client, tableName, "cs8");
    writeData(client, tableName);
    // This should create an A file
    compact(client, tableName, 17, QUEUE8, true);
    verify(client, tableName, 17);
    // write a second batch of rows [MAX_DATA, 2*MAX_DATA) that will only exist in a flush file
    try (BatchWriter bw = client.createBatchWriter(tableName)) {
      for (int i = MAX_DATA; i < MAX_DATA * 2; i++) {
        Mutation m = new Mutation(row(i));
        m.put("", "", "" + i);
        bw.addMutation(m);
      }
    }
    // this should create an F file
    client.tableOperations().flush(tableName);
    // run a compaction that only compacts F files
    IteratorSetting iterSetting = new IteratorSetting(100, TestFilter.class);
    // make sure iterator options make it to compactor process
    iterSetting.addOption("expectedQ", QUEUE8);
    // compact F file w/ different modulus and use the pmodulus option for partial compaction
    iterSetting.addOption("pmodulus", 19 + "");
    CompactionConfig config = new CompactionConfig().setIterators(List.of(iterSetting))
        .setWait(true).setSelector(new PluginConfig(FSelector.class.getName()));
    client.tableOperations().compact(tableName, config);
    try (Scanner scanner = client.createScanner(tableName)) {
      int count = 0;
      for (Entry<Key,Value> entry : scanner) {
        int v = Integer.parseInt(entry.getValue().toString());
        // rows below MAX_DATA went through the modulus-17 filter, the rest through modulus-19
        int modulus = v < MAX_DATA ? 17 : 19;
        // reuse the already-parsed value rather than parsing the entry a second time
        assertTrue(String.format("%s %s %d != 0", entry.getValue(), "%", modulus),
            v % modulus == 0);
        count++;
      }
      // independently recompute how many rows each filter should have let through
      int expectedCount = 0;
      for (int i = 0; i < MAX_DATA * 2; i++) {
        int modulus = i < MAX_DATA ? 17 : 19;
        if (i % modulus == 0) {
          expectedCount++;
        }
      }
      assertEquals(expectedCount, count);
    }
    // We need to cancel the compaction or delete the table here because we initiate a user
    // compaction above in the test. Even though the external compaction was cancelled
    // because we split the table, FaTE will continue to queue up a compaction
    client.tableOperations().cancelCompaction(tableName);
  }
}
Example use of org.apache.accumulo.core.client.admin.PluginConfig in the Apache Accumulo project: class ExternalCompaction_1_IT, method testConfigurer.
/**
 * Checks that a CompressionConfigurer supplied via PluginConfig is honored by an external
 * compaction: data written uncompressed stays large, compacting with the configurer shrinks the
 * file, and a plain follow-up compaction restores the uncompressed size.
 *
 * @throws Exception if any client or cluster operation fails
 */
@Test
public void testConfigurer() throws Exception {
  String tableName = this.getUniqueNames(1)[0];
  getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE5);
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  try (AccumuloClient client =
      Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    // table starts with compression disabled and dispatches compactions to service cs5
    Map<String,String> props =
        Map.of("table.compaction.dispatcher", SimpleCompactionDispatcher.class.getName(),
            "table.compaction.dispatcher.opts.service", "cs5",
            Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
    client.tableOperations().create(tableName, new NewTableConfiguration().setProperties(props));

    // ten mutations, each carrying a highly compressible 100KB value of repeated 'A' bytes
    byte[] data = new byte[100000];
    Arrays.fill(data, (byte) 65);
    try (var writer = client.createBatchWriter(tableName)) {
      int row = 0;
      while (row < 10) {
        Mutation m = new Mutation(row + "");
        m.at().family("big").qualifier("stuff").put(data);
        writer.addMutation(m);
        row++;
      }
    }
    client.tableOperations().flush(tableName, null, null, true);

    // without compression, expect file to be large
    long sizes = CompactionExecutorIT.getFileSizes(client, tableName);
    boolean withinUncompressedBounds = sizes > data.length * 10 && sizes < data.length * 11;
    assertTrue("Unexpected files sizes : " + sizes, withinUncompressedBounds);

    // compact with a configurer that turns on gz compression for files over the threshold
    var configurer = new PluginConfig(CompressionConfigurer.class.getName(),
        Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz",
            CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, data.length + ""));
    client.tableOperations().compact(tableName,
        new CompactionConfig().setWait(true).setConfigurer(configurer));

    // after compacting with compression, expect small file
    sizes = CompactionExecutorIT.getFileSizes(client, tableName);
    assertTrue("Unexpected files sizes: data: " + data.length + ", file:" + sizes,
        sizes < data.length);

    // after compacting without compression, expect big files again
    client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
    sizes = CompactionExecutorIT.getFileSizes(client, tableName);
    assertTrue("Unexpected files sizes : " + sizes,
        sizes > data.length * 10 && sizes < data.length * 11);

    // We need to cancel the compaction or delete the table here because we initiate a user
    // compaction above in the test. Even though the external compaction was cancelled
    // because we split the table, FaTE will continue to queue up a compaction
    client.tableOperations().cancelCompaction(tableName);
  }
}
Example use of org.apache.accumulo.core.client.admin.PluginConfig in the Apache Accumulo project: class CompactionExecutorIT, method testConfigurer.
/**
 * Exercises CompressionConfigurer through PluginConfig on a plain (non-external) compaction:
 * uncompressed data yields a large file, compacting with the configurer produces a small gz
 * file, and compacting again without it restores the large size.
 *
 * @throws Exception if any client operation fails
 */
@Test
public void testConfigurer() throws Exception {
  String tableName = "tcc";
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    // create the table with file compression explicitly disabled
    var ntc = new NewTableConfiguration()
        .setProperties(Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none"));
    client.tableOperations().create(tableName, ntc);

    // ten rows, each with a 100KB value of repeated 'A' bytes (compresses extremely well)
    byte[] data = new byte[100000];
    Arrays.fill(data, (byte) 65);
    try (var writer = client.createBatchWriter(tableName)) {
      int row = 0;
      while (row < 10) {
        Mutation m = new Mutation(row + "");
        m.at().family("big").qualifier("stuff").put(data);
        writer.addMutation(m);
        row++;
      }
    }
    client.tableOperations().flush(tableName, null, null, true);

    // without compression, expect file to be large
    long sizes = getFileSizes(client, tableName);
    boolean withinUncompressedBounds = sizes > data.length * 10 && sizes < data.length * 11;
    assertTrue("Unexpected files sizes : " + sizes, withinUncompressedBounds);

    // compact with a configurer enabling gz compression above the size threshold
    var configurer = new PluginConfig(CompressionConfigurer.class.getName(),
        Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz",
            CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, data.length + ""));
    client.tableOperations().compact(tableName,
        new CompactionConfig().setWait(true).setConfigurer(configurer));

    // after compacting with compression, expect small file
    sizes = getFileSizes(client, tableName);
    assertTrue("Unexpected files sizes : " + sizes, sizes < data.length);

    // after compacting without compression, expect big files again
    client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
    sizes = getFileSizes(client, tableName);
    assertTrue("Unexpected files sizes : " + sizes,
        sizes > data.length * 10 && sizes < data.length * 11);
  }
}
Example use of org.apache.accumulo.core.client.admin.PluginConfig in the Apache Accumulo project: class CompactionIT, method testPartialCompaction.
/**
 * Verifies partial compaction on a local cluster: the first MAX_DATA rows are compacted with a
 * modulus-17 filter, a second batch of rows is flushed, and a selector that picks only F (flush)
 * files applies a modulus-19 filter to just the new data — so the two row ranges are filtered by
 * different moduli.
 *
 * @throws Exception if any client operation fails
 */
@Test
public void testPartialCompaction() throws Exception {
  String tableName = getUniqueNames(1)[0];
  try (final AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    client.tableOperations().create(tableName);
    // Insert MAX_DATA rows
    try (BatchWriter bw = client.createBatchWriter(tableName)) {
      for (int i = 0; i < MAX_DATA; i++) {
        Mutation m = new Mutation(String.format("r:%04d", i));
        m.put("", "", "" + i);
        bw.addMutation(m);
      }
    }
    client.tableOperations().flush(tableName);
    IteratorSetting iterSetting = new IteratorSetting(100, TestFilter.class);
    // make sure iterator options make it to compactor process
    iterSetting.addOption("modulus", 17 + "");
    CompactionConfig config =
        new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true);
    client.tableOperations().compact(tableName, config);
    // Insert rows [MAX_DATA, 2*MAX_DATA) that will exist only in a flush file
    try (BatchWriter bw = client.createBatchWriter(tableName)) {
      for (int i = MAX_DATA; i < MAX_DATA * 2; i++) {
        Mutation m = new Mutation(String.format("r:%04d", i));
        m.put("", "", "" + i);
        bw.addMutation(m);
      }
    }
    // this should create an F file
    client.tableOperations().flush(tableName);
    // run a compaction that only compacts F files
    iterSetting = new IteratorSetting(100, TestFilter.class);
    // compact F file w/ different modulus and use the pmodulus option for partial compaction
    iterSetting.addOption("pmodulus", 19 + "");
    config = new CompactionConfig().setIterators(List.of(iterSetting)).setWait(true)
        .setSelector(new PluginConfig(FSelector.class.getName()));
    client.tableOperations().compact(tableName, config);
    try (Scanner scanner = client.createScanner(tableName)) {
      int count = 0;
      for (Entry<Key,Value> entry : scanner) {
        int v = Integer.parseInt(entry.getValue().toString());
        // rows below MAX_DATA went through the modulus-17 filter, the rest through modulus-19
        int modulus = v < MAX_DATA ? 17 : 19;
        // reuse the already-parsed value rather than parsing the entry a second time
        assertTrue(String.format("%s %s %d != 0", entry.getValue(), "%", modulus),
            v % modulus == 0);
        count++;
      }
      // Verify: independently recompute the number of surviving rows
      int expectedCount = 0;
      for (int i = 0; i < MAX_DATA * 2; i++) {
        int modulus = i < MAX_DATA ? 17 : 19;
        if (i % modulus == 0) {
          expectedCount++;
        }
      }
      assertEquals(expectedCount, count);
    }
  }
}
End of aggregated PluginConfig usage examples.