Example usage of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project: the getCompactionID method of the Tablet class.
/**
 * Reads this tablet's compaction ID (and any user-requested compaction config) from ZooKeeper.
 *
 * <p>The znode value is {@code "<id>"} or {@code "<id>,<key>=<hex-encoded-config>"}. The decoded
 * config is returned only when its row range overlaps this tablet's extent; otherwise a default
 * {@link CompactionConfig} is returned.
 *
 * @return pair of the compaction ID and the applicable (possibly default) compaction config
 * @throws NoNodeException if the compact-id znode does not exist for this table
 * @throws RuntimeException wrapping any other ZooKeeper, decoding, or parsing failure
 */
public Pair<Long, CompactionConfig> getCompactionID() throws NoNodeException {
  try {
    String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstanceID() + Constants.ZTABLES
        + "/" + extent.tableId() + Constants.ZTABLE_COMPACT_ID;
    String[] tokens =
        new String(context.getZooReaderWriter().getData(zTablePath), UTF_8).split(",");
    long compactID = Long.parseLong(tokens[0]);
    CompactionConfig overlappingConfig = null;
    if (tokens.length > 1) {
      // Second token is "<key>=<hex>"; decode the hex payload into a CompactionConfig.
      Hex hex = new Hex();
      ByteArrayInputStream bais =
          new ByteArrayInputStream(hex.decode(tokens[1].split("=")[1].getBytes(UTF_8)));
      DataInputStream dis = new DataInputStream(bais);
      var compactionConfig = UserCompactionUtils.decodeCompactionConfig(dis);
      KeyExtent ke = new KeyExtent(extent.tableId(), compactionConfig.getEndRow(),
          compactionConfig.getStartRow());
      // Only honor the requested config if its range overlaps this tablet.
      if (ke.overlaps(extent)) {
        overlappingConfig = compactionConfig;
      }
    }
    if (overlappingConfig == null) {
      // no config present, set to default
      overlappingConfig = new CompactionConfig();
    }
    return new Pair<>(compactID, overlappingConfig);
  } catch (InterruptedException e) {
    // Restore the interrupt status before translating to unchecked.
    Thread.currentThread().interrupt();
    throw new RuntimeException("Exception on " + extent + " getting compaction ID", e);
  } catch (DecoderException | NumberFormatException e) {
    throw new RuntimeException("Exception on " + extent + " getting compaction ID", e);
  } catch (NoNodeException nne) {
    // Narrower KeeperException subtype is part of this method's contract; rethrow as-is.
    throw nne;
  } catch (KeeperException ke) {
    throw new RuntimeException("Exception on " + extent + " getting compaction ID", ke);
  }
}
Example usage of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project: the testSampleNotPresent method of the SampleIT class.
@Test
public void testSampleNotPresent() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    String tableName = getUniqueNames(1)[0];
    String clone = tableName + "_clone";
    client.tableOperations().create(tableName);
    TreeMap<Key, Value> expected = new TreeMap<>();
    try (BatchWriter bw = client.createBatchWriter(tableName)) {
      writeData(bw, SC1, expected);
    }
    // All scanner types are AutoCloseable; manage them in one try-with-resources so they are
    // released even when an assertion fails (the originals leaked on any failure path).
    try (Scanner scanner = client.createScanner(tableName);
        Scanner isoScanner = new IsolatedScanner(client.createScanner(tableName));
        Scanner csiScanner = new ClientSideIteratorScanner(client.createScanner(tableName));
        BatchScanner bScanner = client.createBatchScanner(tableName)) {
      isoScanner.setBatchSize(10);
      bScanner.setRanges(Arrays.asList(new Range()));
      // ensure sample not present exception occurs when sampling is not configured
      assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner);
      client.tableOperations().flush(tableName, null, null, true);
      Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
      assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
      // configure sampling, however there exist an rfile w/o sample data... so should still see
      // sample not present exception
      updateSamplingConfig(client, tableName, SC1);
      // create clone with new config
      oScanner = newOfflineScanner(client, tableName, clone, SC1);
      assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
      // create rfile with sample data present
      client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
      // should be able to scan sample now
      oScanner = newOfflineScanner(client, tableName, clone, SC1);
      setSamplerConfig(SC1, scanner, csiScanner, isoScanner, bScanner, oScanner);
      check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
      // change sampling config
      updateSamplingConfig(client, tableName, SC2);
      // create clone with new config
      oScanner = newOfflineScanner(client, tableName, clone, SC2);
      // rfile should have different sample config than table, and scan should not work
      assertSampleNotPresent(SC2, scanner, isoScanner, bScanner, csiScanner, oScanner);
      // create rfile that has same sample data as table config
      client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
      // should be able to scan sample now
      updateExpected(SC2, expected);
      oScanner = newOfflineScanner(client, tableName, clone, SC2);
      setSamplerConfig(SC2, scanner, csiScanner, isoScanner, bScanner, oScanner);
      check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
    }
  }
}
Example usage of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project: the testRemoveVolumes method of the VolumeIT class.
@Test
public void testRemoveVolumes() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
    String[] names = getUniqueNames(2);

    // Seed data across both volumes, then shut the cluster down cleanly.
    verifyVolumesUsed(client, names[0], false, v1, v2);
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    cluster.stop();

    // Drop v1 from the instance volume list.
    updateConfig(config -> config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString()));

    // start cluster and verify that volume was decommissioned
    cluster.start();
    client.tableOperations().compact(names[0], null, null, true, true);
    verifyVolumesUsed(client, names[0], true, v2);

    client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));

    // check that root tablet is not on volume 1
    int rootFileCount = 0;
    for (StoredTabletFile stf : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT)
        .getFiles()) {
      assertTrue(stf.getMetaUpdateDelete().startsWith(v2.toString()));
      rootFileCount++;
    }
    // The root tablet must have at least one file after the forced compaction.
    assertTrue(rootFileCount > 0);

    client.tableOperations().clone(names[0], names[1], true, new HashMap<>(), new HashSet<>());
    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(client, names[0], true, v2);
    verifyVolumesUsed(client, names[1], true, v2);
  }
}
Example usage of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project: the testCompact method of the IteratorEnvIT class.
/**
 * Writes data to the given table, then runs a compaction configured with the supplied iterator
 * class, passing the table's ID to the iterator via the "expected.table.id" option.
 */
public void testCompact(String tableName,
    Class<? extends SortedKeyValueIterator<Key, Value>> iteratorClass) throws Exception {
  writeData(tableName);

  IteratorSetting iterSetting = new IteratorSetting(1, iteratorClass);
  iterSetting.addOption("expected.table.id", client.tableOperations().tableIdMap().get(tableName));

  CompactionConfig compactionConfig = new CompactionConfig();
  compactionConfig.setIterators(Collections.singletonList(iterSetting));
  client.tableOperations().compact(tableName, compactionConfig);
}
Example usage of org.apache.accumulo.core.client.admin.CompactionConfig in the Apache Accumulo project: the testDropA method of the UserCompactionStrategyIT class.
@Test
public void testDropA() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);

  writeFlush(c, tableName, "a");
  writeFlush(c, tableName, "b");
  // create a file that starts with A containing rows 'a' and 'b'
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));

  writeFlush(c, tableName, "c");
  writeFlush(c, tableName, "d");

  // drop files that start with A
  CompactionStrategyConfig strategyConfig =
      new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
  strategyConfig.setOptions(ImmutableMap.of("dropPrefix", "A", "inputPrefix", "F"));
  c.tableOperations().compact(tableName,
      new CompactionConfig().setWait(true).setCompactionStrategy(strategyConfig));
  Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));

  // this compaction should not drop files starting with A; compact twice on purpose to
  // confirm repeated default compactions leave the data intact
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
  Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));
}
Aggregations