Example usage of org.apache.accumulo.core.compaction.thrift.TExternalCompactionList in the Apache Accumulo project.
From the class ExternalCompaction_2_IT, the method testExternalCompactionsSucceedsRunWithTableOffline:
/**
 * Verifies that an external compaction still succeeds when its table is taken offline
 * mid-compaction. The compactor finishes and records a final state in the metadata table;
 * committing that state is deferred until the table is brought back online, at which point
 * a tserver commits it and the final-state entry disappears.
 */
@Test
public void testExternalCompactionsSucceedsRunWithTableOffline() throws Exception {
  // Start from a clean slate: no coordinator or compactor running yet.
  getCluster().getClusterControl().stop(ServerType.COMPACTION_COORDINATOR);
  getCluster().getClusterControl().stop(ServerType.COMPACTOR);
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client =
      Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table1, "cs2");
    // set compaction ratio to 1 so that majc occurs naturally, not user compaction
    // user compaction blocks merge
    client.tableOperations().setProperty(table1, Property.TABLE_MAJC_RATIO.toString(), "1.0");
    // cause multiple rfiles to be created
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    getCluster().getClusterControl()
        .startCoordinator(TestCompactionCoordinatorForOfflineTable.class);
    TableId tid = getCluster().getServerContext().getTableId(table1);
    // Confirm that no final state is in the metadata table
    assertEquals(0, getFinalStatesForTable(getCluster(), tid).count());
    // Offline the table as soon as the compaction starts running.
    final AtomicBoolean succeededInTakingOffline = new AtomicBoolean(false);
    Thread t = new Thread(() -> {
      try (AccumuloClient client2 =
          Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
        // Poll until the coordinator reports at least one running compaction.
        while (getRunningCompactions(getCluster().getServerContext()).getCompactions() == null) {
          UtilWaitThread.sleep(50);
        }
        LOG.info("Taking table offline");
        client2.tableOperations().offline(table1, false);
        succeededInTakingOffline.set(true);
      } catch (Exception e) {
        // Leave the flag false; the main thread fails the test after join().
        LOG.error("Error: ", e);
      }
    });
    t.start();
    // Start the compactor
    getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE2);
    // Wait for the compaction to start by waiting for 1 external compaction column
    Set<ExternalCompactionId> ecids = ExternalCompactionTestUtils
        .waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);
    // Confirm that this ECID shows up in RUNNING set
    int matches = ExternalCompactionTestUtils
        .confirmCompactionRunning(getCluster().getServerContext(), ecids);
    assertTrue(matches > 0);
    t.join();
    if (!succeededInTakingOffline.get()) {
      fail("Failed to offline table");
    }
    confirmCompactionCompleted(getCluster().getServerContext(), ecids,
        TCompactionState.SUCCEEDED);
    // Confirm that final state is in the metadata table: the compaction finished while the
    // table was offline, so its result is parked there awaiting commit.
    assertEquals(1, getFinalStatesForTable(getCluster(), tid).count());
    // Online the table
    client.tableOperations().online(table1);
    // wait for compaction to be committed by tserver or test timeout; the final-state
    // entry is removed once a tserver commits the compaction result.
    while (getFinalStatesForTable(getCluster(), tid).count() > 0) {
      UtilWaitThread.sleep(50);
    }
    // Clean up: delete the table and stop the external compaction services started above.
    // NOTE(review): an earlier comment here referenced a user compaction and a table split,
    // neither of which happens in this test (the compaction is a natural majc, and the
    // table is offlined, not split) — the delete remains purely as cleanup.
    client.tableOperations().delete(table1);
    getCluster().getClusterControl().stop(ServerType.COMPACTION_COORDINATOR);
    getCluster().getClusterControl().stop(ServerType.COMPACTOR);
  }
}
Aggregations