Usage of org.apache.accumulo.core.metadata.schema.ExternalCompactionId in the Apache Accumulo project.
From class ExternalCompaction_2_IT, method testDeleteTableCancelsExternalCompaction.
@Test
public void testDeleteTableCancelsExternalCompaction() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE5);
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table1, "cs5");
    // set compaction ratio to 1 so that majc occurs naturally, not user compaction
    // user compaction blocks delete
    client.tableOperations().setProperty(table1, Property.TABLE_MAJC_RATIO.toString(), "1.0");
    // cause multiple rfiles to be created
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    TableId tid = getCluster().getServerContext().getTableId(table1);
    // Wait for the compaction to start by waiting for 1 external compaction column
    Set<ExternalCompactionId> ecids = ExternalCompactionTestUtils
        .waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);
    // Confirm that this ECID shows up in RUNNING set
    int matches = ExternalCompactionTestUtils
        .confirmCompactionRunning(getCluster().getServerContext(), ecids);
    assertTrue(matches > 0);
    client.tableOperations().delete(table1);
    confirmCompactionCompleted(getCluster().getServerContext(), ecids, TCompactionState.CANCELLED);
    // Use try-with-resources so the metadata scan is closed even if the assertion
    // fails (the previous manual close() leaked the scanner on assertion failure)
    try (TabletsMetadata tm = getCluster().getServerContext().getAmple().readTablets()
        .forTable(tid).fetch(ColumnType.ECOMP).build()) {
      // the deleted table should have no tablets, hence no external compaction entries
      assertEquals(0, tm.stream().count());
    }
  }
}
Usage of org.apache.accumulo.core.metadata.schema.ExternalCompactionId in the Apache Accumulo project.
From class ExternalCompaction_3_IT, method testCoordinatorRestartsDuringCompaction.
@Test
public void testCoordinatorRestartsDuringCompaction() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE2);
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table1, "cs2", 2);
    writeData(client, table1);
    compact(client, table1, 2, QUEUE2, false);
    TableId tid = getCluster().getServerContext().getTableId(table1);
    // Wait for the compaction to start by waiting for 1 external compaction column
    Set<ExternalCompactionId> ecids =
        waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);
    // Stop the Coordinator
    getCluster().getClusterControl().stop(ServerType.COMPACTION_COORDINATOR);
    // Restart the coordinator while the compaction is running
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    // Confirm compaction is still running: poll the restarted coordinator until it
    // reports at least one of our ECIDs as IN_PROGRESS
    int matches = 0;
    while (matches == 0) {
      TExternalCompactionList running = getRunningCompactions(getCluster().getServerContext());
      if (running.getCompactions() != null) {
        for (ExternalCompactionId ecid : ecids) {
          TExternalCompaction tec = running.getCompactions().get(ecid.canonical());
          if (tec != null && tec.getUpdates() != null && !tec.getUpdates().isEmpty()) {
            matches++;
            assertEquals(TCompactionState.IN_PROGRESS, getLastState(tec));
          }
        }
      }
      // only back off when nothing matched yet; the original slept unconditionally,
      // wasting 250ms on the final (successful) iteration
      if (matches == 0) {
        UtilWaitThread.sleep(250);
      }
    }
    assertTrue(matches > 0);
    // We need to cancel the compaction or delete the table here because we initiated a
    // user compaction above in the test; otherwise FaTE will keep the compaction
    // queued after the test ends. (No split occurs in this test.)
    client.tableOperations().cancelCompaction(table1);
  }
}
Usage of org.apache.accumulo.core.metadata.schema.ExternalCompactionId in the Apache Accumulo project.
From class ExternalCompaction_3_IT, method testMergeCancelsExternalCompaction.
@Test
public void testMergeCancelsExternalCompaction() throws Exception {
getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE1);
String table1 = this.getUniqueNames(1)[0];
try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
createTable(client, table1, "cs1", 2);
// set compaction ratio to 1 so that majc occurs naturally, not user compaction
// user compaction blocks merge
client.tableOperations().setProperty(table1, Property.TABLE_MAJC_RATIO.toString(), "1.0");
// cause multiple rfiles to be created
writeData(client, table1);
writeData(client, table1);
writeData(client, table1);
writeData(client, table1);
TableId tid = getCluster().getServerContext().getTableId(table1);
// Wait for the compaction to start by waiting for 1 external compaction column
Set<ExternalCompactionId> ecids = waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);
// Read both tablets' metadata so we can compute the merge range below
var md = new ArrayList<TabletMetadata>();
try (TabletsMetadata tm = getCluster().getServerContext().getAmple().readTablets().forTable(tid).fetch(ColumnType.PREV_ROW).build()) {
tm.forEach(t -> md.add(t));
assertEquals(2, md.size());
}
// Merge - blocking operation; merging the tablets should cancel the running
// external compaction
Text start = md.get(0).getPrevEndRow();
Text end = md.get(1).getEndRow();
client.tableOperations().merge(table1, start, end);
confirmCompactionCompleted(getCluster().getServerContext(), ecids, TCompactionState.CANCELLED);
// ensure compaction ids were deleted by merge operation from metadata table
try (TabletsMetadata tm = getCluster().getServerContext().getAmple().readTablets().forTable(tid).fetch(ColumnType.ECOMP).build()) {
Set<ExternalCompactionId> ecids2 = tm.stream().flatMap(t -> t.getExternalCompactions().keySet().stream()).collect(Collectors.toSet());
// keep checking until test times out
while (!Collections.disjoint(ecids, ecids2)) {
UtilWaitThread.sleep(25);
ecids2 = tm.stream().flatMap(t -> t.getExternalCompactions().keySet().stream()).collect(Collectors.toSet());
}
}
// We need to cancel the compaction or delete the table here because we initiate a user
// compaction above in the test. Even though the external compaction was cancelled
// because we merged the table, FaTE will continue to queue up a compaction
client.tableOperations().delete(table1);
}
}
Usage of org.apache.accumulo.core.metadata.schema.ExternalCompactionId in the Apache Accumulo project.
From class CompactionFinalizer, method processPending.
/**
 * Drains pending compaction-completion notifications and processes them in batches until the
 * thread is interrupted. For each finalized external compaction: if the tablet no longer has a
 * matching external compaction entry, the stale final-state marker is deleted from the metadata
 * table; if the tablet has a current location, the hosting tserver is notified asynchronously.
 * Tablets with no current location are skipped and retried on a later pass.
 */
private void processPending() {
  while (!Thread.interrupted()) {
    try {
      ArrayList<ExternalCompactionFinalState> batch = new ArrayList<>();
      // block for at least one notification, then grab everything else that is queued
      batch.add(pendingNotifications.take());
      pendingNotifications.drainTo(batch);

      List<Future<?>> futures = new ArrayList<>();
      List<ExternalCompactionId> statusesToDelete = new ArrayList<>();

      // read metadata for all extents in the batch in a single scan
      Map<KeyExtent,TabletMetadata> tabletsMetadata;
      var extents = batch.stream().map(ExternalCompactionFinalState::getExtent).collect(toList());
      try (TabletsMetadata tablets = context.getAmple().readTablets().forTablets(extents)
          .fetch(ColumnType.LOCATION, ColumnType.PREV_ROW, ColumnType.ECOMP).build()) {
        tabletsMetadata = tablets.stream().collect(toMap(TabletMetadata::getExtent, identity()));
      }

      for (ExternalCompactionFinalState ecfs : batch) {
        TabletMetadata tabletMetadata = tabletsMetadata.get(ecfs.getExtent());
        // use containsKey directly instead of keySet().contains()
        if (tabletMetadata == null
            || !tabletMetadata.getExternalCompactions().containsKey(ecfs.getExternalCompactionId())) {
          // there is no per-tablet external compaction entry, so delete its final state marker
          // from metadata table
          LOG.debug("Unable to find tablets external compaction entry, deleting completion entry {}", ecfs);
          statusesToDelete.add(ecfs.getExternalCompactionId());
        } else if (tabletMetadata.getLocation() != null
            && tabletMetadata.getLocation().getType() == LocationType.CURRENT) {
          futures.add(ntfyExecutor.submit(() -> notifyTserver(tabletMetadata.getLocation(), ecfs)));
        } else {
          LOG.trace("External compaction {} is completed, but there is no location for tablet. Unable to notify tablet, will try again later.", ecfs);
        }
      }

      if (!statusesToDelete.isEmpty()) {
        LOG.info("Deleting unresolvable completed external compactions from metadata table, ids: {}", statusesToDelete);
        context.getAmple().deleteExternalCompactionFinalStates(statusesToDelete);
      }

      // wait for all notifications; a failed notify is only logged and retried on a later pass
      for (Future<?> future : futures) {
        try {
          future.get();
        } catch (ExecutionException e) {
          LOG.debug("Failed to notify tserver", e);
        }
      }
    } catch (InterruptedException e) {
      // restore interrupt status before propagating
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    } catch (RuntimeException e) {
      LOG.warn("Failed to process pending notifications", e);
    }
  }
}
Usage of org.apache.accumulo.core.metadata.schema.ExternalCompactionId in the Apache Accumulo project.
From class ThriftClientHandler, method reserveCompactionJob.
/**
 * Thrift endpoint used by a compactor to reserve an external compaction job from the named
 * queue. Requires system-level permission. Returns an empty {@code TExternalCompactionJob}
 * when no work is available.
 */
@Override
public TExternalCompactionJob reserveCompactionJob(TInfo tinfo, TCredentials credentials,
    String queueName, long priority, String compactor, String externalCompactionId)
    throws ThriftSecurityException, TException {
  // only system-level callers may reserve compaction work
  if (!security.canPerformSystemActions(credentials)) {
    throw new AccumuloSecurityException(credentials.getPrincipal(),
        SecurityErrorCode.PERMISSION_DENIED).asThriftException();
  }
  var compactionId = ExternalCompactionId.of(externalCompactionId);
  var reserved = server.getCompactionManager().reserveExternalCompaction(queueName, priority,
      compactor, compactionId);
  // an empty job object signals to the compactor that nothing was reserved
  return reserved == null ? new TExternalCompactionJob() : reserved.toThrift();
}
Aggregations