Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project: class RenameNamespaceCommand, method execute.
@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException, NamespaceNotFoundException, NamespaceExistsException {
  final String oldName = cl.getArgs()[0];
  final String newName = cl.getArgs()[1];

  // If the shell's current table belongs to the namespace being renamed, its
  // fully-qualified name changes too, so the shell context must be refreshed.
  TableId activeTableId = null;
  boolean mustResetContext = false;
  final String currentTable = shellState.getTableName();
  if (currentTable != null && !currentTable.isEmpty()) {
    NamespaceId nsId = Namespaces.getNamespaceId(shellState.getContext(), oldName);
    activeTableId = shellState.getContext().getTableId(currentTable);
    mustResetContext = Namespaces.getTableIds(shellState.getContext(), nsId).contains(activeTableId);
  }

  shellState.getAccumuloClient().namespaceOperations().rename(oldName, newName);

  if (mustResetContext) {
    // Look the table up by id to recover its post-rename qualified name.
    shellState.setTableName(shellState.getContext().getTableName(activeTableId));
  }
  return 0;
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project: class ExternalCompaction_2_IT, method testSplitCancelsExternalCompaction.
@Test
public void testSplitCancelsExternalCompaction() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE1);
  final String tableName = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, tableName, "cs1");
    TableId tableId = getCluster().getServerContext().getTableId(tableName);
    writeData(client, tableName);
    compact(client, tableName, 2, QUEUE1, false);

    // Block until an external compaction column appears, then capture its id(s).
    Set<ExternalCompactionId> startedEcids = ExternalCompactionTestUtils.waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tableId);

    // The coordinator should report at least one of these ids as RUNNING.
    int runningCount = ExternalCompactionTestUtils.confirmCompactionRunning(getCluster().getServerContext(), startedEcids);
    assertTrue(runningCount > 0);

    // ExternalDoNothingCompactor never completes its work; split the table while
    // the compaction is still outstanding so the split cancels it.
    SortedSet<Text> splitPoints = new TreeSet<>();
    int step = MAX_DATA / 5;
    for (int r = step; r < MAX_DATA; r += step) {
      splitPoints.add(new Text(row(r)));
    }
    client.tableOperations().addSplits(tableName, splitPoints);

    confirmCompactionCompleted(getCluster().getServerContext(), startedEcids, TCompactionState.CANCELLED);

    // The split operation should have deleted the compaction ids from metadata.
    try (TabletsMetadata tabletsMeta = getCluster().getServerContext().getAmple().readTablets().forTable(tableId).fetch(ColumnType.ECOMP).build()) {
      Set<ExternalCompactionId> remaining = tabletsMeta.stream().flatMap(t -> t.getExternalCompactions().keySet().stream()).collect(Collectors.toSet());
      assertTrue(Collections.disjoint(startedEcids, remaining));
    }

    // The test initiated a user compaction above; even though the external
    // compaction was cancelled by the split, FaTE will keep re-queueing the
    // work unless we cancel (or delete the table).
    client.tableOperations().cancelCompaction(tableName);
  }
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project: class ExternalCompaction_2_IT, method testUserCompactionCancellation.
@Test
public void testUserCompactionCancellation() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE3);
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table1, "cs3");
    TableId tid = getCluster().getServerContext().getTableId(table1);
    writeData(client, table1);
    compact(client, table1, 2, QUEUE3, false);

    // Wait for the compaction to start by waiting for 1 external compaction column
    Set<ExternalCompactionId> ecids = ExternalCompactionTestUtils.waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);

    // Confirm that this ECID shows up in RUNNING set
    int matches = ExternalCompactionTestUtils.confirmCompactionRunning(getCluster().getServerContext(), ecids);
    assertTrue(matches > 0);

    // Cancel the user compaction directly. Unlike the split/delete tests, this
    // user-level cancel also removes the FaTE operation that queued the
    // compaction, so no additional cleanup cancel is needed afterwards.
    client.tableOperations().cancelCompaction(table1);
    confirmCompactionCompleted(getCluster().getServerContext(), ecids, TCompactionState.CANCELLED);
  }
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project: class ExternalCompaction_2_IT, method testDeleteTableCancelsExternalCompaction.
@Test
public void testDeleteTableCancelsExternalCompaction() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE5);
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table1, "cs5");
    // set compaction ratio to 1 so that majc occurs naturally, not user compaction
    // user compaction blocks delete
    client.tableOperations().setProperty(table1, Property.TABLE_MAJC_RATIO.toString(), "1.0");
    // cause multiple rfiles to be created
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    writeData(client, table1);
    TableId tid = getCluster().getServerContext().getTableId(table1);

    // Wait for the compaction to start by waiting for 1 external compaction column
    Set<ExternalCompactionId> ecids = ExternalCompactionTestUtils.waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);

    // Confirm that this ECID shows up in RUNNING set
    int matches = ExternalCompactionTestUtils.confirmCompactionRunning(getCluster().getServerContext(), ecids);
    assertTrue(matches > 0);

    client.tableOperations().delete(table1);
    confirmCompactionCompleted(getCluster().getServerContext(), ecids, TCompactionState.CANCELLED);

    // try-with-resources so the metadata scanner is closed even if the
    // count or assertion throws (the original closed it manually, leaking
    // the resource on failure)
    try (TabletsMetadata tm = getCluster().getServerContext().getAmple().readTablets().forTable(tid).fetch(ColumnType.ECOMP).build()) {
      assertEquals(0, tm.stream().count());
    }
  }
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project: class ExternalCompaction_3_IT, method testCoordinatorRestartsDuringCompaction.
@Test
public void testCoordinatorRestartsDuringCompaction() throws Exception {
  getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
  getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE2);
  final String tableName = this.getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, tableName, "cs2", 2);
    writeData(client, tableName);
    compact(client, tableName, 2, QUEUE2, false);
    TableId tableId = getCluster().getServerContext().getTableId(tableName);

    // Block until the external compaction column appears in metadata.
    Set<ExternalCompactionId> startedEcids = waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tableId);

    // Bounce the coordinator while the compaction is still in flight.
    getCluster().getClusterControl().stop(ServerType.COMPACTION_COORDINATOR);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);

    // Poll the restarted coordinator until it reports our compaction running.
    int observed = 0;
    do {
      TExternalCompactionList runningList = getRunningCompactions(getCluster().getServerContext());
      if (runningList.getCompactions() != null) {
        for (ExternalCompactionId id : startedEcids) {
          TExternalCompaction comp = runningList.getCompactions().get(id.canonical());
          if (comp != null && comp.getUpdates() != null && !comp.getUpdates().isEmpty()) {
            observed++;
            assertEquals(TCompactionState.IN_PROGRESS, getLastState(comp));
          }
        }
      }
      UtilWaitThread.sleep(250);
    } while (observed == 0);
    assertTrue(observed > 0);

    // The test initiated a user compaction above; ExternalDoNothingCompactor
    // never finishes it, so cancel the compaction to stop FaTE from keeping
    // the work queued after the test.
    client.tableOperations().cancelCompaction(tableName);
  }
}
Aggregations