Use of org.apache.accumulo.core.clientImpl.BatchWriterImpl in project accumulo by apache.
The deleteTable method of class MetadataTableUtil.
public static void deleteTable(TableId tableId, boolean insertDeletes, ServerContext context,
    ServiceLock lock) throws AccumuloException {
  try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
      BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
          new BatchWriterConfig().setMaxMemory(1000000)
              .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    Ample ample = context.getAmple();
    ms.setRange(new KeyExtent(tableId, null, null).toMetaRange());

    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {
      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

      for (Entry<Key,Value> cell : ms) {
        Key key = cell.getKey();

        // queue a delete entry for each data file referenced by the table's tablets
        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          String ref = TabletFileUtil.validate(key.getColumnQualifierData().toString());
          bw.addMutation(ample.createDeleteMutation(ref));
        }

        // queue a delete entry for each tablet directory, covering all volumes
        if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          String uri =
              GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
          bw.addMutation(ample.createDeleteMutation(uri));
        }
      }

      bw.flush();
      ms.clearColumns();
    }

    // delete the table's metadata entries, building one mutation per row
    for (Entry<Key,Value> cell : ms) {
      Key key = cell.getKey();

      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(context, lock, m);
      }

      // when the row changes, submit the current mutation and start a new one
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(context, lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }

    if (m != null)
      bw.addMutation(m);
  }
}
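
BatchWriterImpl lives in the internal clientImpl package, so application code would normally obtain a BatchWriter from the public client API instead. Below is a minimal sketch of the same scan-then-delete pattern against a user table using AccumuloClient; the table name "mytable", the row range, and the client.properties path are placeholder assumptions and are not part of the Accumulo source above.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class DeleteRangeExample {
  public static void main(String[] args) throws Exception {
    // "client.properties" and "mytable" are placeholders for this sketch
    try (AccumuloClient client = Accumulo.newClient().from("client.properties").build();
        Scanner scanner = client.createScanner("mytable", Authorizations.EMPTY);
        BatchWriter bw = client.createBatchWriter("mytable", new BatchWriterConfig())) {

      // scan only the rows we intend to delete
      scanner.setRange(new Range("row_0000", "row_9999"));

      Mutation m = null;
      for (Entry<Key,Value> cell : scanner) {
        Key key = cell.getKey();
        Text row = key.getRow();

        // start a new mutation whenever the row changes, as deleteTable does above
        if (m == null || !new Text(m.getRow()).equals(row)) {
          if (m != null) {
            bw.addMutation(m);
          }
          m = new Mutation(row);
        }
        m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
      }
      if (m != null) {
        bw.addMutation(m);
      }
    }
  }
}

As in deleteTable, all deletes for a given row are grouped into a single mutation, and the final mutation is flushed when the BatchWriter is closed by the try-with-resources block.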