Example usage of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project:
class TabletGroupWatcher, method deleteTablets.
/**
 * Deletes the metadata entries for all tablets covered by a merge range and queues their
 * data files and tablet directories as GC candidates.
 *
 * <p>If a tablet follows the merge range, its prevEndRow is rewritten so it absorbs the
 * deleted range (and its chopped marker is cleared); otherwise the default (last) tablet
 * is recreated to hold the end of the table.
 *
 * @param info merge information carrying the extent whose tablets are deleted
 * @throws AccumuloException wrapping any runtime or table-not-found failure
 */
private void deleteTablets(MergeInfo info) throws AccumuloException {
  KeyExtent extent = info.getExtent();
  // Root-table tablets are described in the root table; everything else in metadata.
  String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
  Manager.log.debug("Deleting tablets for {}", extent);
  MetadataTime metadataTime = null;
  KeyExtent followingTablet = null;
  if (extent.endRow() != null) {
    Key nextExtent = new Key(extent.endRow()).followingKey(PartialKey.ROW);
    followingTablet =
        getHighTablet(new KeyExtent(extent.tableId(), nextExtent.getRow(), extent.endRow()));
    Manager.log.debug("Found following tablet {}", followingTablet);
  }
  try {
    AccumuloClient client = manager.getContext();
    ServerContext context = manager.getContext();
    Ample ample = context.getAmple();
    Text start = extent.prevEndRow();
    if (start == null) {
      start = new Text();
    }
    Manager.log.debug("Making file deletion entries for {}", extent);
    // (start, endRow]: excludes the tablet preceding the range, includes its last tablet.
    Range deleteRange = new Range(TabletsSection.encodeRow(extent.tableId(), start), false,
        TabletsSection.encodeRow(extent.tableId(), extent.endRow()), true);
    // Flush GC candidates in batches of this size to bound memory usage.
    final int gcBatchSize = 1000;
    Set<String> datafiles = new TreeSet<>();
    // Scanner is AutoCloseable; close it as soon as iteration is done (the original
    // leaked it until GC).
    try (Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY)) {
      scanner.setRange(deleteRange);
      ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
      ServerColumnFamily.TIME_COLUMN.fetch(scanner);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
      for (Entry<Key,Value> entry : scanner) {
        Key key = entry.getKey();
        if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
          datafiles.add(TabletFileUtil.validate(key.getColumnQualifierData().toString()));
          if (datafiles.size() > gcBatchSize) {
            ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
            datafiles.clear();
          }
        } else if (ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
          metadataTime = MetadataTime.parse(entry.getValue().toString());
        } else if (key.compareColumnFamily(CurrentLocationColumnFamily.NAME) == 0) {
          // a merge requires every tablet in the range to be unassigned
          throw new IllegalStateException(
              "Tablet " + key.getRow() + " is assigned during a merge!");
        } else if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          String path = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(extent.tableId(),
              entry.getValue().toString());
          datafiles.add(path);
          if (datafiles.size() > gcBatchSize) {
            ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
            datafiles.clear();
          }
        }
      }
    }
    // flush any remaining candidates
    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
    // try-with-resources replaces the manual try/finally close; the rest of the file
    // already uses this idiom for BatchWriters
    try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
      deleteTablets(info, deleteRange, bw, client);
    }
    if (followingTablet != null) {
      Manager.log.debug("Updating prevRow of {} to {}", followingTablet, extent.prevEndRow());
      try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
        Mutation m = new Mutation(followingTablet.toMetaRow());
        TabletColumnFamily.PREV_ROW_COLUMN.put(m,
            TabletColumnFamily.encodePrevEndRow(extent.prevEndRow()));
        ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
        bw.addMutation(m);
        bw.flush();
      }
    } else {
      // Recreate the default tablet to hold the end of the table
      if (metadataTime == null) {
        // Previously this would NPE on metadataTime.getType() below; fail with a clear
        // message instead (still wrapped into AccumuloException by the catch below, so
        // the caller-visible exception type is unchanged).
        throw new IllegalStateException("No time-type entry found in range " + deleteRange);
      }
      MetadataTableUtil.addTablet(new KeyExtent(extent.tableId(), null, extent.prevEndRow()),
          ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, manager.getContext(),
          metadataTime.getType(), manager.managerLock);
    }
  } catch (RuntimeException | TableNotFoundException ex) {
    throw new AccumuloException(ex);
  }
}
Example usage of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project:
class Upgrader9to10, method upgradeFileDeletes.
/**
 * Rewrites legacy GC delete markers into the improved storage format. For more
 * information see:
 * <a href="https://github.com/apache/accumulo/issues/1043">#1043</a>
 * <a href="https://github.com/apache/accumulo/pull/1366">#1366</a>
 */
public void upgradeFileDeletes(ServerContext context, Ample.DataLevel level) {
  String metaTableName = level.metaTable();
  AccumuloClient client = context;
  Ample ample = context.getAmple();
  // locate every legacy delete marker and upgrade it in batches
  try (BatchWriter bw = client.createBatchWriter(metaTableName)) {
    log.info("looking for candidates in table {}", metaTableName);
    Iterator<String> legacyCandidates = getOldCandidates(context, metaTableName);
    String relativeVolumesProp =
        context.getConfiguration().get(Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE);
    while (legacyCandidates.hasNext()) {
      List<String> batch = readCandidatesInBatch(legacyCandidates);
      log.info("found {} deletes to upgrade", batch.size());
      // write the new-format marker for every candidate in this batch
      for (String candidate : batch) {
        log.trace("upgrading delete entry for {}", candidate);
        Path resolved = resolveRelativeDelete(candidate, relativeVolumesProp);
        String allVolumesDelete = switchToAllVolumes(resolved);
        bw.addMutation(ample.createDeleteMutation(allVolumesDelete));
      }
      bw.flush();
      // the new entries are durable now, so the legacy ones can safely be removed
      log.info("upgrade processing completed so delete old entries");
      for (String candidate : batch) {
        log.trace("deleting old entry for {}", candidate);
        bw.addMutation(deleteOldDeleteMutation(candidate));
      }
      bw.flush();
    }
  } catch (TableNotFoundException | MutationsRejectedException e) {
    throw new RuntimeException(e);
  }
}
Example usage of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project:
class MetadataTableUtil, method deleteTable.
/**
 * Deletes all metadata table entries for {@code tableId}.
 *
 * @param tableId the table whose metadata rows are removed
 * @param insertDeletes when true, GC delete markers are first written for every data file
 *        and tablet directory found in the table's metadata, making the deletion fault
 *        tolerant (the files stay discoverable by the GC if this method dies midway)
 * @param context server context providing metadata-table access and the Ample API
 * @param lock if non-null, its lock ID is recorded in every row-delete mutation
 * @throws AccumuloException declared for callers; presumably surfaced when closing the
 *         batch writer fails (MutationsRejectedException) — not thrown directly here
 */
public static void deleteTable(TableId tableId, boolean insertDeletes, ServerContext context, ServiceLock lock) throws AccumuloException {
try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
// scan metadata for our table and delete everything we find
Mutation m = null;
Ample ample = context.getAmple();
// restrict the scan to this table's metadata range
ms.setRange(new KeyExtent(tableId, null, null).toMetaRange());
// insert deletes before deleting data from metadata... this makes the code fault tolerant
if (insertDeletes) {
ms.fetchColumnFamily(DataFileColumnFamily.NAME);
ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
for (Entry<Key, Value> cell : ms) {
Key key = cell.getKey();
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
// data file reference -> queue the file itself for GC
String ref = TabletFileUtil.validate(key.getColumnQualifierData().toString());
bw.addMutation(ample.createDeleteMutation(ref));
}
if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
// tablet directory -> queue a delete URI that covers the dir on all volumes
String uri = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
bw.addMutation(ample.createDeleteMutation(uri));
}
}
// make the GC markers durable before any metadata rows disappear
bw.flush();
// drop the column restrictions so the second pass sees every column
ms.clearColumns();
}
// second pass: delete every column of every row in range, batching all deletes for
// one row into a single mutation; m is the mutation for the current row
for (Entry<Key, Value> cell : ms) {
Key key = cell.getKey();
if (m == null) {
m = new Mutation(key.getRow());
if (lock != null)
putLockID(context, lock, m);
}
// row changed -> submit the finished row's mutation and start a new one
if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
bw.addMutation(m);
m = new Mutation(key.getRow());
if (lock != null)
putLockID(context, lock, m);
}
m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
}
// submit the last row's mutation (the loop only submits on a row change)
if (m != null)
bw.addMutation(m);
}
}
Example usage of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project:
class GarbageCollectorIT, method addEntries.
/**
 * Seeds the metadata table with 100,000 GC delete-flag entries (long paths) so the
 * garbage collector has a large candidate set to work through.
 */
private void addEntries(AccumuloClient client) throws Exception {
  Ample ample = getServerContext().getAmple();
  client.securityOperations().grantTablePermission(client.whoami(), MetadataTable.NAME,
      TablePermission.WRITE);
  // loop-invariant 100-char path suffix, hoisted out of the loop
  String longpath = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee"
      + "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj";
  try (BatchWriter writer = client.createBatchWriter(MetadataTable.NAME)) {
    for (int row = 0; row < 100000; row++) {
      Mutation delFlag = ample.createDeleteMutation(String.format("file:/%020d/%s", row, longpath));
      writer.addMutation(delFlag);
    }
  }
}
Aggregations