Use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.
The class CompactionFinalizer, method processPending.
private void processPending() {
  while (!Thread.interrupted()) {
    try {
      // Block for at least one pending notification, then drain anything else that is queued
      // so the whole batch can be handled with a single metadata read.
      ArrayList<ExternalCompactionFinalState> batch = new ArrayList<>();
      batch.add(pendingNotifications.take());
      pendingNotifications.drainTo(batch);

      List<Future<?>> futures = new ArrayList<>();
      List<ExternalCompactionId> statusesToDelete = new ArrayList<>();

      // Read the metadata for every tablet referenced by the batch and index it by extent.
      Map<KeyExtent,TabletMetadata> tabletsMetadata;
      var extents = batch.stream().map(ExternalCompactionFinalState::getExtent).collect(toList());
      try (TabletsMetadata tablets = context.getAmple().readTablets().forTablets(extents)
          .fetch(ColumnType.LOCATION, ColumnType.PREV_ROW, ColumnType.ECOMP).build()) {
        tabletsMetadata = tablets.stream().collect(toMap(TabletMetadata::getExtent, identity()));
      }

      for (ExternalCompactionFinalState ecfs : batch) {
        TabletMetadata tabletMetadata = tabletsMetadata.get(ecfs.getExtent());
        if (tabletMetadata == null || !tabletMetadata.getExternalCompactions().keySet()
            .contains(ecfs.getExternalCompactionId())) {
          // There is no per-tablet external compaction entry, so delete its final state marker
          // from the metadata table.
          LOG.debug("Unable to find tablet's external compaction entry, deleting completion entry {}", ecfs);
          statusesToDelete.add(ecfs.getExternalCompactionId());
        } else if (tabletMetadata.getLocation() != null
            && tabletMetadata.getLocation().getType() == LocationType.CURRENT) {
          futures.add(ntfyExecutor.submit(() -> notifyTserver(tabletMetadata.getLocation(), ecfs)));
        } else {
          LOG.trace("External compaction {} is completed, but there is no location for tablet."
              + " Unable to notify tablet, will try again later.", ecfs);
        }
      }

      if (!statusesToDelete.isEmpty()) {
        LOG.info("Deleting unresolvable completed external compactions from metadata table, ids: {}",
            statusesToDelete);
        context.getAmple().deleteExternalCompactionFinalStates(statusesToDelete);
      }

      // Wait for all tserver notifications submitted for this batch.
      for (Future<?> future : futures) {
        try {
          future.get();
        } catch (ExecutionException e) {
          LOG.debug("Failed to notify tserver", e);
        }
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    } catch (RuntimeException e) {
      LOG.warn("Failed to process pending notifications", e);
    }
  }
}
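For reference, a minimal sketch of the Ample read pattern this method relies on, assuming only a ClientContext and a collection of KeyExtent objects; the class, method, and variable names below are illustrative, not part of the Accumulo source. It fetches a restricted set of columns for a specific set of tablets and indexes the result by extent.

import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toMap;

import java.util.Collection;
import java.util.Map;

import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;

class TabletLookupSketch {

  // Illustrative helper: read LOCATION, PREV_ROW, and ECOMP for the given extents and key the
  // results by extent; a missing key afterwards means the tablet no longer exists.
  static Map<KeyExtent,TabletMetadata> readByExtent(ClientContext context,
      Collection<KeyExtent> extents) {
    try (TabletsMetadata tablets = context.getAmple().readTablets().forTablets(extents)
        .fetch(ColumnType.LOCATION, ColumnType.PREV_ROW, ColumnType.ECOMP).build()) {
      return tablets.stream().collect(toMap(TabletMetadata::getExtent, identity()));
    }
  }
}

Collecting into a map before the try-with-resources block ends matters because closing the TabletsMetadata releases the underlying scan, which is why processPending copies the results into tabletsMetadata inside the block.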
Use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.
The class BulkNewIT, method verifyMetadata.
private void verifyMetadata(AccumuloClient client, String tableName,
    Map<String, Set<String>> expectedHashes) {
  Set<String> endRowsSeen = new HashSet<>();

  String id = client.tableOperations().tableIdMap().get(tableName);
  try (TabletsMetadata tablets = TabletsMetadata.builder(client).forTable(TableId.of(id))
      .fetch(FILES, LOADED, PREV_ROW).build()) {
    for (TabletMetadata tablet : tablets) {
      assertTrue(tablet.getLoaded().isEmpty());

      Set<String> fileHashes = tablet.getFiles().stream().map(f -> hash(f.getMetaUpdateDelete()))
          .collect(Collectors.toSet());

      String endRow = tablet.getEndRow() == null ? "null" : tablet.getEndRow().toString();

      assertEquals(expectedHashes.get(endRow), fileHashes);
      endRowsSeen.add(endRow);
    }

    assertEquals(expectedHashes.keySet(), endRowsSeen);
  }
}
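The same client-side builder works outside of a test. The sketch below is hypothetical (the class, method, and variable names are illustrative) and prints each tablet's end row together with the number of files it references, using only the calls shown above.

import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;

class TabletFileCountSketch {

  // Illustrative helper: print each tablet's end row and how many files it currently references.
  static void printFileCounts(AccumuloClient client, String tableName) {
    // Resolve the table name to its internal id, as verifyMetadata does above.
    String id = client.tableOperations().tableIdMap().get(tableName);
    try (TabletsMetadata tablets = TabletsMetadata.builder(client).forTable(TableId.of(id))
        .fetch(FILES, PREV_ROW).build()) {
      for (TabletMetadata tablet : tablets) {
        String endRow = tablet.getEndRow() == null ? "null" : tablet.getEndRow().toString();
        System.out.println(endRow + " -> " + tablet.getFiles().size() + " file(s)");
      }
    }
  }
}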
Use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.
The class MetadataIT, method testAmpleReadTablets.
@Test
public void testAmpleReadTablets() throws Exception {
  try (ClientContext cc = (ClientContext) Accumulo.newClient().from(getClientProps()).build()) {
    cc.securityOperations().grantTablePermission(cc.whoami(), MetadataTable.NAME,
        TablePermission.WRITE);

    SortedSet<Text> partitionKeys = new TreeSet<>();
    partitionKeys.add(new Text("a"));
    partitionKeys.add(new Text("e"));
    partitionKeys.add(new Text("j"));

    cc.tableOperations().create("t");
    cc.tableOperations().addSplits("t", partitionKeys);

    Text startRow = new Text("a");
    Text endRow = new Text("z");

    // Read tablet metadata through Ample; the test assumes the newly created table "t" was
    // assigned table id "1".
    try (TabletsMetadata tablets = cc.getAmple().readTablets().forTable(TableId.of("1"))
        .overlapping(startRow, endRow).fetch(FILES, LOCATION, LAST, PREV_ROW).build()) {

      TabletMetadata tabletMetadata0 = Iterables.get(tablets, 0);
      TabletMetadata tabletMetadata1 = Iterables.get(tablets, 1);

      String infoTabletId0 = tabletMetadata0.getTableId().toString();
      String infoExtent0 = tabletMetadata0.getExtent().toString();
      String infoPrevEndRow0 = tabletMetadata0.getPrevEndRow().toString();
      String infoEndRow0 = tabletMetadata0.getEndRow().toString();

      String infoTabletId1 = tabletMetadata1.getTableId().toString();
      String infoExtent1 = tabletMetadata1.getExtent().toString();
      String infoPrevEndRow1 = tabletMetadata1.getPrevEndRow().toString();
      String infoEndRow1 = tabletMetadata1.getEndRow().toString();

      // Expected values; the extent strings are encoded as tableId;endRow;prevEndRow.
      String testInfoTableId = "1";
      String testInfoKeyExtent0 = "1;e;a";
      String testInfoKeyExtent1 = "1;j;e";
      String testInfoPrevEndRow0 = "a";
      String testInfoPrevEndRow1 = "e";
      String testInfoEndRow0 = "e";
      String testInfoEndRow1 = "j";

      assertEquals(infoTabletId0, testInfoTableId);
      assertEquals(infoTabletId1, testInfoTableId);
      assertEquals(infoExtent0, testInfoKeyExtent0);
      assertEquals(infoExtent1, testInfoKeyExtent1);
      assertEquals(infoPrevEndRow0, testInfoPrevEndRow0);
      assertEquals(infoPrevEndRow1, testInfoPrevEndRow1);
      assertEquals(infoEndRow0, testInfoEndRow0);
      assertEquals(infoEndRow1, testInfoEndRow1);
    }
  }
}
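A hedged sketch of the same overlapping-range read as a reusable helper (the class, method, and variable names are illustrative); the strings it returns use the tableId;endRow;prevEndRow encoding seen in the expected values of the test above.

import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;

import java.util.List;
import java.util.stream.Collectors;

import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.hadoop.io.Text;

class OverlappingExtentsSketch {

  // Illustrative helper: return the "tableId;endRow;prevEndRow" form of every tablet whose
  // range overlaps the given rows.
  static List<String> extentStrings(ClientContext context, TableId tableId, Text start, Text end) {
    try (TabletsMetadata tablets = context.getAmple().readTablets().forTable(tableId)
        .overlapping(start, end).fetch(PREV_ROW).build()) {
      return tablets.stream().map(tm -> tm.getExtent().toString()).collect(Collectors.toList());
    }
  }
}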
Use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.
The class Merge, method getSizeIterator.
protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start,
    Text end) throws MergeException {
  // open up metadata, walk through the tablets.
  TableId tableId;
  TabletsMetadata tablets;
  try {
    ClientContext context = (ClientContext) client;
    tableId = context.getTableId(tablename);
    tablets = TabletsMetadata.builder(context).scanMetadataTable()
        .overRange(new KeyExtent(tableId, end, start).toMetaRange()).fetch(FILES, PREV_ROW)
        .build();
  } catch (Exception e) {
    throw new MergeException(e);
  }
  return tablets.stream().map(tm -> {
    long size = tm.getFilesMap().values().stream().mapToLong(DataFileValue::getSize).sum();
    return new Size(tm.getExtent(), size);
  }).iterator();
}
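When only an aggregate is needed, the same range scan can be consumed directly. The sketch below is a hypothetical helper (the names and import paths are assumptions, not part of the source) that totals the data file sizes of every tablet in the scanned range.

import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;

import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.hadoop.io.Text;

class RangeSizeSketch {

  // Illustrative helper: total the sizes of all data files for tablets in the given row range.
  static long totalSize(ClientContext context, TableId tableId, Text start, Text end) {
    try (TabletsMetadata tablets = TabletsMetadata.builder(context).scanMetadataTable()
        .overRange(new KeyExtent(tableId, end, start).toMetaRange()).fetch(FILES, PREV_ROW)
        .build()) {
      return tablets.stream().flatMap(tm -> tm.getFilesMap().values().stream())
          .mapToLong(DataFileValue::getSize).sum();
    }
  }
}

Unlike getSizeIterator, this helper consumes the scan inside the try-with-resources block, so the TabletsMetadata can be closed before the method returns.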
Use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.
The class ListVolumesUsed, method listTable.
private static void listTable(Ample.DataLevel level, ServerContext context) throws Exception {
  System.out.println("Listing volumes referenced in " + level + " tablets section");

  TreeSet<String> volumes = new TreeSet<>();

  try (TabletsMetadata tablets = TabletsMetadata.builder(context).forLevel(level)
      .fetch(TabletMetadata.ColumnType.FILES, TabletMetadata.ColumnType.LOGS).build()) {
    for (TabletMetadata tabletMetadata : tablets) {
      tabletMetadata.getFiles().forEach(file -> volumes.add(getTableURI(file.getPathStr())));
      tabletMetadata.getLogs().forEach(le -> getLogURIs(volumes, le));
    }
  }

  for (String volume : volumes) {
    System.out.println("\tVolume : " + volume);
  }

  System.out.println("Listing volumes referenced in " + level
      + " deletes section (volume replacement occurs at deletion time)");

  volumes.clear();

  Iterator<String> delPaths = context.getAmple().getGcCandidates(level);
  while (delPaths.hasNext()) {
    volumes.add(getTableURI(delPaths.next()));
  }

  for (String volume : volumes) {
    System.out.println("\tVolume : " + volume);
  }

  System.out.println("Listing volumes referenced in " + level + " current logs");

  volumes.clear();

  WalStateManager wals = new WalStateManager(context);
  for (Path path : wals.getAllState().keySet()) {
    volumes.add(getLogURI(path.toString()));
  }

  for (String volume : volumes) {
    System.out.println("\tVolume : " + volume);
  }
}
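A possible driver for the method above, assuming it sits in the same class (the real tool's entry point may differ): walk every level that Ample.DataLevel defines and list the volumes for each.

private static void listAllLevels(ServerContext context) throws Exception {
  // Hypothetical driver: DataLevel.values() covers the root, metadata, and user tablet levels.
  for (Ample.DataLevel level : Ample.DataLevel.values()) {
    listTable(level, context);
  }
}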