Search in sources :

Example 6 with TabletsMetadata

use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.

From the class CompactionFinalizer, the method processPending.

/**
 * Drains pending external-compaction completion notifications in batches and either
 * notifies the hosting tserver or deletes unresolvable final-state markers.
 * Runs until the thread is interrupted.
 */
private void processPending() {
    while (!Thread.interrupted()) {
        try {
            // Block for at least one notification, then drain whatever else has
            // queued up so the whole group is processed as a single batch.
            ArrayList<ExternalCompactionFinalState> batch = new ArrayList<>();
            batch.add(pendingNotifications.take());
            pendingNotifications.drainTo(batch);
            List<Future<?>> futures = new ArrayList<>();
            List<ExternalCompactionId> statusesToDelete = new ArrayList<>();
            Map<KeyExtent, TabletMetadata> tabletsMetadata;
            var extents = batch.stream().map(ExternalCompactionFinalState::getExtent).collect(toList());
            // Read all tablet metadata for the batch in one pass; try-with-resources
            // closes the underlying metadata scanner.
            try (TabletsMetadata tablets = context.getAmple().readTablets().forTablets(extents).fetch(ColumnType.LOCATION, ColumnType.PREV_ROW, ColumnType.ECOMP).build()) {
                tabletsMetadata = tablets.stream().collect(toMap(TabletMetadata::getExtent, identity()));
            }
            for (ExternalCompactionFinalState ecfs : batch) {
                TabletMetadata tabletMetadata = tabletsMetadata.get(ecfs.getExtent());
                // containsKey is the direct (and cheaper) form of keySet().contains
                if (tabletMetadata == null || !tabletMetadata.getExternalCompactions().containsKey(ecfs.getExternalCompactionId())) {
                    // there is no per tablet external compaction entry, so delete its final state marker
                    // from metadata table
                    LOG.debug("Unable to find tablets external compaction entry, deleting completion entry {}", ecfs);
                    statusesToDelete.add(ecfs.getExternalCompactionId());
                } else if (tabletMetadata.getLocation() != null && tabletMetadata.getLocation().getType() == LocationType.CURRENT) {
                    // Tablet is hosted; notify its tserver asynchronously.
                    futures.add(ntfyExecutor.submit(() -> notifyTserver(tabletMetadata.getLocation(), ecfs)));
                } else {
                    LOG.trace("External compaction {} is completed, but there is no location for tablet.  Unable to notify tablet, will try again later.", ecfs);
                }
            }
            if (!statusesToDelete.isEmpty()) {
                LOG.info("Deleting unresolvable completed external compactions from metadata table, ids: {}", statusesToDelete);
                context.getAmple().deleteExternalCompactionFinalStates(statusesToDelete);
            }
            // Wait for all notifications; failures are logged and retried on a later pass.
            for (Future<?> future : futures) {
                try {
                    future.get();
                } catch (ExecutionException e) {
                    LOG.debug("Failed to notify tserver", e);
                }
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating so callers see the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (RuntimeException e) {
            LOG.warn("Failed to process pending notifications", e);
        }
    }
}
Also used : ExternalCompactionId(org.apache.accumulo.core.metadata.schema.ExternalCompactionId) ArrayList(java.util.ArrayList) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) ExternalCompactionFinalState(org.apache.accumulo.core.metadata.schema.ExternalCompactionFinalState) Future(java.util.concurrent.Future) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) ExecutionException(java.util.concurrent.ExecutionException)

Example 7 with TabletsMetadata

use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.

From the class BulkNewIT, the method verifyMetadata.

/**
 * Asserts that every tablet of the given table has exactly the expected set of file
 * hashes per end row, that no bulk-load markers remain, and that all expected end
 * rows were observed.
 */
private void verifyMetadata(AccumuloClient client, String tableName, Map<String, Set<String>> expectedHashes) {
    // Track which end rows we actually saw so we can verify full coverage afterwards.
    Set<String> observedEndRows = new HashSet<>();
    String tableId = client.tableOperations().tableIdMap().get(tableName);
    try (TabletsMetadata tablets = TabletsMetadata.builder(client).forTable(TableId.of(tableId)).fetch(FILES, LOADED, PREV_ROW).build()) {
        for (TabletMetadata tablet : tablets) {
            // No bulk-load markers should remain on any tablet.
            assertTrue(tablet.getLoaded().isEmpty());
            String endRowKey = tablet.getEndRow() == null ? "null" : tablet.getEndRow().toString();
            // Hash each file entry and compare against the expected set for this end row.
            Set<String> actualHashes = new HashSet<>();
            for (var file : tablet.getFiles()) {
                actualHashes.add(hash(file.getMetaUpdateDelete()));
            }
            assertEquals(expectedHashes.get(endRowKey), actualHashes);
            observedEndRows.add(endRowKey);
        }
        assertEquals(expectedHashes.keySet(), observedEndRows);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Arrays(java.util.Arrays) SortedSet(java.util.SortedSet) FileSystem(org.apache.hadoop.fs.FileSystem) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) Text(org.apache.hadoop.io.Text) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FileOperations(org.apache.accumulo.core.file.FileOperations) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Path(org.apache.hadoop.fs.Path) BigInteger(java.math.BigInteger) Value(org.apache.accumulo.core.data.Value) PREV_ROW(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW) Property(org.apache.accumulo.core.conf.Property) LoadPlan(org.apache.accumulo.core.data.LoadPlan) ServerType(org.apache.accumulo.minicluster.ServerType) AfterClass(org.junit.AfterClass) Set(java.util.Set) TimeType(org.apache.accumulo.core.client.admin.TimeType) MiniClusterConfigurationCallback(org.apache.accumulo.harness.MiniClusterConfigurationCallback) RangeType(org.apache.accumulo.core.data.LoadPlan.RangeType) LOADED(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOADED) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) Entry(java.util.Map.Entry) Scanner(org.apache.accumulo.core.client.Scanner) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) BeforeClass(org.junit.BeforeClass) MessageDigest(java.security.MessageDigest) Assert.assertThrows(org.junit.Assert.assertThrows) HashMap(java.util.HashMap) Accumulo(org.apache.accumulo.core.client.Accumulo) TreeSet(java.util.TreeSet) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) MemoryUnit(org.apache.accumulo.minicluster.MemoryUnit) HashSet(java.util.HashSet) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) 
FileSKVWriter(org.apache.accumulo.core.file.FileSKVWriter) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Key(org.apache.accumulo.core.data.Key) MiniAccumuloConfigImpl(org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl) FILES(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES) Before(org.junit.Before) Iterator(java.util.Iterator) Files(java.nio.file.Files) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) Authorizations(org.apache.accumulo.core.security.Authorizations) CryptoServiceFactory(org.apache.accumulo.core.crypto.CryptoServiceFactory) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) ExecutionException(java.util.concurrent.ExecutionException) SharedMiniClusterBase(org.apache.accumulo.harness.SharedMiniClusterBase) RFile(org.apache.accumulo.core.file.rfile.RFile) Paths(java.nio.file.Paths) Assert.assertEquals(org.junit.Assert.assertEquals) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) HashSet(java.util.HashSet)

Example 8 with TabletsMetadata

use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.

From the class MetadataIT, the method testAmpleReadTablets.

@Test
public void testAmpleReadTablets() throws Exception {
    try (ClientContext cc = (ClientContext) Accumulo.newClient().from(getClientProps()).build()) {
        cc.securityOperations().grantTablePermission(cc.whoami(), MetadataTable.NAME, TablePermission.WRITE);

        // Create table "t" with three splits: a, e, j.
        SortedSet<Text> partitionKeys = new TreeSet<>();
        partitionKeys.add(new Text("a"));
        partitionKeys.add(new Text("e"));
        partitionKeys.add(new Text("j"));
        cc.tableOperations().create("t");
        cc.tableOperations().addSplits("t", partitionKeys);

        // Resolve the table's actual id instead of hard-coding "1", which only holds
        // when this happens to be the first user table created on the cluster.
        String tableId = cc.tableOperations().tableIdMap().get("t");
        Text startRow = new Text("a");
        Text endRow = new Text("z");

        // Call up Ample from the client context using table "t" and build.
        // TabletsMetadata holds a metadata scanner, so close it with
        // try-with-resources (the original version leaked it).
        try (TabletsMetadata tablets = cc.getAmple().readTablets().forTable(TableId.of(tableId)).overlapping(startRow, endRow).fetch(FILES, LOCATION, LAST, PREV_ROW).build()) {
            TabletMetadata tabletMetadata0 = Iterables.get(tablets, 0);
            TabletMetadata tabletMetadata1 = Iterables.get(tablets, 1);

            // JUnit convention is assertEquals(expected, actual).
            assertEquals(tableId, tabletMetadata0.getTableId().toString());
            assertEquals(tableId, tabletMetadata1.getTableId().toString());
            // KeyExtent renders as "<tableId>;<endRow>;<prevEndRow>".
            assertEquals(tableId + ";e;a", tabletMetadata0.getExtent().toString());
            assertEquals(tableId + ";j;e", tabletMetadata1.getExtent().toString());
            assertEquals("a", tabletMetadata0.getPrevEndRow().toString());
            assertEquals("e", tabletMetadata1.getPrevEndRow().toString());
            assertEquals("e", tabletMetadata0.getEndRow().toString());
            assertEquals("j", tabletMetadata1.getEndRow().toString());
        }
    }
}
Also used : TreeSet(java.util.TreeSet) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)

Example 9 with TabletsMetadata

use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.

From the class Merge, the method getSizeIterator.

/**
 * Returns an iterator of per-tablet sizes for the given table over the range
 * (start, end], where each size is the sum of the tablet's data file sizes.
 *
 * @throws MergeException if resolving the table id or building the metadata scan fails
 */
protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start, Text end) throws MergeException {
    // open up metadata, walk through the tablets.
    TableId tableId;
    TabletsMetadata tablets;
    try {
        ClientContext context = (ClientContext) client;
        tableId = context.getTableId(tablename);
        tablets = TabletsMetadata.builder(context).scanMetadataTable().overRange(new KeyExtent(tableId, end, start).toMetaRange()).fetch(FILES, PREV_ROW).build();
    } catch (Exception e) {
        throw new MergeException(e);
    }
    // NOTE(review): `tablets` escapes via this lazily-evaluated iterator and is never
    // closed here — closing it before the caller finishes iterating would break the
    // iterator, but as written the underlying scanner's lifecycle depends on the
    // caller fully consuming it. Confirm whether the caller (or GC) releases it.
    return tablets.stream().map(tm -> {
        // Total on-disk size of the tablet = sum of its data file sizes.
        long size = tm.getFilesMap().values().stream().mapToLong(DataFileValue::getSize).sum();
        return new Size(tm.getExtent(), size);
    }).iterator();
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TableId(org.apache.accumulo.core.data.TableId) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Parameter(com.beust.jcommander.Parameter) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) LoggerFactory(org.slf4j.LoggerFactory) Text(org.apache.hadoop.io.Text) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Accumulo(org.apache.accumulo.core.client.Accumulo) ArrayList(java.util.ArrayList) ConfigurationTypeHelper(org.apache.accumulo.core.conf.ConfigurationTypeHelper) IStringConverter(com.beust.jcommander.IStringConverter) PREV_ROW(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW) FILES(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES) Property(org.apache.accumulo.core.conf.Property) ClientOpts(org.apache.accumulo.core.cli.ClientOpts) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Span(io.opentelemetry.api.trace.Span) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) Scope(io.opentelemetry.context.Scope) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) List(java.util.List) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) TraceUtil(org.apache.accumulo.core.trace.TraceUtil) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)

Example 10 with TabletsMetadata

use of org.apache.accumulo.core.metadata.schema.TabletsMetadata in project accumulo by apache.

From the class ListVolumesUsed, the method listTable.

/**
 * Prints every volume referenced by the given metadata level: tablet files and
 * write-ahead logs, then GC delete candidates, then currently open logs.
 */
private static void listTable(Ample.DataLevel level, ServerContext context) throws Exception {
    System.out.println("Listing volumes referenced in " + level + " tablets section");
    TreeSet<String> volumes = new TreeSet<>();
    // Collect volumes from each tablet's data files and write-ahead logs;
    // try-with-resources closes the metadata scanner.
    try (TabletsMetadata tablets = TabletsMetadata.builder(context).forLevel(level).fetch(TabletMetadata.ColumnType.FILES, TabletMetadata.ColumnType.LOGS).build()) {
        for (TabletMetadata tabletMetadata : tablets) {
            tabletMetadata.getFiles().forEach(file -> volumes.add(getTableURI(file.getPathStr())));
            tabletMetadata.getLogs().forEach(le -> getLogURIs(volumes, le));
        }
    }
    printVolumes(volumes);
    System.out.println("Listing volumes referenced in " + level + " deletes section (volume replacement occurs at deletion time)");
    volumes.clear();
    // Volumes referenced by files awaiting garbage collection.
    Iterator<String> delPaths = context.getAmple().getGcCandidates(level);
    while (delPaths.hasNext()) {
        volumes.add(getTableURI(delPaths.next()));
    }
    printVolumes(volumes);
    System.out.println("Listing volumes referenced in " + level + " current logs");
    volumes.clear();
    // Volumes referenced by currently open write-ahead logs.
    WalStateManager wals = new WalStateManager(context);
    for (Path path : wals.getAllState().keySet()) {
        volumes.add(getLogURI(path.toString()));
    }
    printVolumes(volumes);
}

/** Prints each collected volume on its own indented line. */
private static void printVolumes(TreeSet<String> volumes) {
    for (String volume : volumes) {
        System.out.println("\tVolume : " + volume);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TreeSet(java.util.TreeSet) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) WalStateManager(org.apache.accumulo.server.log.WalStateManager) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata)

Aggregations

TabletsMetadata (org.apache.accumulo.core.metadata.schema.TabletsMetadata)14 TabletMetadata (org.apache.accumulo.core.metadata.schema.TabletMetadata)9 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)7 TableId (org.apache.accumulo.core.data.TableId)7 Text (org.apache.hadoop.io.Text)7 Test (org.junit.Test)7 Property (org.apache.accumulo.core.conf.Property)5 ArrayList (java.util.ArrayList)4 TreeSet (java.util.TreeSet)4 Accumulo (org.apache.accumulo.core.client.Accumulo)4 ExternalCompactionId (org.apache.accumulo.core.metadata.schema.ExternalCompactionId)4 Set (java.util.Set)3 Collectors (java.util.stream.Collectors)3 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)3 MiniClusterConfigurationCallback (org.apache.accumulo.harness.MiniClusterConfigurationCallback)3 SharedMiniClusterBase (org.apache.accumulo.harness.SharedMiniClusterBase)3 ServerType (org.apache.accumulo.minicluster.ServerType)3 MiniAccumuloConfigImpl (org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl)3 Configuration (org.apache.hadoop.conf.Configuration)3 Assert.assertEquals (org.junit.Assert.assertEquals)3