Search in sources :

Example 1 with Ample

Use of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project.

From the class TabletGroupWatcher, method deleteTablets.

/**
 * Deletes the metadata entries for the tablets covered by a merge, queueing their files and
 * directories as garbage-collection candidates first (so the operation is fault tolerant: GC
 * entries exist before the metadata rows are removed).
 *
 * <p>If a tablet following the merge range exists, its prevEndRow is updated to stitch the
 * keyspace back together; otherwise the default (last) tablet of the table is recreated.
 *
 * @param info describes the merge whose tablets are being deleted
 * @throws AccumuloException if the underlying metadata operations fail, if a tablet in the
 *         range is still assigned, or if no time entry was found for the range
 */
private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    // Root-table tablets live in the root table's metadata; everything else in the metadata table.
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Manager.log.debug("Deleting tablets for {}", extent);
    MetadataTime metadataTime = null;
    KeyExtent followingTablet = null;
    if (extent.endRow() != null) {
        Key nextExtent = new Key(extent.endRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.tableId(), nextExtent.getRow(), extent.endRow()));
        Manager.log.debug("Found following tablet {}", followingTablet);
    }
    try {
        AccumuloClient client = manager.getContext();
        ServerContext context = manager.getContext();
        Ample ample = context.getAmple();
        Text start = extent.prevEndRow();
        if (start == null) {
            start = new Text();
        }
        Manager.log.debug("Making file deletion entries for {}", extent);
        // Exclusive of the start row (the tablet before the merge range stays), inclusive of the end.
        Range deleteRange = new Range(TabletsSection.encodeRow(extent.tableId(), start), false, TabletsSection.encodeRow(extent.tableId(), extent.endRow()), true);
        Set<String> datafiles = new TreeSet<>();
        // try-with-resources: the scanner was previously never closed (resource leak).
        try (Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY)) {
            scanner.setRange(deleteRange);
            ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
            ServerColumnFamily.TIME_COLUMN.fetch(scanner);
            scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
            scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
            for (Entry<Key, Value> entry : scanner) {
                Key key = entry.getKey();
                if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                    datafiles.add(TabletFileUtil.validate(key.getColumnQualifierData().toString()));
                } else if (ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                    metadataTime = MetadataTime.parse(entry.getValue().toString());
                } else if (key.compareColumnFamily(CurrentLocationColumnFamily.NAME) == 0) {
                    // A tablet in the merge range must be unassigned before its metadata is deleted.
                    throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
                } else if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    datafiles.add(GcVolumeUtil.getDeleteTabletOnAllVolumesUri(extent.tableId(), entry.getValue().toString()));
                }
                // Flush candidates in bounded batches to cap memory use (previously duplicated in
                // both the data-file and directory branches).
                if (datafiles.size() > 1000) {
                    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
                    datafiles.clear();
                }
            }
        }
        // Flush any remaining candidates before the metadata rows are removed.
        ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
        try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
            deleteTablets(info, deleteRange, bw, client);
        }
        if (followingTablet != null) {
            Manager.log.debug("Updating prevRow of {} to {}", followingTablet, extent.prevEndRow());
            try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
                Mutation m = new Mutation(followingTablet.toMetaRow());
                TabletColumnFamily.PREV_ROW_COLUMN.put(m, TabletColumnFamily.encodePrevEndRow(extent.prevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            }
        } else {
            // No time entry seen means metadataTime is still null; fail with a clear message
            // instead of the NullPointerException the dereference below would otherwise throw.
            if (metadataTime == null) {
                throw new IllegalStateException("Did not find a time entry for " + extent + " while deleting tablets");
            }
            // Recreate the default tablet to hold the end of the table
            MetadataTableUtil.addTablet(new KeyExtent(extent.tableId(), null, extent.prevEndRow()), ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, manager.getContext(), metadataTime.getType(), manager.managerLock);
        }
    } catch (RuntimeException | TableNotFoundException ex) {
        throw new AccumuloException(ex);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ServerContext(org.apache.accumulo.server.ServerContext) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 2 with Ample

Use of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project.

From the class Upgrader9to10, method upgradeFileDeletes.

/**
 * Rewrites legacy GC delete markers into the improved storage format. For more information see:
 * <a href="https://github.com/apache/accumulo/issues/1043">#1043</a>
 * <a href="https://github.com/apache/accumulo/pull/1366">#1366</a>
 *
 * <p>Candidates are processed in batches: for each batch the new-format markers are written and
 * flushed first, and only then are the old entries deleted, so a failure mid-batch never loses a
 * delete marker.
 *
 * @param context server context providing the client, configuration, and Ample
 * @param level metadata level whose table is scanned for legacy candidates
 */
public void upgradeFileDeletes(ServerContext context, Ample.DataLevel level) {
    String metaTableName = level.metaTable();
    AccumuloClient accumuloClient = context;
    Ample ample = context.getAmple();
    // find all deletes
    try (BatchWriter mutationWriter = accumuloClient.createBatchWriter(metaTableName)) {
        log.info("looking for candidates in table {}", metaTableName);
        Iterator<String> legacyCandidates = getOldCandidates(context, metaTableName);
        String relativeUpgradeProp =
            context.getConfiguration().get(Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE);
        while (legacyCandidates.hasNext()) {
            List<String> batch = readCandidatesInBatch(legacyCandidates);
            log.info("found {} deletes to upgrade", batch.size());
            for (String legacyDelete : batch) {
                // create new formatted delete
                log.trace("upgrading delete entry for {}", legacyDelete);
                Path resolved = resolveRelativeDelete(legacyDelete, relativeUpgradeProp);
                mutationWriter.addMutation(ample.createDeleteMutation(switchToAllVolumes(resolved)));
            }
            mutationWriter.flush();
            // if nothing thrown then we're good so mark all deleted
            log.info("upgrade processing completed so delete old entries");
            for (String legacyDelete : batch) {
                log.trace("deleting old entry for {}", legacyDelete);
                mutationWriter.addMutation(deleteOldDeleteMutation(legacyDelete));
            }
            mutationWriter.flush();
        }
    } catch (TableNotFoundException | MutationsRejectedException e) {
        throw new RuntimeException(e);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)

Example 3 with Ample

Use of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project.

From the class MetadataTableUtil, method deleteTable.

/**
 * Deletes all metadata entries for the given table.
 *
 * @param tableId the table whose metadata rows are removed
 * @param insertDeletes if true, GC delete markers for the table's data files and directories
 *        are written (and flushed) before any metadata rows are deleted
 * @param context server context providing the scanner, writer, and Ample
 * @param lock service lock whose ID is stamped on each mutation when non-null; presumably so
 *        the writes can be tied to the lock holder — TODO confirm against putLockID
 * @throws AccumuloException declared by the signature; the visible body does not throw it
 *         directly
 */
public static void deleteTable(TableId tableId, boolean insertDeletes, ServerContext context, ServiceLock lock) throws AccumuloException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
        BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        Ample ample = context.getAmple();
        ms.setRange(new KeyExtent(tableId, null, null).toMetaRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            // First pass: only fetch the data-file and directory columns and write GC candidates.
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    String ref = TabletFileUtil.validate(key.getColumnQualifierData().toString());
                    bw.addMutation(ample.createDeleteMutation(ref));
                }
                if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    String uri = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
                    bw.addMutation(ample.createDeleteMutation(uri));
                }
            }
            // Flush so the GC markers are durable before any metadata rows are removed below.
            bw.flush();
            // Reset column filters so the second pass sees every column of every row.
            ms.clearColumns();
        }
        // Second pass: delete every cell, batching all cells of one row into a single mutation.
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                // First row seen: start a mutation for it.
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            // NOTE(review): this compares the current row against only the first
            // m.getRow().length bytes — a prefix comparison. It detects a row change for
            // the distinct rows a metadata scan yields, but verify it is intentional.
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                // Row changed: submit the finished mutation and start one for the new row.
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(context, lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        // Submit the trailing mutation (null only when the scan returned no rows).
        if (m != null)
            bw.addMutation(m);
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.clientImpl.ScannerImpl) BatchWriterImpl(org.apache.accumulo.core.clientImpl.BatchWriterImpl) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Key(org.apache.accumulo.core.data.Key)

Example 4 with Ample

Use of org.apache.accumulo.core.metadata.schema.Ample in the Apache Accumulo project.

From the class GarbageCollectorIT, method addEntries.

/**
 * Writes 100,000 synthetic GC delete markers (with deliberately long paths) into the metadata
 * table, granting the caller write permission on it first.
 *
 * @param client client used to grant permission and write the markers
 * @throws Exception if granting permission or writing fails
 */
private void addEntries(AccumuloClient client) throws Exception {
    Ample ample = getServerContext().getAmple();
    client.securityOperations().grantTablePermission(client.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    // Constant 100-character filename; hoisted out of the loop (it never changes per iteration).
    String longpath = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee" + "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj";
    try (BatchWriter writer = client.createBatchWriter(MetadataTable.NAME)) {
        for (int n = 0; n < 100000; n++) {
            Mutation deleteMarker = ample.createDeleteMutation(String.format("file:/%020d/%s", n, longpath));
            writer.addMutation(deleteMarker);
        }
    }
}
Also used : Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter)4 Ample (org.apache.accumulo.core.metadata.schema.Ample)4 Mutation (org.apache.accumulo.core.data.Mutation)3 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)2 Scanner (org.apache.accumulo.core.client.Scanner)2 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)2 Key (org.apache.accumulo.core.data.Key)2 Value (org.apache.accumulo.core.data.Value)2 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)2 TreeSet (java.util.TreeSet)1 AccumuloException (org.apache.accumulo.core.client.AccumuloException)1 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)1 IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner)1 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)1 BatchWriterImpl (org.apache.accumulo.core.clientImpl.BatchWriterImpl)1 ScannerImpl (org.apache.accumulo.core.clientImpl.ScannerImpl)1 PartialKey (org.apache.accumulo.core.data.PartialKey)1 Range (org.apache.accumulo.core.data.Range)1 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)1 MetadataTime (org.apache.accumulo.core.metadata.schema.MetadataTime)1