Search in sources:

Example 1 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Source: class MasterMetadataUtil, method fixSplit.

/**
 * Repairs a tablet that was left in a partially-completed split state.
 *
 * <p>If the previous (high) tablet's metadata entry is missing, the split never got far enough
 * to be completed, so it is rolled back. Otherwise the split is finished by partitioning the
 * tablet's data files between the low and high tablets according to {@code splitRatio}.
 *
 * @param context client context used to read and mutate the metadata table
 * @param tableId id of the table whose tablet is being repaired
 * @param metadataEntry metadata row of the tablet being fixed
 * @param metadataPrevEndRow previous end row of the tablet; must not be null for a split tablet
 * @param oper encoded pre-split previous end row, used when rolling the split back
 * @param splitRatio fraction of data assigned to the low tablet when finishing the split
 * @param tserver unused in this method; kept for signature compatibility with callers
 * @param time unused in this method; kept for signature compatibility with callers
 * @param initFlushID unused in this method; kept for signature compatibility with callers
 * @param initCompactID unused in this method; kept for signature compatibility with callers
 * @param lock ZooKeeper lock proving this process may mutate the metadata table
 * @return the extent of the tablet after the repair (rolled back or finished)
 * @throws AccumuloException if the tablet unexpectedly has no previous end row
 * @throws IOException if the volume manager cannot be obtained while finishing the split
 */
private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
    if (metadataPrevEndRow == null)
        // a split tablet always records its prev end row; its absence means the metadata is corrupt
        throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
    // check to see if prev tablet exist in metadata tablet
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
    try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
        if (!scanner2.iterator().hasNext()) {
            log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            // decode the pre-split prev end row once and reuse it for both the rollback and the result
            Text oldPrevEndRow = KeyExtent.decodePrevEndRow(oper);
            MetadataTableUtil.rollBackSplit(metadataEntry, oldPrevEndRow, context, lock);
            return new KeyExtent(metadataEntry, oldPrevEndRow);
        } else {
            log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            List<FileRef> highDatafilesToRemove = new ArrayList<>();
            SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
            // acquire the volume manager only on this path; the rollback branch never needs it
            VolumeManager fs = VolumeManagerImpl.get();
            try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
                Key rowKey = new Key(metadataEntry);
                scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
                scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
                // collect the sizes of every data file currently referenced by this tablet
                for (Entry<Key, Value> entry : scanner3) {
                    if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                        origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
                    }
                }
            }
            // partition the original files between the low and high tablets, then finish the split
            MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
            MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 2 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Source: class MetadataTableUtil, method moveMetaDeleteMarkers.

/**
 * During an upgrade we need to move deletion requests for files under the !METADATA table to the
 * root tablet, standardizing the table schema between all metadata tables.
 */
public static void moveMetaDeleteMarkers(ClientContext context) {
    final String oldDeletesPrefix = "!!~del";
    // half-open range covering every legacy delete marker row in the root table
    final Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
    try (Scanner scanner = new ScannerImpl(context, RootTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(oldDeletesRange);
        for (Entry<Key, Value> entry : scanner) {
            String row = entry.getKey().getRow().toString();
            // rows are sorted; the first row outside the legacy prefix ends the migration
            if (!row.startsWith(oldDeletesPrefix)) {
                break;
            }
            moveDeleteEntry(context, RootTable.OLD_EXTENT, entry, row, oldDeletesPrefix);
        }
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 3 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Source: class MetadataTableUtil, method getTabletLogScanner.

/**
 * Builds a scanner over the write-ahead-log column family for the given extent's metadata row.
 * The caller owns the returned scanner and is responsible for closing it.
 */
private static Scanner getTabletLogScanner(ClientContext context, KeyExtent extent) {
    // metadata about the metadata table itself is stored in the root table
    Table.ID tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    Scanner scanner = new ScannerImpl(context, tableId, Authorizations.EMPTY);
    scanner.fetchColumnFamily(LogColumnFamily.NAME);
    Text start = extent.getMetadataEntry();
    // span exactly the log column family within this tablet's row
    Key stopKey = new Key(start, LogColumnFamily.NAME).followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(new Key(start), stopKey));
    return scanner;
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 4 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Source: class MetadataTableUtil, method getDataFileSizes.

/**
 * Reads the data-file entries recorded for the given extent from the metadata table.
 *
 * @param extent tablet whose file metadata should be read
 * @param context client context used to scan the metadata table
 * @return a sorted map from each file reference to its recorded size information
 * @throws IOException if the volume manager cannot be obtained
 */
public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
    try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        Text row = extent.getMetadataEntry();
        VolumeManager fs = VolumeManagerImpl.get();
        // restrict the scan to the data-file column family of this tablet's row
        Key stopKey = new Key(row, DataFileColumnFamily.NAME, new Text("")).followingKey(PartialKey.ROW_COLFAM);
        mdScanner.setRange(new Range(new Key(row), stopKey));
        for (Entry<Key, Value> entry : mdScanner) {
            // defensive: stop if the scan somehow crosses into another tablet's row
            if (!entry.getKey().getRow().equals(row)) {
                break;
            }
            sizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
        }
    }
    return sizes;
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) TreeMap(java.util.TreeMap) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 5 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Source: class MetadataTableUtil, method deleteTable.

/**
 * Removes every metadata-table entry belonging to the given table.
 *
 * <p>When {@code insertDeletes} is true, file and directory delete markers are written and
 * flushed BEFORE the metadata entries are removed, so a crash mid-way never strands data files
 * without their delete markers (fault tolerance).
 *
 * @param tableId id of the table whose metadata is being deleted
 * @param insertDeletes whether to first insert delete markers for data files and directories
 * @param context client context used to scan and write the metadata table
 * @param lock ZooKeeper lock recorded in each mutation; may be null to skip lock stamping
 * @throws AccumuloException if writing the mutations fails
 * @throws IOException if the volume manager cannot be obtained
 */
public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
        BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    // delete marker for a data file referenced by this tablet
                    FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
                    bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
                }
                if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    // delete marker for the tablet's directory
                    bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
                }
            }
            // ensure all delete markers are durable before removing the metadata entries below
            bw.flush();
            // reset column filters so the second pass sees every column, not just the two fetched
            ms.clearColumns();
        }
        // second pass: batch column deletes per row into one mutation per metadata row
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                // first row seen: start its mutation
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            // row changed: submit the previous row's mutation and start a new one
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        // submit the final row's mutation (null when the scan returned no rows)
        if (m != null)
            bw.addMutation(m);
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) BatchWriterImpl(org.apache.accumulo.core.client.impl.BatchWriterImpl) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Aggregations

ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl)9 Key (org.apache.accumulo.core.data.Key)9 Scanner (org.apache.accumulo.core.client.Scanner)8 PartialKey (org.apache.accumulo.core.data.PartialKey)7 Value (org.apache.accumulo.core.data.Value)7 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)7 IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner)6 FileRef (org.apache.accumulo.server.fs.FileRef)6 Text (org.apache.hadoop.io.Text)6 Range (org.apache.accumulo.core.data.Range)5 TreeMap (java.util.TreeMap)4 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)4 ArrayList (java.util.ArrayList)3 Table (org.apache.accumulo.core.client.impl.Table)3 KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)3 MetadataTable (org.apache.accumulo.core.metadata.MetadataTable)3 RootTable (org.apache.accumulo.core.metadata.RootTable)3 AccumuloException (org.apache.accumulo.core.client.AccumuloException)2 ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable)2 ColumnFQ (org.apache.accumulo.core.util.ColumnFQ)2