
Example 21 with FileRef

use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

the class MasterMetadataUtil method replaceDatafiles.

public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
    if (insertDeleteFlags) {
        // add delete flags for those paths before the data file reference is removed
        MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context);
    }
    // replace data file references to old mapfiles with the new mapfiles
    Mutation m = new Mutation(extent.getMetadataEntry());
    for (FileRef pathToRemove : datafilesToDelete) m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
    for (FileRef scanFile : scanFiles) m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
    if (size.getNumEntries() > 0)
        m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
    if (compactionId != null)
        TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
    TServerInstance self = getTServerInstance(address, zooLock);
    // record this server as the tablet's most recent location
    self.putLastLocation(m);
    // remove the old location
    if (lastLocation != null && !lastLocation.equals(self))
        lastLocation.clearLastLocation(m);
    MetadataTableUtil.update(context, zooLock, m, extent);
}
Also used : FileRef(org.apache.accumulo.server.fs.FileRef) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance)
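
The mutation this method assembles can be sketched standalone. The following is a minimal sketch, not Accumulo source: the metadata row, file paths, and sizes are invented, and it assumes FileRef's single-String constructor, which resolves the metadata and file system references to the same path.

static Mutation sketchReplaceDatafiles() {
    // "2;m" is an invented metadata row: table id 2, tablet end row "m"
    Mutation m = new Mutation(new Text("2;m"));
    FileRef oldFile = new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/F0000001.rf");
    FileRef newFile = new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/C0000002.rf");
    // drop the reference to the old file...
    m.putDelete(DataFileColumnFamily.NAME, oldFile.meta());
    // ...and point the tablet at its replacement (2048 bytes, 150 entries)
    m.put(DataFileColumnFamily.NAME, newFile.meta(), new Value(new DataFileValue(2048, 150).encode()));
    return m;
}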

Example 22 with FileRef

use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

the class MasterMetadataUtil method fixSplit.

private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
    if (metadataPrevEndRow == null)
        // a split tablet should always have a prev end row; without it the split cannot be recovered
        throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
    // check to see if the prev tablet exists in the metadata table
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
    try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
        VolumeManager fs = VolumeManagerImpl.get();
        if (!scanner2.iterator().hasNext()) {
            log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
        } else {
            log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            List<FileRef> highDatafilesToRemove = new ArrayList<>();
            SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
            try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
                Key rowKey = new Key(metadataEntry);
                scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
                scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
                for (Entry<Key, Value> entry : scanner3) {
                    if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                        origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
                    }
                }
            }
            MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
            MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
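
The recovery decision above hinges on a single-row probe: if the previous tablet's metadata entry is absent, the split is rolled back; otherwise the high side is recomputed and the split is finished. Below is a minimal sketch of that probe range, with an invented table id and split row, and assuming Table.ID.of as the id factory.

static Range sketchPrevTabletProbe() {
    // metadata row the previous tablet would occupy (invented values)
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(Table.ID.of("2"), new Text("m"))));
    // restrict the scan to exactly that metadata row
    return new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW));
}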

Example 23 with FileRef

use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

the class MetadataTableUtil method splitDatafiles.

public static void splitDatafiles(Text midRow, double splitRatio, Map<FileRef, FileUtil.FileInfo> firstAndLastRows, SortedMap<FileRef, DataFileValue> datafiles, SortedMap<FileRef, DataFileValue> lowDatafileSizes, SortedMap<FileRef, DataFileValue> highDatafileSizes, List<FileRef> highDatafilesToRemove) {
    for (Entry<FileRef, DataFileValue> entry : datafiles.entrySet()) {
        Text firstRow = null;
        Text lastRow = null;
        boolean rowsKnown = false;
        FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());
        if (mfi != null) {
            firstRow = mfi.getFirstRow();
            lastRow = mfi.getLastRow();
            rowsKnown = true;
        }
        if (rowsKnown && firstRow.compareTo(midRow) > 0) {
            // only in high
            long highSize = entry.getValue().getSize();
            long highEntries = entry.getValue().getNumEntries();
            highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
            // only in low
            long lowSize = entry.getValue().getSize();
            long lowEntries = entry.getValue().getNumEntries();
            lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
            highDatafilesToRemove.add(entry.getKey());
        } else {
            long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
            long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
            lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
            long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
            long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
            highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        }
    }
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Text(org.apache.hadoop.io.Text)
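
Because splitDatafiles only manipulates its map and list arguments, it is easy to exercise in isolation. A minimal sketch with one invented file follows; since firstAndLastRows is empty, the file takes the estimated-split branch, and FileRef's single-String constructor is assumed.

static void sketchSplitDatafiles() {
    SortedMap<FileRef, DataFileValue> datafiles = new TreeMap<>();
    datafiles.put(new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/F0000001.rf"), new DataFileValue(1000, 100));
    SortedMap<FileRef, DataFileValue> low = new TreeMap<>();
    SortedMap<FileRef, DataFileValue> high = new TreeMap<>();
    List<FileRef> highToRemove = new ArrayList<>();
    // no first/last row info, so sizes are apportioned by the split ratio
    MetadataTableUtil.splitDatafiles(new Text("m"), 0.5, new HashMap<>(), datafiles, low, high, highToRemove);
    // with ratio 0.5: low gets floor(1000 * 0.5) = 500 bytes / 50 entries,
    // and high gets ceil(1000 * 0.5) = 500 bytes / 50 entries
}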

Example 24 with FileRef

use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

the class MetadataTableUtil method getBulkFilesLoaded.

public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
    List<FileRef> result = new ArrayList<>();
    try (Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY))) {
        VolumeManager fs = VolumeManagerImpl.get();
        mscanner.setRange(extent.toMetadataRange());
        mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : mscanner) {
            if (Long.parseLong(entry.getValue().toString()) == tid) {
                result.add(new FileRef(fs, entry.getKey()));
            }
        }
        return result;
    } catch (TableNotFoundException ex) {
        // unlikely
        throw new RuntimeException("Onos! teh metadata table has vanished!!");
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) FileRef(org.apache.accumulo.server.fs.FileRef) ArrayList(java.util.ArrayList) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
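
A hypothetical call site for the method above; the Connector, extent, and bulk-import transaction id are assumed to be supplied by the caller.

static void printBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
    // print the resolved file system path of each file the transaction loaded
    for (FileRef ref : MetadataTableUtil.getBulkFilesLoaded(conn, extent, tid))
        System.out.println(ref.path());
}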

Example 25 with FileRef

use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

the class MetadataTableUtil method getDataFileSizes.

public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
    try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        Text row = extent.getMetadataEntry();
        VolumeManager fs = VolumeManagerImpl.get();
        Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
        endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
        mdScanner.setRange(new Range(new Key(row), endKey));
        for (Entry<Key, Value> entry : mdScanner) {
            if (!entry.getKey().getRow().equals(row))
                break;
            DataFileValue dfv = new DataFileValue(entry.getValue().get());
            sizes.put(new FileRef(fs, entry.getKey()), dfv);
        }
        return sizes;
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) TreeMap(java.util.TreeMap) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
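
A hypothetical call site for the method above; the extent and ClientContext are assumed to be supplied by the caller.

static void printDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
    SortedMap<FileRef, DataFileValue> sizes = MetadataTableUtil.getDataFileSizes(extent, context);
    // print each data file's resolved path with its size and entry count
    for (Entry<FileRef, DataFileValue> e : sizes.entrySet())
        System.out.println(e.getKey().path() + " -> " + e.getValue().getSize() + " bytes, " + e.getValue().getNumEntries() + " entries");
}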

Aggregations

FileRef (org.apache.accumulo.server.fs.FileRef): 62
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 36
Value (org.apache.accumulo.core.data.Value): 17
Key (org.apache.accumulo.core.data.Key): 16
ArrayList (java.util.ArrayList): 15
HashMap (java.util.HashMap): 13
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
IOException (java.io.IOException): 12
Test (org.junit.Test): 12
Text (org.apache.hadoop.io.Text): 11
Mutation (org.apache.accumulo.core.data.Mutation): 10
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 10
Scanner (org.apache.accumulo.core.client.Scanner): 9
PartialKey (org.apache.accumulo.core.data.PartialKey): 9
TreeMap (java.util.TreeMap): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 8
Path (org.apache.hadoop.fs.Path): 8
HashSet (java.util.HashSet): 7
IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner): 6
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 6