
Example 61 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class SplitRecoveryIT, method ensureTabletHasNoUnexpectedMetadataEntries:

private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef, DataFileValue> expectedMapFiles) throws Exception {
    try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        HashSet<ColumnFQ> expectedColumns = new HashSet<>();
        expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
        expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
        HashSet<Text> expectedColumnFamilies = new HashSet<>();
        expectedColumnFamilies.add(DataFileColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
        Iterator<Entry<Key, Value>> iter = scanner.iterator();
        while (iter.hasNext()) {
            Key key = iter.next().getKey();
            if (!key.getRow().equals(extent.getMetadataEntry())) {
                throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
            }
            if (expectedColumnFamilies.contains(key.getColumnFamily())) {
                continue;
            }
            if (expectedColumns.remove(new ColumnFQ(key))) {
                continue;
            }
            throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
        }
        System.out.println("expectedColumns " + expectedColumns);
        if (!expectedColumns.isEmpty()) {
            throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
        }
        SortedMap<FileRef, DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
        verifySame(expectedMapFiles, fixedMapFiles);
    }
}
Also used: Scanner (org.apache.accumulo.core.client.Scanner), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Text (org.apache.hadoop.io.Text), ColumnFQ (org.apache.accumulo.core.util.ColumnFQ), ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl), Entry (java.util.Map.Entry), FileRef (org.apache.accumulo.server.fs.FileRef), Key (org.apache.accumulo.core.data.Key), HashSet (java.util.HashSet)
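The verifySame helper called at the end of this method is not shown in the snippet. A minimal sketch of such a comparison, assuming it only needs to check that both maps contain the same files with matching sizes and entry counts (the body below is an illustration, not necessarily the project's actual implementation):

private void verifySame(SortedMap<FileRef, DataFileValue> expected, SortedMap<FileRef, DataFileValue> actual) throws Exception {
    // Both maps must reference exactly the same set of files.
    if (!expected.keySet().equals(actual.keySet())) {
        throw new Exception("Key sets not the same " + expected.keySet() + " != " + actual.keySet());
    }
    // Each file's recorded size and entry count must match.
    for (Entry<FileRef, DataFileValue> entry : expected.entrySet()) {
        DataFileValue dfv = entry.getValue();
        DataFileValue otherDfv = actual.get(entry.getKey());
        if (dfv.getSize() != otherDfv.getSize() || dfv.getNumEntries() != otherDfv.getNumEntries()) {
            throw new Exception(entry.getKey() + " data file values not equal " + dfv + " " + otherDfv);
        }
    }
}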

Example 62 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class SplitRecoveryIT, method splitPartiallyAndRecover:

private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef, DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
    SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
    SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
    List<FileRef> highDatafilesToRemove = new ArrayList<>();
    MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
    Writer writer = MetadataTableUtil.getMetadataTable(context);
    Assignment assignment = new Assignment(high, instance);
    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
    assignment.server.putFutureLocation(m);
    writer.update(m);
    if (steps >= 1) {
        Map<Long, ? extends Collection<FileRef>> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
        MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1L, -1L, zl);
    }
    if (steps >= 2) {
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
    }
    TabletServer.verifyTabletInformation(context, high, instance, new TreeMap<>(), "127.0.0.1:0", zl);
    if (steps >= 1) {
        ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
        ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
        Map<Long, ? extends Collection<FileRef>> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
        Map<Long, ? extends Collection<FileRef>> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
        if (!lowBulkFiles.equals(highBulkFiles)) {
            throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
        }
        if (lowBulkFiles.isEmpty()) {
            throw new Exception(" no bulk files " + low);
        }
    } else {
        ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
    }
}
Also used: DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), Assignment (org.apache.accumulo.server.master.state.Assignment), FileRef (org.apache.accumulo.server.fs.FileRef), Mutation (org.apache.accumulo.core.data.Mutation), ZooReaderWriter (org.apache.accumulo.server.zookeeper.ZooReaderWriter), Writer (org.apache.accumulo.core.client.impl.Writer), IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter)
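The steps argument controls how far the metadata split is allowed to progress before recovery is verified: with steps == 0 only the future location of the high tablet has been written, steps == 1 additionally adds the low tablet, and steps == 2 also finishes the split on the high tablet. A minimal sketch of a driver loop that exercises every interruption point (illustrative only; context, extent, high, low, mapFiles, midRow, and zl stand in for values prepared by the surrounding test, and each iteration would need a freshly set up tablet):

// Hypothetical driver: interrupt the split after 0, 1, and 2 steps and
// verify that recovery repairs the metadata each time.
for (int failPoint = 0; failPoint <= 2; failPoint++) {
    // Re-create the pre-split tablet metadata here before each run.
    splitPartiallyAndRecover(context, extent, high, low, 0.5, mapFiles, midRow, "localhost:1234", failPoint, zl);
}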

Aggregations

FileRef (org.apache.accumulo.server.fs.FileRef): 62
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 36
Value (org.apache.accumulo.core.data.Value): 17
Key (org.apache.accumulo.core.data.Key): 16
ArrayList (java.util.ArrayList): 15
HashMap (java.util.HashMap): 13
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
IOException (java.io.IOException): 12
Test (org.junit.Test): 12
Text (org.apache.hadoop.io.Text): 11
Mutation (org.apache.accumulo.core.data.Mutation): 10
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 10
Scanner (org.apache.accumulo.core.client.Scanner): 9
PartialKey (org.apache.accumulo.core.data.PartialKey): 9
TreeMap (java.util.TreeMap): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 8
Path (org.apache.hadoop.fs.Path): 8
HashSet (java.util.HashSet): 7
IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner): 6
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 6