
Example 41 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.

From the class SplitRecoveryIT, method runSplitRecoveryTest.

private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
    Text midRow = new Text(mr);
    SortedMap<FileRef, DataFileValue> splitMapFiles = null;
    for (int i = 0; i < extents.length; i++) {
        KeyExtent extent = extents[i];
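        // Add a tablet for this extent with one simulated data file, recorded
        // under a bulk-load transaction so the metadata carries bulk file entries.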
        String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId() + "/dir_" + i;
        MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
        SortedMap<FileRef, DataFileValue> mapFiles = new TreeMap<>();
        mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
        if (i == extentToSplit) {
            splitMapFiles = mapFiles;
        }
        int tid = 0;
        TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
        MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
    }
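    // Split the chosen extent at midRow: high keeps (midRow, endRow],
    // low keeps (prevEndRow, midRow].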
    KeyExtent extent = extents[extentToSplit];
    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
    splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
}
Also used: DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), FileRef (org.apache.accumulo.server.fs.FileRef), Text (org.apache.hadoop.io.Text), TreeMap (java.util.TreeMap), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
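
A DataFileValue is just the estimated size and entry count that gets serialized into the metadata table's file column. For orientation, a minimal standalone round trip (a sketch assuming accumulo-core on the classpath; the byte[] constructor and getSize() also appear in Example 44 below, while encode() is assumed to produce the stored bytes):

import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueRoundTrip {
    public static void main(String[] args) {
        // estimated file size in bytes, then number of key/value entries
        DataFileValue dfv = new DataFileValue(1000017, 10000);
        // the bytes that would be stored as the metadata table value
        byte[] encoded = dfv.encode();
        DataFileValue decoded = new DataFileValue(encoded);
        System.out.println(decoded.getSize() + " bytes, " + decoded.getNumEntries() + " entries");
    }
}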

Example 42 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.

From the class SplitRecoveryIT, method ensureTabletHasNoUnexpectedMetadataEntries.

private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef, DataFileValue> expectedMapFiles) throws Exception {
    try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
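        // Columns that must appear exactly once in the tablet's metadata row;
        // each is removed as it is seen, so leftovers at the end are missing columns.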
        HashSet<ColumnFQ> expectedColumns = new HashSet<>();
        expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
        expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
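        // Column families that may legitimately appear any number of times.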
        HashSet<Text> expectedColumnFamilies = new HashSet<>();
        expectedColumnFamilies.add(DataFileColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (!key.getRow().equals(extent.getMetadataEntry())) {
                throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
            }
            if (expectedColumnFamilies.contains(key.getColumnFamily())) {
                continue;
            }
            if (expectedColumns.remove(new ColumnFQ(key))) {
                continue;
            }
            throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
        }
        System.out.println("expectedColumns " + expectedColumns);
        if (!expectedColumns.isEmpty()) {
            throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
        }
        SortedMap<FileRef, DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
        verifySame(expectedMapFiles, fixedMapFiles);
    }
}
Also used: Scanner (org.apache.accumulo.core.client.Scanner), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Text (org.apache.hadoop.io.Text), ColumnFQ (org.apache.accumulo.core.util.ColumnFQ), ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl), Entry (java.util.Map.Entry), FileRef (org.apache.accumulo.server.fs.FileRef), Key (org.apache.accumulo.core.data.Key), HashSet (java.util.HashSet)
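
The bookkeeping above hinges on ColumnFQ value equality: a ColumnFQ built from a scanned Key compares equal to one built from the matching family/qualifier constant, so HashSet.remove doubles as a "seen" marker. A small sketch (it assumes TIME_COLUMN is the srv:time pair in the metadata schema):

import java.util.HashSet;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.util.ColumnFQ;
import org.apache.hadoop.io.Text;

public class ColumnFQEquality {
    public static void main(String[] args) {
        HashSet<ColumnFQ> expected = new HashSet<>();
        expected.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
        // a key shaped like the srv:time entry a metadata scan would return
        Key key = new Key(new Text("1;split"), TabletsSection.ServerColumnFamily.NAME, new Text("time"));
        System.out.println(expected.remove(new ColumnFQ(key))); // prints true
    }
}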

Example 43 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.

From the class SplitRecoveryIT, method splitPartiallyAndRecover.

private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef, DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
    SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
    SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
    List<FileRef> highDatafilesToRemove = new ArrayList<>();
    MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
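    // Convert the original tablet into the high child in place, recording the old
    // prev end row and split ratio so an interrupted split can be recovered.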
    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
    Writer writer = MetadataTableUtil.getMetadataTable(context);
    Assignment assignment = new Assignment(high, instance);
    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
    assignment.server.putFutureLocation(m);
    writer.update(m);
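    // steps is the injected fail point: how much of the split completed before the
    // simulated failure. The verifyTabletInformation call below is expected to
    // drive recovery of the remainder.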
    if (steps >= 1) {
        Map<Long, ? extends Collection<FileRef>> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
        MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1L, -1L, zl);
    }
    if (steps >= 2) {
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
    }
    TabletServer.verifyTabletInformation(context, high, instance, new TreeMap<>(), "127.0.0.1:0", zl);
    if (steps >= 1) {
        ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
        ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
        Map<Long, ? extends Collection<FileRef>> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
        Map<Long, ? extends Collection<FileRef>> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
        if (!lowBulkFiles.equals(highBulkFiles)) {
            throw new Exception("Bulk files differ: " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
        }
        if (lowBulkFiles.isEmpty()) {
            throw new Exception("No bulk files loaded for " + low);
        }
    } else {
        ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
    }
}
Also used: DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), Assignment (org.apache.accumulo.server.master.state.Assignment), FileRef (org.apache.accumulo.server.fs.FileRef), Mutation (org.apache.accumulo.core.data.Mutation), ZooReaderWriter (org.apache.accumulo.server.zookeeper.ZooReaderWriter), Writer (org.apache.accumulo.core.client.impl.Writer), IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter)
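
For files with no first/last-row information (the empty HashMap passed to splitDatafiles above), the split is a straight proportional estimate: each file is credited to both children, with size and entry counts scaled by the split ratio. A standalone sketch of that arithmetic, using plain String paths in place of FileRef (an illustration, not the Accumulo implementation):

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class RatioSplitSketch {
    public static void main(String[] args) {
        SortedMap<String, DataFileValue> parent = new TreeMap<>();
        parent.put("F0000000.rf", new DataFileValue(1000017, 10000));
        SortedMap<String, DataFileValue> low = new TreeMap<>();
        SortedMap<String, DataFileValue> high = new TreeMap<>();
        // fraction of each file credited to the low child
        double splitRatio = .4;
        for (Map.Entry<String, DataFileValue> e : parent.entrySet()) {
            long size = e.getValue().getSize();
            long entries = e.getValue().getNumEntries();
            low.put(e.getKey(), new DataFileValue((long) (size * splitRatio), (long) (entries * splitRatio)));
            high.put(e.getKey(), new DataFileValue((long) (size * (1 - splitRatio)), (long) (entries * (1 - splitRatio))));
        }
        System.out.println("low=" + low + " high=" + high);
    }
}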

Example 44 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.

From the class Merge, method getSizeIterator.

protected Iterator<Size> getSizeIterator(Connector conn, String tablename, Text start, Text end) throws MergeException {
    // open up the metadata table and walk through the tablets.
    Table.ID tableId;
    Scanner scanner;
    try {
        tableId = Tables.getTableId(conn.getInstance(), tablename);
        scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    } catch (Exception e) {
        throw new MergeException(e);
    }
    scanner.setRange(new KeyExtent(tableId, end, start).toMetadataRange());
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
    final Iterator<Entry<Key, Value>> iterator = scanner.iterator();
    Iterator<Size> result = new Iterator<Size>() {

        Size next = fetch();

        @Override
        public boolean hasNext() {
            return next != null;
        }

        private Size fetch() {
            long tabletSize = 0;
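            // Sum file sizes for the current tablet; the PREV_ROW column sorts
            // after the file entries in a row and marks the end of that tablet.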
            while (iterator.hasNext()) {
                Entry<Key, Value> entry = iterator.next();
                Key key = entry.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    tabletSize += new DataFileValue(entry.getValue().get()).getSize();
                } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
                    KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
                    return new Size(extent, tabletSize);
                }
            }
            return null;
        }

        @Override
        public Size next() {
            Size result = next;
            next = fetch();
            return result;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
    return result;
}
Also used: Scanner (org.apache.accumulo.core.client.Scanner), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), Table (org.apache.accumulo.core.client.impl.Table), ClientOnRequiredTable (org.apache.accumulo.core.cli.ClientOnRequiredTable), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Entry (java.util.Map.Entry), Iterator (java.util.Iterator), Value (org.apache.accumulo.core.data.Value), Key (org.apache.accumulo.core.data.Key)
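
A hypothetical consumer of getSizeIterator, written as a Merge subclass since the method is protected; it totals the data file bytes across a table's tablets. This is a sketch: it assumes the class is org.apache.accumulo.core.util.Merge and that the nested Size type exposes its size field to subclasses.

import java.util.Iterator;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.util.Merge;

public class SizeReport extends Merge {
    public void report(Connector conn, String tablename) throws MergeException {
        // null start/end rows cover the whole table
        Iterator<Size> sizes = getSizeIterator(conn, tablename, null, null);
        long totalBytes = 0;
        int tablets = 0;
        while (sizes.hasNext()) {
            totalBytes += sizes.next().size;
            tablets++;
        }
        System.out.println(tablets + " tablets, " + totalBytes + " bytes of data files");
    }
}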

Aggregations

DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 44
FileRef (org.apache.accumulo.server.fs.FileRef): 32
Value (org.apache.accumulo.core.data.Value): 18
Text (org.apache.hadoop.io.Text): 14
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
Mutation (org.apache.accumulo.core.data.Mutation): 12
Test (org.junit.Test): 12
ArrayList (java.util.ArrayList): 9
Key (org.apache.accumulo.core.data.Key): 9
TreeMap (java.util.TreeMap): 8
HashMap (java.util.HashMap): 7
HashSet (java.util.HashSet): 6
Scanner (org.apache.accumulo.core.client.Scanner): 6
IOException (java.io.IOException): 5
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 5
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 4
Pair (org.apache.accumulo.core.util.Pair): 4
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 4
Entry (java.util.Map.Entry): 3
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 3