Search in sources:

Example 1 with MetadataTime

Use of org.apache.accumulo.core.metadata.schema.MetadataTime in project accumulo by apache.

The class FileSystemInitializer, method createEntriesForTablet.

private void createEntriesForTablet(TreeMap<Key, Value> map, Tablet tablet) {
    // All new tablets start with zero data files, so every file entry shares one empty size value.
    Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
    Text extent = new Text(MetadataSchema.TabletsSection.encodeRow(tablet.tableId, tablet.endRow));
    addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dirName));
    // A freshly created tablet begins at logical time zero.
    addEntry(map, extent, TIME_COLUMN, new Value(new MetadataTime(0, TimeType.LOGICAL).encode()));
    addEntry(map, extent, PREV_ROW_COLUMN, MetadataSchema.TabletsSection.TabletColumnFamily.encodePrevEndRow(tablet.prevEndRow));
    for (String file : tablet.files) {
        addEntry(map, extent, new ColumnFQ(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME, new Text(file)), EMPTY_SIZE);
    }
}
Also used: ColumnFQ(org.apache.accumulo.core.util.ColumnFQ) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime)
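
For context, a minimal sketch of the MetadataTime round trip behind the TIME_COLUMN value above, using only the constructor, encode, and parse calls that appear in these examples:

import org.apache.accumulo.core.client.admin.TimeType;
import org.apache.accumulo.core.metadata.schema.MetadataTime;

// encode() prefixes the time with a one-letter type code ("L" for LOGICAL),
// so a brand-new tablet's time column holds the string "L0".
MetadataTime created = new MetadataTime(0, TimeType.LOGICAL);
String encoded = created.encode();                  // "L0"
MetadataTime parsed = MetadataTime.parse(encoded);
// parsed.getTime() == 0 and parsed.getType() == TimeType.LOGICAL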

Example 2 with MetadataTime

Use of org.apache.accumulo.core.metadata.schema.MetadataTime in project accumulo by apache.

The class TabletGroupWatcher, method mergeMetadataRecords.

private void mergeMetadataRecords(MergeInfo info) throws AccumuloException {
    KeyExtent range = info.getExtent();
    Manager.log.debug("Merging metadata for {}", range);
    KeyExtent stop = getHighTablet(range);
    Manager.log.debug("Highest tablet is {}", stop);
    Value firstPrevRowValue = null;
    Text stopRow = stop.toMetaRow();
    Text start = range.prevEndRow();
    if (start == null) {
        start = new Text();
    }
    Range scanRange = new Range(TabletsSection.encodeRow(range.tableId(), start), false, stopRow, false);
    String targetSystemTable = MetadataTable.NAME;
    if (range.isMeta()) {
        targetSystemTable = RootTable.NAME;
    }
    AccumuloClient client = manager.getContext();
    try (BatchWriter bw = client.createBatchWriter(targetSystemTable)) {
        long fileCount = 0;
        // Make file entries in highest tablet
        Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(scanRange);
        TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
        ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        Mutation m = new Mutation(stopRow);
        MetadataTime maxLogicalTime = null;
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            Value value = entry.getValue();
            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
                fileCount++;
            } else if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
                Manager.log.debug("prevRow entry for lowest tablet is {}", value);
                firstPrevRowValue = new Value(value);
            } else if (ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, MetadataTime.parse(value.toString()));
            } else if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                String uri = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(range.tableId(), value.toString());
                bw.addMutation(manager.getContext().getAmple().createDeleteMutation(uri));
            }
        }
        // read the logical time from the last tablet in the merge range, it is not included in
        // the loop above
        scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(new Range(stopRow));
        ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(ExternalCompactionColumnFamily.NAME);
        Set<String> extCompIds = new HashSet<>();
        for (Entry<Key, Value> entry : scanner) {
            if (ServerColumnFamily.TIME_COLUMN.hasColumns(entry.getKey())) {
                maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, MetadataTime.parse(entry.getValue().toString()));
            } else if (ExternalCompactionColumnFamily.NAME.equals(entry.getKey().getColumnFamily())) {
                extCompIds.add(entry.getKey().getColumnQualifierData().toString());
            }
        }
        if (maxLogicalTime != null)
            ServerColumnFamily.TIME_COLUMN.put(m, new Value(maxLogicalTime.encode()));
        // delete any entries for external compactions
        extCompIds.stream().forEach(ecid -> m.putDelete(ExternalCompactionColumnFamily.STR_NAME, ecid));
        if (!m.getUpdates().isEmpty()) {
            bw.addMutation(m);
        }
        bw.flush();
        Manager.log.debug("Moved {} files to {}", fileCount, stop);
        if (firstPrevRowValue == null) {
            Manager.log.debug("tablet already merged");
            return;
        }
        stop = new KeyExtent(stop.tableId(), stop.endRow(), TabletColumnFamily.decodePrevEndRow(firstPrevRowValue));
        Mutation updatePrevRow = TabletColumnFamily.createPrevRowMutation(stop);
        Manager.log.debug("Setting the prevRow for last tablet: {}", stop);
        bw.addMutation(updatePrevRow);
        bw.flush();
        deleteTablets(info, scanRange, bw, client);
        // Clean-up the last chopped marker
        var m2 = new Mutation(stopRow);
        ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m2);
        bw.addMutation(m2);
        bw.flush();
    } catch (Exception ex) {
        throw new AccumuloException(ex);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) DistributedStoreException(org.apache.accumulo.server.manager.state.DistributedStoreException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) NotServingTabletException(org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException) WalMarkerException(org.apache.accumulo.server.log.WalStateManager.WalMarkerException) BadLocationStateException(org.apache.accumulo.core.metadata.TabletLocationState.BadLocationStateException) TException(org.apache.thrift.TException) IOException(java.io.IOException) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) HashSet(java.util.HashSet)
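
The merge keeps the largest time seen across every tablet in the range. TabletTime.maxMetadataTime tolerates a null accumulator, which is why maxLogicalTime can start as null above. The accumulation pattern in isolation, a minimal sketch (TabletTime's package is assumed from the server module):

import java.util.List;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
import org.apache.accumulo.server.tablets.TabletTime; // assumed package

// Fold the per-tablet times into a single maximum; no seed value is needed.
MetadataTime max = null;
for (String encoded : List.of("L10", "L42", "L7")) {
    max = TabletTime.maxMetadataTime(max, MetadataTime.parse(encoded));
}
// max.encode() is now "L42"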

Example 3 with MetadataTime

Use of org.apache.accumulo.core.metadata.schema.MetadataTime in project accumulo by apache.

The class TabletGroupWatcher, method deleteTablets.

private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Manager.log.debug("Deleting tablets for {}", extent);
    MetadataTime metadataTime = null;
    KeyExtent followingTablet = null;
    if (extent.endRow() != null) {
        Key nextExtent = new Key(extent.endRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.tableId(), nextExtent.getRow(), extent.endRow()));
        Manager.log.debug("Found following tablet {}", followingTablet);
    }
    try {
        AccumuloClient client = manager.getContext();
        ServerContext context = manager.getContext();
        Ample ample = context.getAmple();
        Text start = extent.prevEndRow();
        if (start == null) {
            start = new Text();
        }
        Manager.log.debug("Making file deletion entries for {}", extent);
        Range deleteRange = new Range(TabletsSection.encodeRow(extent.tableId(), start), false, TabletsSection.encodeRow(extent.tableId(), extent.endRow()), true);
        Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(deleteRange);
        ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
        Set<String> datafiles = new TreeSet<>();
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                datafiles.add(TabletFileUtil.validate(key.getColumnQualifierData().toString()));
                if (datafiles.size() > 1000) {
                    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
                    datafiles.clear();
                }
            } else if (ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                metadataTime = MetadataTime.parse(entry.getValue().toString());
            } else if (key.compareColumnFamily(CurrentLocationColumnFamily.NAME) == 0) {
                throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
            } else if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                String path = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(extent.tableId(), entry.getValue().toString());
                datafiles.add(path);
                if (datafiles.size() > 1000) {
                    ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
                    datafiles.clear();
                }
            }
        }
        ample.putGcFileAndDirCandidates(extent.tableId(), datafiles);
        BatchWriter bw = client.createBatchWriter(targetSystemTable);
        try {
            deleteTablets(info, deleteRange, bw, client);
        } finally {
            bw.close();
        }
        if (followingTablet != null) {
            Manager.log.debug("Updating prevRow of {} to {}", followingTablet, extent.prevEndRow());
            bw = client.createBatchWriter(targetSystemTable);
            try {
                Mutation m = new Mutation(followingTablet.toMetaRow());
                TabletColumnFamily.PREV_ROW_COLUMN.put(m, TabletColumnFamily.encodePrevEndRow(extent.prevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            } finally {
                bw.close();
            }
        } else {
            // Recreate the default tablet to hold the end of the table
            MetadataTableUtil.addTablet(new KeyExtent(extent.tableId(), null, extent.prevEndRow()), ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, manager.getContext(), metadataTime.getType(), manager.managerLock);
        }
    } catch (RuntimeException | TableNotFoundException ex) {
        throw new AccumuloException(ex);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ServerContext(org.apache.accumulo.server.ServerContext) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) Ample(org.apache.accumulo.core.metadata.schema.Ample) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
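
The size-gated flush in this method keeps the in-memory candidate set bounded while scanning an arbitrarily large merge range. The same batching idiom in isolation, a minimal sketch where sink is a hypothetical stand-in for ample.putGcFileAndDirCandidates(tableId, batch):

import java.util.Set;
import java.util.TreeSet;
import java.util.function.Consumer;

static void collectCandidates(Iterable<String> candidates, Consumer<Set<String>> sink) {
    Set<String> batch = new TreeSet<>();
    for (String candidate : candidates) {
        batch.add(candidate);
        if (batch.size() > 1000) {
            sink.accept(batch); // flush a full batch of GC candidates
            batch.clear();
        }
    }
    sink.accept(batch); // final, possibly smaller, batch (the method above does the same)
}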

Example 4 with MetadataTime

Use of org.apache.accumulo.core.metadata.schema.MetadataTime in project accumulo by apache.

The class SplitRecoveryIT, method runSplitRecoveryTest.

private void runSplitRecoveryTest(ServerContext context, int failPoint, String mr, int extentToSplit, ServiceLock zl, KeyExtent... extents) throws Exception {
    Text midRow = new Text(mr);
    SortedMap<StoredTabletFile, DataFileValue> splitMapFiles = null;
    for (int i = 0; i < extents.length; i++) {
        KeyExtent extent = extents[i];
        String dirName = "dir_" + i;
        String tdir = context.getTablesDirs().iterator().next() + "/" + extent.tableId() + "/" + dirName;
        MetadataTableUtil.addTablet(extent, dirName, context, TimeType.LOGICAL, zl);
        SortedMap<TabletFile, DataFileValue> mapFiles = new TreeMap<>();
        mapFiles.put(new TabletFile(new Path(tdir + "/" + RFile.EXTENSION + "_000_000")), new DataFileValue(1000017 + i, 10000 + i));
        int tid = 0;
        TransactionWatcher.ZooArbitrator.start(context, Constants.BULK_ARBITRATOR_TYPE, tid);
        SortedMap<StoredTabletFile, DataFileValue> storedFiles = new TreeMap<>(MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, new MetadataTime(0, TimeType.LOGICAL), context, zl));
        if (i == extentToSplit) {
            splitMapFiles = storedFiles;
        }
    }
    KeyExtent extent = extents[extentToSplit];
    KeyExtent high = new KeyExtent(extent.tableId(), extent.endRow(), midRow);
    KeyExtent low = new KeyExtent(extent.tableId(), midRow, extent.prevEndRow());
    splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
}
Also used: Path(org.apache.hadoop.fs.Path) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime)
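
Note the split geometry in the last few lines: high keeps the original endRow and takes midRow as its prevEndRow, while low runs from the original prevEndRow up to midRow, so the two extents meet exactly at the split point.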

Example 5 with MetadataTime

Use of org.apache.accumulo.core.metadata.schema.MetadataTime in project accumulo by apache.

The class LogicalTimeTest, method setUp.

@Before
public void setUp() {
    // "L1234" parses to TimeType.LOGICAL with time value 1234.
    MetadataTime mTime = MetadataTime.parse("L1234");
    ltime = (LogicalTime) TabletTime.getInstance(mTime);
}
Also used: MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime) Before(org.junit.Before)
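
TabletTime.getInstance dispatches on the TimeType carried by the parsed MetadataTime, which is why the cast to LogicalTime is safe here. A minimal sketch, assuming the usual one-letter prefixes ("L" for LOGICAL, "M" for MILLIS):

TabletTime logicalTime = TabletTime.getInstance(MetadataTime.parse("L1234")); // LogicalTime
TabletTime millisTime = TabletTime.getInstance(MetadataTime.parse("M1234"));  // MillisTime (assumed "M" prefix)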

Aggregations

MetadataTime (org.apache.accumulo.core.metadata.schema.MetadataTime) 13
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent) 6
Text (org.apache.hadoop.io.Text) 6
Value (org.apache.accumulo.core.data.Value) 4
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue) 4
IOException (java.io.IOException) 3
TreeMap (java.util.TreeMap) 3
Mutation (org.apache.accumulo.core.data.Mutation) 3
StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile) 3
TabletFile (org.apache.accumulo.core.metadata.TabletFile) 3
Test (org.junit.Test) 3
ArrayList (java.util.ArrayList) 2
HashSet (java.util.HashSet) 2
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient) 2
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 2
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 2
Scanner (org.apache.accumulo.core.client.Scanner) 2
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 2
Key (org.apache.accumulo.core.data.Key) 2
PartialKey (org.apache.accumulo.core.data.PartialKey) 2