Search in sources :

Example 6 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Example from the class MetadataTableUtil, method getBulkFilesLoaded.

/**
 * Reads the bulk-file entries for the given extent from its metadata row and groups the
 * loaded files by the bulk transaction id stored in each entry's value.
 *
 * @param context client context used to open the metadata scanner
 * @param extent the tablet whose bulk-loaded files are requested
 * @return map from bulk transaction id to the files loaded under that transaction
 * @throws IOException if the volume manager cannot be obtained
 */
public static Map<Long, ? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException {
    Text metadataRow = extent.getMetadataEntry();
    Map<Long, List<FileRef>> result = new HashMap<>();
    VolumeManager fs = VolumeManagerImpl.get();
    // Metadata for a metadata tablet lives in the root table; all other tablets are in the metadata table.
    try (Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(new Range(metadataRow));
        scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : scanner) {
            // The value of a bulk-file entry is the bulk transaction id that loaded the file.
            Long tid = Long.parseLong(entry.getValue().toString());
            // computeIfAbsent replaces the manual get/null-check/put grouping pattern.
            result.computeIfAbsent(tid, k -> new ArrayList<>()).add(new FileRef(fs, entry.getKey()));
        }
    }
    return result;
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) List(java.util.List) ArrayList(java.util.ArrayList) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)

Example 7 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Example from the class MetadataTableUtil, method getFileAndLogEntries.

/**
 * Collects the write-ahead log entries and data file sizes for a tablet. The root tablet is
 * handled specially by listing its directory on disk; every other tablet is read from the
 * appropriate metadata table.
 *
 * @param context client context used to open the metadata scanner
 * @param extent the tablet whose logs and files are requested
 * @return pair of (log entries, map of file to recorded size)
 */
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> logEntries = new ArrayList<>();
    TreeMap<FileRef, DataFileValue> fileSizes = new TreeMap<>();
    VolumeManager volMgr = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
        // Root tablet state is not stored in a metadata table; read logs from ZooKeeper
        // and enumerate the files directly from the root tablet directory.
        getRootLogEntries(logEntries);
        Path rootTabletDir = new Path(getRootTabletDir());
        for (FileStatus fileStatus : volMgr.listStatus(rootTabletDir)) {
            String pathString = fileStatus.getPath().toString();
            if (pathString.endsWith("_tmp")) {
                // Skip in-progress temporary files.
                continue;
            }
            // Actual sizes are unknown here; record placeholder values.
            fileSizes.put(new FileRef(pathString, fileStatus.getPath()), new DataFileValue(0, 0));
        }
        return new Pair<>(logEntries, fileSizes);
    }
    // Metadata for a metadata tablet lives in the root table.
    Table.ID tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    try (Scanner mdScanner = new ScannerImpl(context, tableId, Authorizations.EMPTY)) {
        mdScanner.fetchColumnFamily(LogColumnFamily.NAME);
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        mdScanner.setRange(extent.toMetadataRange());
        for (Entry<Key, Value> entry : mdScanner) {
            Key key = entry.getKey();
            // The range may only contain this tablet's own metadata row.
            if (!key.getRow().equals(extent.getMetadataEntry())) {
                throw new RuntimeException("Unexpected row " + key.getRow() + " expected " + extent.getMetadataEntry());
            }
            if (key.getColumnFamily().equals(LogColumnFamily.NAME)) {
                logEntries.add(LogEntry.fromKeyValue(key, entry.getValue()));
            } else if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                fileSizes.put(new FileRef(volMgr, key), new DataFileValue(entry.getValue().get()));
            } else {
                // Only the two fetched families should ever come back.
                throw new RuntimeException("Unexpected col fam " + key.getColumnFamily());
            }
        }
    }
    return new Pair<>(logEntries, fileSizes);
}
Also used : Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileStatus(org.apache.hadoop.fs.FileStatus) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Pair(org.apache.accumulo.core.util.Pair)

Example 8 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Example from the class TabletServer, method verifyTabletInformation.

/**
 * Verifies the metadata entries for a tablet before this server loads it, repopulating
 * {@code tabletsKeyValues} with a freshly scanned copy of the tablet's metadata row(s).
 * If an old-prev-row column is present, a split was interrupted; the split is repaired and
 * verification retried on the fixed extent.
 *
 * @param tabletsKeyValues out-parameter; cleared and refilled with the scanned metadata
 * @return pair of (tablet directory, null) on success, (null, fixed extent) when a repaired
 *         split produced a different extent, or null if the metadata check failed
 */
public static Pair<Text, KeyExtent> verifyTabletInformation(AccumuloServerContext context, KeyExtent extent, TServerInstance instance, final SortedMap<Key, Value> tabletsKeyValues, String clientAddress, ZooLock lock) throws AccumuloSecurityException, DistributedStoreException, AccumuloException {
    Objects.requireNonNull(tabletsKeyValues);
    log.debug("verifying extent {}", extent);
    if (extent.isRootTablet()) {
        // Root tablet metadata is not stored in a metadata table.
        return verifyRootTablet(extent, instance);
    }
    // Metadata for a metadata tablet lives in the root table.
    Table.ID tableToVerify = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    // Columns consulted when repairing an interrupted split (varargs form — no explicit array needed).
    List<ColumnFQ> columnsToFetch = Arrays.asList(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN);
    TreeMap<Key, Value> tkv = new TreeMap<>();
    try (ScannerImpl scanner = new ScannerImpl(context, tableToVerify, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        for (Entry<Key, Value> entry : scanner) tkv.put(entry.getKey(), entry.getValue());
    }
    // only populate map after success
    tabletsKeyValues.clear();
    tabletsKeyValues.putAll(tkv);
    Text metadataEntry = extent.getMetadataEntry();
    Value dir = checkTabletMetadata(extent, instance, tabletsKeyValues, metadataEntry);
    if (dir == null)
        return null;
    // An old-prev-row column indicates a split that did not complete.
    Value oldPrevEndRow = null;
    for (Entry<Key, Value> entry : tabletsKeyValues.entrySet()) {
        if (TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
            oldPrevEndRow = entry.getValue();
        }
    }
    if (oldPrevEndRow != null) {
        SortedMap<Text, SortedMap<ColumnFQ, Value>> tabletEntries;
        tabletEntries = MetadataTableUtil.getTabletEntries(tabletsKeyValues, columnsToFetch);
        KeyExtent fke;
        try {
            fke = MasterMetadataUtil.fixSplit(context, metadataEntry, tabletEntries.get(metadataEntry), instance, lock);
        } catch (IOException e) {
            // Keep the cause both in the log and in the wrapped exception so the
            // underlying failure's stack trace is not lost.
            log.error("Error fixing split {}", metadataEntry, e);
            throw new AccumuloException(e.toString(), e);
        }
        if (!fke.equals(extent)) {
            return new Pair<>(null, fke);
        }
        // reread and reverify metadata entries now that metadata entries were fixed
        tabletsKeyValues.clear();
        return verifyTabletInformation(context, fke, instance, tabletsKeyValues, clientAddress, lock);
    }
    return new Pair<>(new Text(dir.get()), null);
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) TreeMap(java.util.TreeMap) TKeyExtent(org.apache.accumulo.core.data.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ColumnFQ(org.apache.accumulo.core.util.ColumnFQ) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) SortedMap(java.util.SortedMap) TKeyValue(org.apache.accumulo.core.data.thrift.TKeyValue) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Pair(org.apache.accumulo.core.util.Pair)

Example 9 with ScannerImpl

use of org.apache.accumulo.core.client.impl.ScannerImpl in project accumulo by apache.

Example from the class SplitRecoveryIT, method ensureTabletHasNoUnexpectedMetadataEntries.

/**
 * Asserts that the metadata rows for {@code extent} contain exactly the expected columns and
 * column families, and that the recorded data file sizes match {@code expectedMapFiles}.
 *
 * @throws Exception if an unexpected entry is found, an expected column is missing, or the
 *         data file sizes do not match
 */
private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef, DataFileValue> expectedMapFiles) throws Exception {
    try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        // Columns that must each be seen for the tablet; removed as they are encountered.
        HashSet<ColumnFQ> expectedColumns = new HashSet<>();
        expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
        expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
        expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
        // Column families that are allowed to appear any number of times.
        HashSet<Text> expectedColumnFamilies = new HashSet<>();
        expectedColumnFamilies.add(DataFileColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
        expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (!key.getRow().equals(extent.getMetadataEntry())) {
                throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
            }
            if (expectedColumnFamilies.contains(key.getColumnFamily())) {
                continue;
            }
            if (expectedColumns.remove(new ColumnFQ(key))) {
                continue;
            }
            throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
        }
        System.out.println("expectedColumns " + expectedColumns);
        // The original condition "size() > 1 || (size() == 1)" is just "not empty":
        // every expected column must have been removed (i.e. seen) during the scan.
        if (!expectedColumns.isEmpty()) {
            throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
        }
        SortedMap<FileRef, DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
        verifySame(expectedMapFiles, fixedMapFiles);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) ColumnFQ(org.apache.accumulo.core.util.ColumnFQ) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) Entry(java.util.Map.Entry) FileRef(org.apache.accumulo.server.fs.FileRef) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet)

Aggregations

ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl)9 Key (org.apache.accumulo.core.data.Key)9 Scanner (org.apache.accumulo.core.client.Scanner)8 PartialKey (org.apache.accumulo.core.data.PartialKey)7 Value (org.apache.accumulo.core.data.Value)7 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)7 IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner)6 FileRef (org.apache.accumulo.server.fs.FileRef)6 Text (org.apache.hadoop.io.Text)6 Range (org.apache.accumulo.core.data.Range)5 TreeMap (java.util.TreeMap)4 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)4 ArrayList (java.util.ArrayList)3 Table (org.apache.accumulo.core.client.impl.Table)3 KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)3 MetadataTable (org.apache.accumulo.core.metadata.MetadataTable)3 RootTable (org.apache.accumulo.core.metadata.RootTable)3 AccumuloException (org.apache.accumulo.core.client.AccumuloException)2 ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable)2 ColumnFQ (org.apache.accumulo.core.util.ColumnFQ)2