Search in sources:

Example 16 with LogEntry

use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

the class MetadataTableUtil method getFileAndLogEntries.

/**
 * Collects the write-ahead log entries and data-file sizes recorded for one tablet.
 *
 * <p>For the root tablet, logs come from ZooKeeper (via {@code getRootLogEntries}) and
 * files are listed straight from the root tablet directory with zero-valued sizes.
 * For any other tablet, both column families are read from the appropriate system
 * table (root table for metadata tablets, metadata table otherwise).
 *
 * @param context client context used to scan the system table
 * @param extent tablet whose metadata row is read
 * @return pair of (log entries, map of file reference to recorded size/count)
 * @throws RuntimeException if the scan returns an unexpected row or column family
 */
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> logs = new ArrayList<>();
    TreeMap<FileRef, DataFileValue> fileSizes = new TreeMap<>();
    VolumeManager volMgr = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
        // Root tablet state is not stored in any table; read it from ZooKeeper and disk.
        getRootLogEntries(logs);
        Path rootDir = new Path(getRootTabletDir());
        for (FileStatus status : volMgr.listStatus(rootDir)) {
            // Skip in-progress files; actual sizes are unknown here, so record zeros.
            if (!status.getPath().toString().endsWith("_tmp")) {
                fileSizes.put(new FileRef(status.getPath().toString(), status.getPath()), new DataFileValue(0, 0));
            }
        }
    } else {
        // Metadata tablets are described in the root table; user tablets in the metadata table.
        Table.ID tableToScan = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
        try (Scanner scanner = new ScannerImpl(context, tableToScan, Authorizations.EMPTY)) {
            scanner.fetchColumnFamily(LogColumnFamily.NAME);
            scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
            scanner.setRange(extent.toMetadataRange());
            for (Entry<Key, Value> entry : scanner) {
                Key key = entry.getKey();
                // Every returned key must belong to this tablet's metadata row.
                if (!key.getRow().equals(extent.getMetadataEntry())) {
                    throw new RuntimeException("Unexpected row " + key.getRow() + " expected " + extent.getMetadataEntry());
                }
                if (LogColumnFamily.NAME.equals(key.getColumnFamily())) {
                    logs.add(LogEntry.fromKeyValue(key, entry.getValue()));
                } else if (DataFileColumnFamily.NAME.equals(key.getColumnFamily())) {
                    fileSizes.put(new FileRef(volMgr, key), new DataFileValue(entry.getValue().get()));
                } else {
                    throw new RuntimeException("Unexpected col fam " + key.getColumnFamily());
                }
            }
        }
    }
    return new Pair<>(logs, fileSizes);
}
Also used : Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileStatus(org.apache.hadoop.fs.FileStatus) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Pair(org.apache.accumulo.core.util.Pair)

Example 17 with LogEntry

use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

the class ListVolumesUsed method listZookeeper.

/**
 * Prints every volume referenced by root-tablet state held in ZooKeeper:
 * the root tablet directory plus its write-ahead log entries.
 */
private static void listZookeeper() throws Exception {
    System.out.println("Listing volumes referenced in zookeeper");
    TreeSet<String> volumeSet = new TreeSet<>();
    // The root tablet's directory itself names a volume.
    volumeSet.add(getTableURI(MetadataTableUtil.getRootTabletDir()));
    // Add the volumes backing each root-tablet WAL entry.
    ArrayList<LogEntry> rootLogs = new ArrayList<>();
    MetadataTableUtil.getRootLogEntries(rootLogs);
    for (LogEntry rootLog : rootLogs) {
        getLogURIs(volumeSet, rootLog);
    }
    for (String volume : volumeSet) {
        System.out.println("\tVolume : " + volume);
    }
}
Also used : TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry)

Example 18 with LogEntry

use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

the class ListVolumesUsed method listTable.

/**
 * Prints the volumes referenced by a metadata table: its tablets section
 * (data files, WAL entries, tablet directories), its deletes section, and
 * the current write-ahead logs tracked in ZooKeeper.
 *
 * @param name metadata table to inspect (root or metadata table name)
 * @param conn connector used to scan the table and reach ZooKeeper
 * @throws Exception on scan or ZooKeeper failure
 */
private static void listTable(String name, Connector conn) throws Exception {
    System.out.println("Listing volumes referenced in " + name + " tablets section");
    TreeSet<String> volumes = new TreeSet<>();
    // try-with-resources: the original leaked this scanner (never closed).
    try (Scanner scanner = conn.createScanner(name, Authorizations.EMPTY)) {
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
        MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        for (Entry<Key, Value> entry : scanner) {
            if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
                // Data-file column qualifier holds the file path.
                volumes.add(getTableURI(entry.getKey().getColumnQualifier().toString()));
            } else if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.LogColumnFamily.NAME)) {
                LogEntry le = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
                getLogURIs(volumes, le);
            } else if (MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
                // Tablet directory path is stored in the value.
                volumes.add(getTableURI(entry.getValue().toString()));
            }
        }
        printVolumes(volumes);
        volumes.clear();
        // Reuse the same scanner for the deletes section.
        scanner.clearColumns();
        scanner.setRange(MetadataSchema.DeletesSection.getRange());
        for (Entry<Key, Value> entry : scanner) {
            // Delete entries encode the path in the row, after a fixed prefix.
            String delPath = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
            volumes.add(getTableURI(delPath));
        }
        // Typo fixed: "occurrs" -> "occurs".
        System.out.println("Listing volumes referenced in " + name + " deletes section (volume replacement occurs at deletion time)");
        printVolumes(volumes);
    }
    volumes.clear();
    // Current WALs are tracked in ZooKeeper, not in the table itself.
    WalStateManager wals = new WalStateManager(conn.getInstance(), ZooReaderWriter.getInstance());
    for (Path path : wals.getAllState().keySet()) {
        volumes.add(getLogURI(path.toString()));
    }
    System.out.println("Listing volumes referenced in " + name + " current logs");
    printVolumes(volumes);
}

/** Prints each collected volume, one per line, in the "\tVolume : x" format. */
private static void printVolumes(TreeSet<String> volumes) {
    for (String volume : volumes) {
        System.out.println("\tVolume : " + volume);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Scanner(org.apache.accumulo.core.client.Scanner) TreeSet(java.util.TreeSet) WalStateManager(org.apache.accumulo.server.log.WalStateManager) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry)

Example 19 with LogEntry

use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

the class ReplicationOperationsImpl method referencedFiles.

/**
 * Gets the set of WAL files relevant to replication of the given table: the
 * logs currently referenced by the table's tablets in the metadata table,
 * plus the logs recorded in the metadata replication section for this table.
 *
 * @param tableName table whose referenced WALs are collected (must not be null)
 * @return paths of all referenced WAL files
 * @throws TableNotFoundException if the table does not exist
 */
@Override
public Set<String> referencedFiles(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    requireNonNull(tableName);
    log.debug("Collecting referenced files for replication of table {}", tableName);
    Connector conn = context.getConnector();
    Table.ID tableId = getTableId(conn, tableName);
    log.debug("Found id of {} for name {}", tableId, tableName);
    Set<String> wals = new HashSet<>();
    // WALs currently referenced by the table's tablets.
    // try-with-resources replaces the original try/finally close, matching
    // the scanner handling style used elsewhere in this codebase.
    try (BatchScanner tabletBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
        tabletBs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange(tableId)));
        tabletBs.fetchColumnFamily(LogColumnFamily.NAME);
        for (Entry<Key, Value> entry : tabletBs) {
            LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
            // Normalize through Path so equivalent URI spellings compare equal.
            wals.add(new Path(logEntry.filename).toString());
        }
    }
    // WALs that still need to be replicated for this table.
    try (BatchScanner replBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
        replBs.setRanges(Collections.singleton(ReplicationSection.getRange()));
        replBs.fetchColumnFamily(ReplicationSection.COLF);
        // Reusable buffer avoids allocating a Text per entry.
        Text buffer = new Text();
        for (Entry<Key, Value> entry : replBs) {
            if (tableId.equals(ReplicationSection.getTableId(entry.getKey()))) {
                ReplicationSection.getFile(entry.getKey(), buffer);
                wals.add(buffer.toString());
            }
        }
    }
    return wals;
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Text(org.apache.hadoop.io.Text) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) HashSet(java.util.HashSet)

Aggregations

LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)19 Path (org.apache.hadoop.fs.Path)10 ArrayList (java.util.ArrayList)7 Mutation (org.apache.accumulo.core.data.Mutation)7 Value (org.apache.accumulo.core.data.Value)7 Text (org.apache.hadoop.io.Text)7 Scanner (org.apache.accumulo.core.client.Scanner)6 Key (org.apache.accumulo.core.data.Key)6 MetadataTable (org.apache.accumulo.core.metadata.MetadataTable)6 BatchWriter (org.apache.accumulo.core.client.BatchWriter)5 Table (org.apache.accumulo.core.client.impl.Table)5 Test (org.junit.Test)5 IOException (java.io.IOException)4 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)4 Connector (org.apache.accumulo.core.client.Connector)4 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)4 KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)3 ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable)3 Status (org.apache.accumulo.server.replication.proto.Replication.Status)3 File (java.io.File)2