Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.
In the class MetadataTableUtil, the method getFileAndLogEntries:
/**
 * Collects the write-ahead log entries and data file sizes recorded for the given tablet.
 *
 * For the root tablet the information comes from zookeeper and a directory listing (data
 * file sizes are reported as zero); for all other tablets it is read from the appropriate
 * system table (root table for metadata tablets, metadata table otherwise).
 *
 * @param context client context used to scan the system table
 * @param extent the tablet whose log entries and files are wanted
 * @return a pair of the tablet's log entries and a sorted map of its data files to sizes
 * @throws RuntimeException if an unexpected row or column family is encountered
 */
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
ArrayList<LogEntry> logEntries = new ArrayList<>();
TreeMap<FileRef, DataFileValue> fileSizes = new TreeMap<>();
VolumeManager fs = VolumeManagerImpl.get();
if (!extent.isRootTablet()) {
// Metadata tablets are described in the root table; everything else in the metadata table.
Table.ID tableToScan = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
try (Scanner scanner = new ScannerImpl(context, tableToScan, Authorizations.EMPTY)) {
scanner.fetchColumnFamily(LogColumnFamily.NAME);
scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
scanner.setRange(extent.toMetadataRange());
for (Entry<Key, Value> entry : scanner) {
Key key = entry.getKey();
// Every row in the scanned range must belong to this tablet.
if (!key.getRow().equals(extent.getMetadataEntry())) {
throw new RuntimeException("Unexpected row " + key.getRow() + " expected " + extent.getMetadataEntry());
}
if (key.getColumnFamily().equals(LogColumnFamily.NAME)) {
logEntries.add(LogEntry.fromKeyValue(key, entry.getValue()));
} else if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
fileSizes.put(new FileRef(fs, key), new DataFileValue(entry.getValue().get()));
} else {
throw new RuntimeException("Unexpected col fam " + key.getColumnFamily());
}
}
}
} else {
// Root tablet: logs live in zookeeper, files come from listing the tablet directory.
getRootLogEntries(logEntries);
Path rootDir = new Path(getRootTabletDir());
for (FileStatus fileStatus : fs.listStatus(rootDir)) {
// Skip in-progress temporary files.
if (fileStatus.getPath().toString().endsWith("_tmp")) {
continue;
}
// Sizes are unknown for root tablet files; report them as zero.
fileSizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), new DataFileValue(0, 0));
}
}
return new Pair<>(logEntries, fileSizes);
}
Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.
In the class ListVolumesUsed, the method listZookeeper:
/**
 * Prints the volumes referenced in zookeeper: the volume of the root tablet
 * directory plus the volumes of the root tablet's write-ahead log entries.
 *
 * @throws Exception if reading the root tablet state from zookeeper fails
 */
private static void listZookeeper() throws Exception {
System.out.println("Listing volumes referenced in zookeeper");
TreeSet<String> volumes = new TreeSet<>();
// Volume of the root tablet directory.
volumes.add(getTableURI(MetadataTableUtil.getRootTabletDir()));
// Volumes of the root tablet's write-ahead logs.
ArrayList<LogEntry> rootLogEntries = new ArrayList<>();
MetadataTableUtil.getRootLogEntries(rootLogEntries);
for (LogEntry logEntry : rootLogEntries) {
getLogURIs(volumes, logEntry);
}
for (String volume : volumes) {
System.out.println("\tVolume : " + volume);
}
}
Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.
In the class ListVolumesUsed, the method listTable:
/**
 * Prints the volumes referenced by the given metadata table: first those in the
 * tablets section (data files, write-ahead logs, and tablet directories), then
 * those in the deletes section, and finally those of the current write-ahead
 * logs tracked in zookeeper.
 *
 * @param name metadata table name to scan
 * @param conn connector used to create the scanner and read WAL state
 * @throws Exception if scanning the table or reading WAL state fails
 */
private static void listTable(String name, Connector conn) throws Exception {
System.out.println("Listing volumes referenced in " + name + " tablets section");
TreeSet<String> volumes = new TreeSet<>();
// try-with-resources: the original leaked the scanner (never closed). Scanner is
// AutoCloseable in this codebase (see the ScannerImpl usage elsewhere in this file).
try (Scanner scanner = conn.createScanner(name, Authorizations.EMPTY)) {
scanner.setRange(MetadataSchema.TabletsSection.getRange());
scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
for (Entry<Key, Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
// The column qualifier holds the data file path; record its volume.
volumes.add(getTableURI(entry.getKey().getColumnQualifier().toString()));
} else if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.LogColumnFamily.NAME)) {
LogEntry le = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
getLogURIs(volumes, le);
} else if (MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
// The value holds the tablet directory path.
volumes.add(getTableURI(entry.getValue().toString()));
}
}
for (String volume : volumes) {
System.out.println("\tVolume : " + volume);
}
volumes.clear();
// Reuse the same scanner for the deletes section.
scanner.clearColumns();
scanner.setRange(MetadataSchema.DeletesSection.getRange());
for (Entry<Key, Value> entry : scanner) {
// Strip the deletes-section row prefix to recover the file path.
String delPath = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
volumes.add(getTableURI(delPath));
}
// Typo fixed in the message: "occurrs" -> "occurs".
System.out.println("Listing volumes referenced in " + name + " deletes section (volume replacement occurs at deletion time)");
for (String volume : volumes) {
System.out.println("\tVolume : " + volume);
}
}
volumes.clear();
// Current write-ahead logs are tracked in zookeeper, not in the table itself.
WalStateManager wals = new WalStateManager(conn.getInstance(), ZooReaderWriter.getInstance());
for (Path path : wals.getAllState().keySet()) {
volumes.add(getLogURI(path.toString()));
}
System.out.println("Listing volumes referenced in " + name + " current logs");
for (String volume : volumes) {
System.out.println("\tVolume : " + volume);
}
}
Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.
In the class ReplicationOperationsImpl, the method referencedFiles:
/**
 * Returns the set of file paths relevant to replication of the given table: the
 * write-ahead logs currently referenced by the table's tablets plus the files
 * recorded in the metadata replication section for this table.
 *
 * @param tableName name of the table being replicated; must not be null
 * @return paths of WALs and replication-section files for the table
 * @throws TableNotFoundException if the table name cannot be resolved to an id
 */
@Override
public Set<String> referencedFiles(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
requireNonNull(tableName);
log.debug("Collecting referenced files for replication of table {}", tableName);
Connector conn = context.getConnector();
Table.ID tableId = getTableId(conn, tableName);
log.debug("Found id of {} for name {}", tableId, tableName);
Set<String> wals = new HashSet<>();
// Get the WALs currently referenced by the table. try-with-resources replaces the
// original try/finally close() blocks (same behavior, no leak on early exit).
try (BatchScanner metaBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
metaBs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange(tableId)));
metaBs.fetchColumnFamily(LogColumnFamily.NAME);
for (Entry<Key, Value> entry : metaBs) {
LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
// Normalize the filename through Path so it matches other path comparisons.
wals.add(new Path(logEntry.filename).toString());
}
}
// And the WALs that need to be replicated for this table.
try (BatchScanner metaBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
metaBs.setRanges(Collections.singleton(ReplicationSection.getRange()));
metaBs.fetchColumnFamily(ReplicationSection.COLF);
Text buffer = new Text();
for (Entry<Key, Value> entry : metaBs) {
// Only keep replication entries belonging to the requested table.
if (tableId.equals(ReplicationSection.getTableId(entry.getKey()))) {
ReplicationSection.getFile(entry.getKey(), buffer);
wals.add(buffer.toString());
}
}
}
return wals;
}
Aggregations