Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
The class MasterMetadataUtil, method fixSplit.
private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio,
    TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
  if (metadataPrevEndRow == null)
    // something is wrong; a tablet produced by a split should always have a prev end row
    throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
  // check to see if the prev tablet exists in the metadata table
  Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
  try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
    VolumeManager fs = VolumeManagerImpl.get();
    if (!scanner2.iterator().hasNext()) {
      log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
      MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
    } else {
      log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
      List<FileRef> highDatafilesToRemove = new ArrayList<>();
      SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
      SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
      SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
      try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        Key rowKey = new Key(metadataEntry);
        scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
        for (Entry<Key, Value> entry : scanner3) {
          if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
            origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
          }
        }
      }
      MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
      MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
      return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
    }
  }
}
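The roll-back/finish decision above hinges on KeyExtent's prev-end-row encoding: the old prev end row is stored in the metadata table as an encoded Value ("oper" above) and decoded back into a Text when the split is repaired. Below is a minimal sketch of that round trip, assuming the static encode/decode pair on KeyExtent handles both a concrete split point and a null prev end row; the split point "m" is made up for illustration.

import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.hadoop.io.Text;

public class PrevEndRowRoundTrip {
  public static void main(String[] args) {
    Text prevEndRow = new Text("m");                      // hypothetical split point
    Value oper = KeyExtent.encodePrevEndRow(prevEndRow);  // form stored in the metadata table
    Text decoded = KeyExtent.decodePrevEndRow(oper);      // form fixSplit reads back
    System.out.println(prevEndRow.equals(decoded));       // expected: true
    // a null prev end row (first tablet of a table) should also round-trip
    System.out.println(KeyExtent.decodePrevEndRow(KeyExtent.encodePrevEndRow(null)) == null);
  }
}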
Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
The class MetadataTableUtil, method checkClone.
@VisibleForTesting
public static int checkClone(String tableName, Table.ID srcTableId, Table.ID tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
  TabletIterator srcIter = new TabletIterator(createCloneScanner(tableName, srcTableId, conn), new KeyExtent(srcTableId, null, null).toMetadataRange(), true, true);
  TabletIterator cloneIter = new TabletIterator(createCloneScanner(tableName, tableId, conn), new KeyExtent(tableId, null, null).toMetadataRange(), true, true);
  if (!cloneIter.hasNext() || !srcIter.hasNext())
    throw new RuntimeException(" table deleted during clone? srcTableId = " + srcTableId + " tableId=" + tableId);
  int rewrites = 0;
  while (cloneIter.hasNext()) {
    Map<Key, Value> cloneTablet = cloneIter.next();
    Text cloneEndRow = new KeyExtent(cloneTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
    HashSet<String> cloneFiles = new HashSet<>();
    boolean cloneSuccessful = false;
    for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
      if (entry.getKey().getColumnFamily().equals(ClonedColumnFamily.NAME)) {
        cloneSuccessful = true;
        break;
      }
    }
    if (!cloneSuccessful)
      getFiles(cloneFiles, cloneTablet, null);
    List<Map<Key, Value>> srcTablets = new ArrayList<>();
    Map<Key, Value> srcTablet = srcIter.next();
    srcTablets.add(srcTablet);
    Text srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
    int cmp = compareEndRows(cloneEndRow, srcEndRow);
    if (cmp < 0)
      throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
    HashSet<String> srcFiles = new HashSet<>();
    if (!cloneSuccessful)
      getFiles(srcFiles, srcTablet, srcTableId);
    while (cmp > 0) {
      srcTablet = srcIter.next();
      srcTablets.add(srcTablet);
      srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
      cmp = compareEndRows(cloneEndRow, srcEndRow);
      if (cmp < 0)
        throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
      if (!cloneSuccessful)
        getFiles(srcFiles, srcTablet, srcTableId);
    }
    if (cloneSuccessful)
      continue;
    if (!srcFiles.containsAll(cloneFiles)) {
      // delete existing cloned tablet entry
      Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
      for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
        Key k = entry.getKey();
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
      }
      bw.addMutation(m);
      for (Map<Key, Value> st : srcTablets)
        bw.addMutation(createCloneMutation(srcTableId, tableId, st));
      rewrites++;
    } else {
      // write out marker that this tablet was successfully cloned
      Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
      m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes(UTF_8)));
      bw.addMutation(m);
    }
  }
  bw.flush();
  return rewrites;
}
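checkClone advances the source iterator until the source tablet's end row catches up with the clone tablet's end row, so the comparison has to treat a null end row (the default, last tablet of a table) as greater than any concrete row. compareEndRows is a private helper that is not shown above; the sketch below, with a hypothetical name, is only an assumption about its semantics.

import org.apache.hadoop.io.Text;

public class EndRowComparisonSketch {
  // Hypothetical stand-in for the private compareEndRows helper: a null end row
  // marks the default (last) tablet and sorts after every real end row.
  static int compareEndRowsSketch(Text er1, Text er2) {
    if (er1 == null && er2 == null)
      return 0;
    if (er1 == null)
      return 1;
    if (er2 == null)
      return -1;
    return er1.compareTo(er2);
  }

  public static void main(String[] args) {
    System.out.println(compareEndRowsSketch(new Text("m"), new Text("g")) > 0);  // true: keep advancing src
    System.out.println(compareEndRowsSketch(null, new Text("z")) > 0);           // true: clone's default tablet
  }
}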
Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
The class MetadataTableUtil, method deleteTable.
public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
  try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
      BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {
      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();
        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }
        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }
      bw.flush();
      ms.clearColumns();
    }
    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();
      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }
    if (m != null)
      bw.addMutation(m);
  }
}
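deleteTable can cover the whole table with a single scan because every tablet of a table lives under a contiguous block of metadata rows: the row is the table id plus the tablet's end row, with '<' marking the default (last) tablet, which is what toMetadataRange() spans. A small sketch follows; the table id "2" is made up, and the sketch assumes the same KeyExtent signatures used in the code above.

import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.hadoop.io.Text;

public class MetadataRowSketch {
  public static void main(String[] args) {
    Table.ID tid = Table.ID.of("2");                                  // hypothetical table id
    Text midTablet = KeyExtent.getMetadataEntry(tid, new Text("m"));  // row for a tablet ending at "m", e.g. "2;m"
    Text lastTablet = KeyExtent.getMetadataEntry(tid, null);          // row for the default tablet, e.g. "2<"
    Range all = new KeyExtent(tid, null, null).toMetadataRange();     // one row range covering the whole table
    System.out.println(all.contains(new Key(midTablet)));             // expected: true
    System.out.println(all.contains(new Key(lastTablet)));            // expected: true
  }
}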
Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
The class MetadataTableUtil, method createCloneMutation.
private static Mutation createCloneMutation(Table.ID srcTableId, Table.ID tableId, Map<Key, Value> tablet) {
  KeyExtent ke = new KeyExtent(tablet.keySet().iterator().next().getRow(), (Text) null);
  Mutation m = new Mutation(KeyExtent.getMetadataEntry(tableId, ke.getEndRow()));
  for (Entry<Key, Value> entry : tablet.entrySet()) {
    if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
      String cf = entry.getKey().getColumnQualifier().toString();
      if (!cf.startsWith("../") && !cf.contains(":"))
        cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
      m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
    } else if (entry.getKey().getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
      m.put(TabletsSection.LastLocationColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
    } else if (entry.getKey().getColumnFamily().equals(TabletsSection.LastLocationColumnFamily.NAME)) {
      // skip
    } else {
      m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getValue());
    }
  }
  return m;
}
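The only non-trivial rewrite in createCloneMutation is the data-file qualifier: a path that is relative to the source table's directory is re-pointed at that directory from the clone's perspective, while paths that are already relative ("../") or fully qualified (contain ":") are copied unchanged. A string-only sketch with made-up values:

public class ClonePathRewriteSketch {
  public static void main(String[] args) {
    String srcTableId = "1a";                   // hypothetical source table id
    String qualifier = "/t-0001/F00000a.rf";    // hypothetical table-relative file reference
    if (!qualifier.startsWith("../") && !qualifier.contains(":"))
      qualifier = "../" + srcTableId + qualifier;
    System.out.println(qualifier);              // ../1a/t-0001/F00000a.rf
    String qualified = "hdfs://nn:8020/accumulo/tables/1a/t-0001/F00000b.rf";
    // already fully qualified (contains ":"), so it would be left as-is
    System.out.println(!qualified.startsWith("../") && !qualified.contains(":"));  // false
  }
}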
Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
The class TableDiskUsage, method getDiskUsage.
public static Map<TreeSet<String>, Long> getDiskUsage(Set<Table.ID> tableIds, VolumeManager fs, Connector conn) throws IOException {
  TableDiskUsage tdu = new TableDiskUsage();
  // Add each tableID
  for (Table.ID tableId : tableIds)
    tdu.addTable(tableId);
  HashSet<Table.ID> tablesReferenced = new HashSet<>(tableIds);
  HashSet<Table.ID> emptyTableIds = new HashSet<>();
  HashSet<String> nameSpacesReferenced = new HashSet<>();
  // For each table ID
  for (Table.ID tableId : tableIds) {
    Scanner mdScanner;
    try {
      mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    }
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    mdScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    if (!mdScanner.iterator().hasNext()) {
      emptyTableIds.add(tableId);
    }
    // Read each file referenced by that table
    for (Entry<Key, Value> entry : mdScanner) {
      String file = entry.getKey().getColumnQualifier().toString();
      String[] parts = file.split("/");
      // the filename
      String uniqueName = parts[parts.length - 1];
      if (file.contains(":") || file.startsWith("../")) {
        String ref = parts[parts.length - 3];
        // Track any tables which are referenced externally by the current table
        if (!ref.equals(tableId.canonicalID())) {
          tablesReferenced.add(Table.ID.of(ref));
        }
        if (file.contains(":") && parts.length > 3) {
          List<String> base = Arrays.asList(Arrays.copyOf(parts, parts.length - 3));
          nameSpacesReferenced.add(Joiner.on("/").join(base));
        }
      }
      // add this file to this table
      tdu.linkFileAndTable(tableId, uniqueName);
    }
  }
  // For each table seen (provided by the user, or referenced by a table the user provided)
  for (Table.ID tableId : tablesReferenced) {
    for (String tableDir : nameSpacesReferenced) {
      // Find each file and add its size
      FileStatus[] files = fs.globStatus(new Path(tableDir + "/" + tableId + "/*/*"));
      if (files != null) {
        for (FileStatus fileStatus : files) {
          // Assumes that all filenames are unique
          String name = fileStatus.getPath().getName();
          tdu.addFileSize(name, fileStatus.getLen());
        }
      }
    }
  }
  Map<Table.ID, String> reverseTableIdMap = Tables.getIdToNameMap(conn.getInstance());
  TreeMap<TreeSet<String>, Long> usage = new TreeMap<>((o1, o2) -> {
    int len1 = o1.size();
    int len2 = o2.size();
    int min = Math.min(len1, len2);
    Iterator<String> iter1 = o1.iterator();
    Iterator<String> iter2 = o2.iterator();
    int count = 0;
    while (count < min) {
      String s1 = iter1.next();
      String s2 = iter2.next();
      int cmp = s1.compareTo(s2);
      if (cmp != 0)
        return cmp;
      count++;
    }
    return len1 - len2;
  });
  for (Entry<List<Table.ID>, Long> entry : tdu.calculateUsage().entrySet()) {
    TreeSet<String> tableNames = new TreeSet<>();
    // Convert size shared by each table id into size shared by each table name
    for (Table.ID tableId : entry.getKey())
      tableNames.add(reverseTableIdMap.get(tableId));
    // Map the set of table names to the shared file size
    usage.put(tableNames, entry.getValue());
  }
  if (!emptyTableIds.isEmpty()) {
    TreeSet<String> emptyTables = new TreeSet<>();
    for (Table.ID tableId : emptyTableIds) {
      emptyTables.add(reverseTableIdMap.get(tableId));
    }
    usage.put(emptyTables, 0L);
  }
  return usage;
}
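A hedged usage sketch of the method above: given a live Connector, it reports how many bytes each group of tables shares. The table ids and the way the Connector and VolumeManager are obtained are assumptions for illustration, not part of the project code shown here.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.accumulo.server.util.TableDiskUsage;

public class DiskUsageSketch {
  // Prints shared disk usage for two hypothetical table ids ("2" and "3");
  // the caller is expected to supply an already-connected Connector.
  static void printSharedUsage(Connector conn) throws Exception {
    Set<Table.ID> tables = new HashSet<>(Arrays.asList(Table.ID.of("2"), Table.ID.of("3")));
    VolumeManager fs = VolumeManagerImpl.get();
    Map<TreeSet<String>, Long> usage = TableDiskUsage.getDiskUsage(tables, fs, conn);
    for (Entry<TreeSet<String>, Long> e : usage.entrySet())
      System.out.println(e.getKey() + " -> " + e.getValue() + " bytes");
  }
}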