Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
From the class SplitRecoveryIT, method runSplitRecoveryTest:
private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  SortedMap<FileRef, DataFileValue> splitMapFiles = null;
  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId() + "/dir_" + i;
    MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
    // seed each tablet with one data file entry: a DataFileValue records
    // the file's size in bytes and its number of entries
    SortedMap<FileRef, DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
    if (i == extentToSplit) {
      splitMapFiles = mapFiles;
    }
    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
  }
  // split the chosen tablet at midRow into a high and a low extent
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
  KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
}
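Each DataFileValue above pairs a file's estimated size in bytes with its entry count, and its encoded form is what the metadata table stores in the file column. A minimal sketch (not from the test) of the round trip through that encoding:

// Minimal sketch: DataFileValue carries (size in bytes, number of entries)
// and round-trips through the encoded byte[] stored in the metadata table.
static void dataFileValueRoundTrip() {
  DataFileValue dfv = new DataFileValue(1000017, 10000);
  byte[] encoded = dfv.encode();
  DataFileValue decoded = new DataFileValue(encoded);
  assert decoded.getSize() == 1000017;
  assert decoded.getNumEntries() == 10000;
}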
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
From the class SplitRecoveryIT, method ensureTabletHasNoUnexpectedMetadataEntries:
private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef, DataFileValue> expectedMapFiles) throws Exception {
  try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.setRange(extent.toMetadataRange());
    HashSet<ColumnFQ> expectedColumns = new HashSet<>();
    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
    HashSet<Text> expectedColumnFamilies = new HashSet<>();
    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
    Iterator<Entry<Key, Value>> iter = scanner.iterator();
    while (iter.hasNext()) {
      Key key = iter.next().getKey();
      // every entry must belong to this tablet's metadata row
      if (!key.getRow().equals(extent.getMetadataEntry())) {
        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
      }
      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
        continue;
      }
      if (expectedColumns.remove(new ColumnFQ(key))) {
        continue;
      }
      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
    }
    System.out.println("expectedColumns " + expectedColumns);
    // every expected column should have been removed during the scan
    if (!expectedColumns.isEmpty()) {
      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
    }
    SortedMap<FileRef, DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
    verifySame(expectedMapFiles, fixedMapFiles);
  }
}
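The verifySame helper called at the end is not part of this listing; a plausible sketch of such a check, comparing DataFileValue entries field by field (the signature and exact messages here are assumptions), might be:

// Hypothetical sketch of a verifySame-style check: compare the two maps
// entry by entry on size and entry count rather than relying on equals().
private void verifySame(SortedMap<FileRef, DataFileValue> expected, SortedMap<FileRef, DataFileValue> actual) throws Exception {
  if (!expected.keySet().equals(actual.keySet())) {
    throw new Exception("Files differ " + expected.keySet() + " != " + actual.keySet());
  }
  for (Entry<FileRef, DataFileValue> entry : expected.entrySet()) {
    DataFileValue dfv = actual.get(entry.getKey());
    if (dfv.getSize() != entry.getValue().getSize() || dfv.getNumEntries() != entry.getValue().getNumEntries()) {
      throw new Exception("Disagreement on " + entry.getKey() + ": " + entry.getValue() + " != " + dfv);
    }
  }
}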
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
From the class SplitRecoveryIT, method splitPartiallyAndRecover:
private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef, DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
  SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
  SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
  List<FileRef> highDatafilesToRemove = new ArrayList<>();
  // apportion each file's DataFileValue between the low and high tablets
  MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
  MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
  TServerInstance instance = new TServerInstance(location, zl.getSessionId());
  Writer writer = MetadataTableUtil.getMetadataTable(context);
  Assignment assignment = new Assignment(high, instance);
  Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
  assignment.server.putFutureLocation(m);
  writer.update(m);
  // steps controls how far the split proceeds before recovery is exercised
  if (steps >= 1) {
    Map<Long, ? extends Collection<FileRef>> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
    MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1L, -1L, zl);
  }
  if (steps >= 2) {
    MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
  }
  TabletServer.verifyTabletInformation(context, high, instance, new TreeMap<>(), "127.0.0.1:0", zl);
  if (steps >= 1) {
    ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
    ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
    Map<Long, ? extends Collection<FileRef>> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
    Map<Long, ? extends Collection<FileRef>> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
    if (!lowBulkFiles.equals(highBulkFiles)) {
      throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
    }
    if (lowBulkFiles.size() == 0) {
      throw new Exception(" no bulk files " + low);
    }
  } else {
    ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
  }
}
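MetadataTableUtil.splitDatafiles, called at the top of this method, divides each file's DataFileValue between the low and high tablets. Its exact logic is not reproduced in this listing, but a simplified sketch of a proportional estimate for one file spanning the split point:

// Simplified sketch (not the exact splitDatafiles logic): estimate how a
// spanning file's size and entry count divide at the given split ratio.
static void apportionAtSplit() {
  DataFileValue whole = new DataFileValue(1000017, 10000);
  double splitRatio = .4;
  DataFileValue lowEstimate = new DataFileValue((long) (whole.getSize() * splitRatio), (long) (whole.getNumEntries() * splitRatio));
  DataFileValue highEstimate = new DataFileValue(whole.getSize() - lowEstimate.getSize(), whole.getNumEntries() - lowEstimate.getNumEntries());
  System.out.println("low=" + lowEstimate + " high=" + highEstimate);
}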
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
From the class Merge, method getSizeIterator:
protected Iterator<Size> getSizeIterator(Connector conn, String tablename, Text start, Text end) throws MergeException {
  // open up metadata, walk through the tablets.
  Table.ID tableId;
  Scanner scanner;
  try {
    tableId = Tables.getTableId(conn.getInstance(), tablename);
    scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  } catch (Exception e) {
    throw new MergeException(e);
  }
  scanner.setRange(new KeyExtent(tableId, end, start).toMetadataRange());
  scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
  final Iterator<Entry<Key, Value>> iterator = scanner.iterator();
  Iterator<Size> result = new Iterator<Size>() {

    Size next = fetch();

    @Override
    public boolean hasNext() {
      return next != null;
    }

    private Size fetch() {
      long tabletSize = 0;
      while (iterator.hasNext()) {
        Entry<Key, Value> entry = iterator.next();
        Key key = entry.getKey();
        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          // accumulate the decoded DataFileValue sizes for this tablet
          tabletSize += new DataFileValue(entry.getValue().get()).getSize();
        } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
          // the prev row column marks the end of a tablet's entries
          KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
          return new Size(extent, tabletSize);
        }
      }
      return null;
    }

    @Override
    public Size next() {
      Size result = next;
      next = fetch();
      return result;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
  return result;
}
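A short usage sketch for the iterator above, totaling per-tablet data file bytes across a table; it assumes it runs where Merge.Size's size field is accessible (as within Merge itself), and "mytable" is a placeholder name:

// Hypothetical usage: sum the DataFileValue-derived sizes for every tablet.
// Assumes Size's size field is accessible, e.g. from within Merge itself.
long totalBytes(Connector conn) throws MergeException {
  Iterator<Size> sizes = getSizeIterator(conn, "mytable", null, null);
  long total = 0;
  while (sizes.hasNext()) {
    total += sizes.next().size;
  }
  return total;
}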