Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class BasicCompactionStrategy, method calculateTotalSize.
/**
 * Calculates the total size of input files in the compaction plan.
 */
private Long calculateTotalSize(MajorCompactionRequest request, CompactionPlan plan) {
  long totalSize = 0;
  Map<StoredTabletFile,DataFileValue> allFiles = request.getFiles();
  for (StoredTabletFile fileRef : plan.inputFiles) {
    totalSize += allFiles.get(fileRef).getSize();
  }
  return totalSize;
}
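In BasicCompactionStrategy this total feeds the decision about how to write the compaction output. A minimal hypothetical caller (the largeFileCompressionThreshold field and the codec-switching step are assumptions for illustration, not copied from the class):

// Hypothetical usage sketch; largeFileCompressionThreshold is an assumed field.
private void adjustPlanForLargeOutput(MajorCompactionRequest request, CompactionPlan plan) {
  Long totalSize = calculateTotalSize(request, plan);
  if (totalSize > largeFileCompressionThreshold) {
    // e.g. switch the planned output file to a stronger compression codec here
  }
}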
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class VolumeIT, method testReplaceVolume.
private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
  String[] tableNames = getUniqueNames(3);
  verifyVolumesUsed(client, tableNames[0], false, v1, v2);
  // write to 2nd table, but do not flush data to disk before shutdown
  try (AccumuloClient c2 =
      cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD))) {
    writeData(tableNames[1], c2);
  }
  if (cleanShutdown) {
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
  }
  cluster.stop();
  // rename the underlying volume directories: v1 -> v8 and v2 -> v9
  File v1f = new File(v1.toUri());
  File v8f = new File(new File(v1.getParent().toUri()), "v8");
  assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
  Path v8 = new Path(v8f.toURI());
  File v2f = new File(v2.toUri());
  File v9f = new File(new File(v2.getParent().toUri()), "v9");
  assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
  Path v9 = new Path(v9f.toURI());
  // point the instance at the new volumes and declare the old -> new replacements
  updateConfig(config -> {
    config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    config.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(),
        v1 + " " + v8 + "," + v2 + " " + v9);
  });
  // start cluster and verify that volumes were replaced
  cluster.start();
  verifyVolumesUsed(client, tableNames[0], true, v8, v9);
  verifyVolumesUsed(client, tableNames[1], true, v8, v9);
  // verify writes to new dir
  client.tableOperations().compact(tableNames[0], null, null, true, true);
  client.tableOperations().compact(tableNames[1], null, null, true, true);
  verifyVolumesUsed(client, tableNames[0], true, v8, v9);
  verifyVolumesUsed(client, tableNames[1], true, v8, v9);
  client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
  // check that root tablet is not on volume 1 or 2
  int count = 0;
  for (StoredTabletFile file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT)
      .getFiles()) {
    assertTrue(file.getMetaUpdateDelete().startsWith(v8.toString())
        || file.getMetaUpdateDelete().startsWith(v9.toString()));
    count++;
  }
  assertTrue(count > 0);
  client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());
  client.tableOperations().flush(MetadataTable.NAME, null, null, true);
  client.tableOperations().flush(RootTable.NAME, null, null, true);
  verifyVolumesUsed(client, tableNames[0], true, v8, v9);
  verifyVolumesUsed(client, tableNames[1], true, v8, v9);
  verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}
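The root-tablet loop near the end shows the general pattern for inspecting a tablet's files through Ample. A small helper in the same vein (hypothetical, built only from the calls used above) could count how many of a tablet's files live on a given volume:

// Hypothetical helper mirroring the loop above: how many of a tablet's files
// have metadata paths rooted at the given volume?
private static long countFilesOnVolume(ClientContext ctx, KeyExtent tablet, Path volume) {
  long count = 0;
  for (StoredTabletFile file : ctx.getAmple().readTablet(tablet).getFiles()) {
    if (file.getMetaUpdateDelete().startsWith(volume.toString())) {
      count++;
    }
  }
  return count;
}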
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class SplitRecoveryIT, method splitPartiallyAndRecover.
private void splitPartiallyAndRecover(ServerContext context, KeyExtent extent, KeyExtent high,
    KeyExtent low, double splitRatio, SortedMap<StoredTabletFile,DataFileValue> mapFiles,
    Text midRow, String location, int steps, ServiceLock zl) throws Exception {
  SortedMap<StoredTabletFile,DataFileValue> lowDatafileSizes = new TreeMap<>();
  SortedMap<StoredTabletFile,DataFileValue> highDatafileSizes = new TreeMap<>();
  List<StoredTabletFile> highDatafilesToRemove = new ArrayList<>();
  MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles,
      lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
  MetadataTableUtil.splitTablet(high, extent.prevEndRow(), splitRatio, context, zl, Set.of());
  TServerInstance instance = new TServerInstance(location, zl.getSessionId());
  Assignment assignment = new Assignment(high, instance);
  TabletMutator tabletMutator = context.getAmple().mutateTablet(extent);
  tabletMutator.putLocation(assignment.server, LocationType.FUTURE);
  tabletMutator.mutate();
  // steps controls how far the split progressed before the simulated failure:
  // 0 = split only started, 1 = new low tablet added, 2 = split fully finished
  if (steps >= 1) {
    Map<Long,List<TabletFile>> bulkFiles = getBulkFilesLoaded(context, high);
    ManagerMetadataUtil.addNewTablet(context, low, "lowDir", instance, lowDatafileSizes,
        bulkFiles, new MetadataTime(0, TimeType.LOGICAL), -1L, -1L, zl);
  }
  if (steps >= 2) {
    MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
  }
  // run recovery and verify the resulting metadata
  TabletMetadata meta = context.getAmple().readTablet(high);
  KeyExtent fixedExtent = ManagerMetadataUtil.fixSplit(context, meta, zl);
  if (steps < 2) {
    assertEquals(splitRatio, meta.getSplitRatio(), 0.0);
  }
  if (steps >= 1) {
    assertEquals(high, fixedExtent);
    ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
    ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
    Map<Long,? extends Collection<TabletFile>> lowBulkFiles = getBulkFilesLoaded(context, low);
    Map<Long,? extends Collection<TabletFile>> highBulkFiles = getBulkFilesLoaded(context, high);
    if (!lowBulkFiles.equals(highBulkFiles)) {
      throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
    }
    if (lowBulkFiles.isEmpty()) {
      throw new Exception(" no bulk files " + low);
    }
  } else {
    assertEquals(extent, fixedExtent);
    ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
  }
}
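The first few lines, which partition the tablet's files between the two children, can be looked at in isolation. A minimal sketch of that call (the midRow value is illustrative; with an empty first/last-row map the split has no per-file row bounds to work from, so the per-side sizes it records are estimates derived from splitRatio):

// Standalone sketch of the file-partitioning step; mapFiles as in the method above.
SortedMap<StoredTabletFile,DataFileValue> lowSide = new TreeMap<>();
SortedMap<StoredTabletFile,DataFileValue> highSide = new TreeMap<>();
List<StoredTabletFile> highSideToRemove = new ArrayList<>();
MetadataTableUtil.splitDatafiles(new Text("m"), 0.5, new HashMap<>(), mapFiles,
    lowSide, highSide, highSideToRemove);
// lowSide/highSide now hold the per-child size estimates used when writing the
// children's metadata; highSideToRemove lists files the high tablet can drop.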
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class SplitRecoveryIT, method ensureTabletHasNoUnexpectedMetadataEntries.
private void ensureTabletHasNoUnexpectedMetadataEntries(ServerContext context, KeyExtent extent,
    SortedMap<StoredTabletFile,DataFileValue> expectedMapFiles) throws Exception {
  try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.setRange(extent.toMetaRange());
    HashSet<ColumnFQ> expectedColumns = new HashSet<>();
    expectedColumns.add(ServerColumnFamily.DIRECTORY_COLUMN);
    expectedColumns.add(TabletColumnFamily.PREV_ROW_COLUMN);
    expectedColumns.add(ServerColumnFamily.TIME_COLUMN);
    expectedColumns.add(ServerColumnFamily.LOCK_COLUMN);
    HashSet<Text> expectedColumnFamilies = new HashSet<>();
    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
    expectedColumnFamilies.add(FutureLocationColumnFamily.NAME);
    expectedColumnFamilies.add(CurrentLocationColumnFamily.NAME);
    expectedColumnFamilies.add(LastLocationColumnFamily.NAME);
    expectedColumnFamilies.add(BulkFileColumnFamily.NAME);
    Iterator<Entry<Key,Value>> iter = scanner.iterator();
    boolean sawPer = false;
    while (iter.hasNext()) {
      Entry<Key,Value> entry = iter.next();
      Key key = entry.getKey();
      if (!key.getRow().equals(extent.toMetaRow())) {
        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
      }
      if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
        sawPer = true;
        if (!KeyExtent.fromMetaPrevRow(entry).equals(extent)) {
          throw new Exception("Unexpected prev end row " + entry);
        }
      }
      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
        continue;
      }
      if (expectedColumns.remove(new ColumnFQ(key))) {
        continue;
      }
      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
    }
    // every expected column should have been checked off exactly once
    if (!expectedColumns.isEmpty()) {
      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
    }
    assertTrue(sawPer);
    SortedMap<StoredTabletFile,DataFileValue> fixedMapFiles =
        MetadataTableUtil.getFileAndLogEntries(context, extent).getSecond();
    verifySame(expectedMapFiles, fixedMapFiles);
  }
}
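The checklist logic works because ColumnFQ uses value equality over column family and qualifier, so a ColumnFQ built from a scanned Key can be removed from the expected set. A tiny hedged illustration (the "srv" and "time" literals are assumptions about how the TIME_COLUMN constant is encoded):

// Hypothetical illustration of the remove(new ColumnFQ(key)) check above.
Key key = new Key(extent.toMetaRow(), new Text("srv"), new Text("time"));
// compares equal to ServerColumnFamily.TIME_COLUMN if family/qualifier match
boolean seen = expectedColumns.remove(new ColumnFQ(key));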
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class SplitRecoveryIT, method verifySame.
private void verifySame(SortedMap<StoredTabletFile,DataFileValue> datafileSizes,
    SortedMap<StoredTabletFile,DataFileValue> fixedDatafileSizes) throws Exception {
  if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet())
      || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
    throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != "
        + fixedDatafileSizes.keySet());
  }
  for (Entry<StoredTabletFile,DataFileValue> entry : datafileSizes.entrySet()) {
    DataFileValue dfv = entry.getValue();
    DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
    if (!dfv.equals(otherDfv)) {
      throw new Exception(entry.getKey() + " dfv not equal " + dfv + " " + otherDfv);
    }
  }
}
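Since the two containsAll checks together amount to key-set equality, an equivalent and slightly more compact variant of the first guard (a behavior-preserving sketch, not the project's code) would be:

// Equivalent sketch: Set.equals covers both containsAll directions at once.
if (!datafileSizes.keySet().equals(fixedDatafileSizes.keySet())) {
  throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != "
      + fixedDatafileSizes.keySet());
}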