Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class VolumeIT, method testRemoveVolumes.
@Test
public void testRemoveVolumes() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
    String[] tableNames = getUniqueNames(2);
    verifyVolumesUsed(client, tableNames[0], false, v1, v2);
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    cluster.stop();
    updateConfig(config -> config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString()));
    // start cluster and verify that volume was decommissioned
    cluster.start();
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    verifyVolumesUsed(client, tableNames[0], true, v2);
    client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
    // check that root tablet is not on volume 1
    int count = 0;
    for (StoredTabletFile file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT)
        .getFiles()) {
      assertTrue(file.getMetaUpdateDelete().startsWith(v2.toString()));
      count++;
    }
    assertTrue(count > 0);
    client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(),
        new HashSet<>());
    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(client, tableNames[0], true, v2);
    verifyVolumesUsed(client, tableNames[1], true, v2);
  }
}
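StoredTabletFile keeps the full metadata path of the file, which is what the volume check above relies on. A minimal sketch, with a hypothetical path, of how the prefix test reduces to a string comparison:

// hypothetical path, not taken from the test above
StoredTabletFile file = new StoredTabletFile("hdfs://nn2/accumulo/tables/1/t-0001/A0001.rf");
// getMetaUpdateDelete() returns the path string as stored in the metadata table,
// so membership in a volume is just a prefix check
boolean onVolume2 = file.getMetaUpdateDelete().startsWith("hdfs://nn2"); // true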
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class TabletMetadataTest, method testAllColumns.
@Test
public void testAllColumns() {
  KeyExtent extent = new KeyExtent(TableId.of("5"), new Text("df"), new Text("da"));

  // build a mutation that populates every column type the metadata parser understands
  Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
  COMPACT_COLUMN.put(mutation, new Value("5"));
  DIRECTORY_COLUMN.put(mutation, new Value("t-0001757"));
  FLUSH_COLUMN.put(mutation, new Value("6"));
  TIME_COLUMN.put(mutation, new Value("M123456789"));

  String bf1 = "hdfs://nn1/acc/tables/1/t-0001/bf1";
  String bf2 = "hdfs://nn1/acc/tables/1/t-0001/bf2";
  mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1).put(FateTxId.formatTid(56));
  mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2).put(FateTxId.formatTid(59));

  mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");

  DataFileValue dfv1 = new DataFileValue(555, 23);
  StoredTabletFile tf1 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df1.rf");
  StoredTabletFile tf2 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df2.rf");
  mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf1.getMetaUpdateDelete())
      .put(dfv1.encode());
  DataFileValue dfv2 = new DataFileValue(234, 13);
  mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf2.getMetaUpdateDelete())
      .put(dfv2.encode());

  mutation.at().family(CurrentLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
  mutation.at().family(LastLocationColumnFamily.NAME).qualifier("s000").put("server2:8555");

  LogEntry le1 = new LogEntry(extent, 55, "lf1");
  mutation.at().family(le1.getColumnFamily()).qualifier(le1.getColumnQualifier())
      .timestamp(le1.timestamp).put(le1.getValue());
  LogEntry le2 = new LogEntry(extent, 57, "lf2");
  mutation.at().family(le2.getColumnFamily()).qualifier(le2.getColumnQualifier())
      .timestamp(le2.timestamp).put(le2.getValue());

  StoredTabletFile sf1 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf1.rf");
  StoredTabletFile sf2 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf2.rf");
  mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf1.getMetaUpdateDelete()).put("");
  mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetaUpdateDelete()).put("");

  // convert the row back into a TabletMetadata and verify every getter
  SortedMap<Key, Value> rowMap = toRowMap(mutation);
  TabletMetadata tm = TabletMetadata.convertRow(rowMap.entrySet().iterator(),
      EnumSet.allOf(ColumnType.class), true);

  assertEquals("OK", tm.getCloned());
  assertEquals(5L, tm.getCompactId().getAsLong());
  assertEquals("t-0001757", tm.getDirName());
  assertEquals(extent.endRow(), tm.getEndRow());
  assertEquals(extent, tm.getExtent());
  assertEquals(Set.of(tf1, tf2), Set.copyOf(tm.getFiles()));
  assertEquals(Map.of(tf1, dfv1, tf2, dfv2), tm.getFilesMap());
  assertEquals(6L, tm.getFlushId().getAsLong());
  assertEquals(rowMap, tm.getKeyValues());
  assertEquals(Map.of(new StoredTabletFile(bf1), 56L, new StoredTabletFile(bf2), 59L),
      tm.getLoaded());
  assertEquals(HostAndPort.fromParts("server1", 8555), tm.getLocation().getHostAndPort());
  assertEquals("s001", tm.getLocation().getSession());
  assertEquals(LocationType.CURRENT, tm.getLocation().getType());
  assertTrue(tm.hasCurrent());
  assertEquals(HostAndPort.fromParts("server2", 8555), tm.getLast().getHostAndPort());
  assertEquals("s000", tm.getLast().getSession());
  assertEquals(LocationType.LAST, tm.getLast().getType());
  assertEquals(Set.of(le1.getValue() + " " + le1.timestamp, le2.getValue() + " " + le2.timestamp),
      tm.getLogs().stream().map(le -> le.getValue() + " " + le.timestamp).collect(toSet()));
  assertEquals(extent.prevEndRow(), tm.getPrevEndRow());
  assertEquals(extent.tableId(), tm.getTableId());
  assertTrue(tm.sawPrevEndRow());
  assertEquals("M123456789", tm.getTime().encode());
  assertEquals(Set.of(sf1, sf2), Set.copyOf(tm.getScans()));
}
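The data-file qualifiers written above are the same strings that convertRow parses back into StoredTabletFile instances, which is why the assertions can compare against the original objects. A small round-trip sketch, assuming an already-normalized path:

StoredTabletFile original = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df1.rf");
// rebuild from the stored metadata string; equals/hashCode make the two interchangeable
StoredTabletFile roundTripped = new StoredTabletFile(original.getMetaUpdateDelete());
boolean same = original.equals(roundTripped); // true for a normalized path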
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class MetadataTableUtil, method updateTabletDataFile.
public static Map<StoredTabletFile, DataFileValue> updateTabletDataFile(long tid, KeyExtent extent,
    Map<TabletFile, DataFileValue> estSizes, MetadataTime time, ServerContext context,
    ServiceLock zooLock) {
  TabletMutator tablet = context.getAmple().mutateTablet(extent);
  tablet.putTime(time);
  Map<StoredTabletFile, DataFileValue> newFiles = new HashMap<>(estSizes.size());
  estSizes.forEach((tf, dfv) -> {
    tablet.putFile(tf, dfv);
    tablet.putBulkFile(tf, tid);
    // insert() converts the candidate TabletFile into the StoredTabletFile form
    // that the metadata table keys files by
    newFiles.put(tf.insert(), dfv);
  });
  tablet.putZooLock(zooLock);
  tablet.mutate();
  return newFiles;
}
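The returned map is keyed by StoredTabletFile rather than the incoming TabletFile; a brief sketch of that conversion, using a hypothetical bulk-import path (Path is org.apache.hadoop.fs.Path):

// hypothetical candidate file produced by a bulk import
TabletFile candidate = new TabletFile(new Path("hdfs://nn1/acc/tables/1/t-0001/I0001.rf"));
// insert() yields the StoredTabletFile form used as the map key above
StoredTabletFile stored = candidate.insert();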
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class MetadataTableUtil, method splitDatafiles.
public static void splitDatafiles(Text midRow, double splitRatio,
    Map<TabletFile, FileUtil.FileInfo> firstAndLastRows,
    SortedMap<StoredTabletFile, DataFileValue> datafiles,
    SortedMap<StoredTabletFile, DataFileValue> lowDatafileSizes,
    SortedMap<StoredTabletFile, DataFileValue> highDatafileSizes,
    List<StoredTabletFile> highDatafilesToRemove) {
  for (Entry<StoredTabletFile, DataFileValue> entry : datafiles.entrySet()) {
    Text firstRow = null;
    Text lastRow = null;
    boolean rowsKnown = false;
    FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());
    if (mfi != null) {
      firstRow = mfi.getFirstRow();
      lastRow = mfi.getLastRow();
      rowsKnown = true;
    }
    if (rowsKnown && firstRow.compareTo(midRow) > 0) {
      // only in high
      long highSize = entry.getValue().getSize();
      long highEntries = entry.getValue().getNumEntries();
      highDatafileSizes.put(entry.getKey(),
          new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
    } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
      // only in low
      long lowSize = entry.getValue().getSize();
      long lowEntries = entry.getValue().getNumEntries();
      lowDatafileSizes.put(entry.getKey(),
          new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
      highDatafilesToRemove.add(entry.getKey());
    } else {
      // rows unknown, or the file straddles the split point: apportion the
      // estimated size and entry count between both sides by splitRatio
      long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
      long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
      lowDatafileSizes.put(entry.getKey(),
          new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
      long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
      long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
      highDatafileSizes.put(entry.getKey(),
          new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
    }
  }
}
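In the final branch, where a file's bounds are unknown or straddle midRow, the estimates are apportioned by splitRatio, flooring the low side and ceiling the high side so the two sides sum back to the original estimate. A worked example with assumed numbers:

double splitRatio = 0.3;
long size = 1000, numEntries = 200; // assumed estimates for one file
long lowSize = (long) Math.floor(size * splitRatio);                  // 300
long lowEntries = (long) Math.floor(numEntries * splitRatio);         // 60
long highSize = (long) Math.ceil(size * (1.0 - splitRatio));          // 700
long highEntries = (long) Math.ceil(numEntries * (1.0 - splitRatio)); // 140
// low + high recovers the original estimate: 300 + 700 == 1000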
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
The class ManagerMetadataUtil, method updateTabletDataFile.
/**
 * Update tablet file data from a flush. Returns a StoredTabletFile if there are data entries.
 */
public static Optional<StoredTabletFile> updateTabletDataFile(ServerContext context,
    KeyExtent extent, TabletFile newDatafile, DataFileValue dfv, MetadataTime time, String address,
    ServiceLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
  TabletMutator tablet = context.getAmple().mutateTablet(extent);
  // if there are no entries, the path is not stored in the metadata table; only the flush ID is
  Optional<StoredTabletFile> newFile = Optional.empty();
  // if entries are present, write the path to the metadata table
  if (dfv.getNumEntries() > 0) {
    tablet.putFile(newDatafile, dfv);
    tablet.putTime(time);
    newFile = Optional.of(newDatafile.insert());
    TServerInstance self = getTServerInstance(address, zooLock);
    tablet.putLocation(self, LocationType.LAST);
    // remove the old last location if it changed
    if (lastLocation != null && !lastLocation.equals(self)) {
      tablet.deleteLocation(lastLocation, LocationType.LAST);
    }
  }
  tablet.putFlushId(flushId);
  unusedWalLogs.forEach(tablet::deleteWal);
  tablet.putZooLock(zooLock);
  tablet.mutate();
  return newFile;
}
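A hypothetical caller sketch (the surrounding variables are assumed to exist): the Optional is empty when the flush wrote no entries, so callers only act on the file when one was actually recorded:

Optional<StoredTabletFile> flushed = ManagerMetadataUtil.updateTabletDataFile(context, extent,
    newDatafile, dfv, time, address, zooLock, unusedWalLogs, lastLocation, flushId);
// present only when dfv.getNumEntries() > 0
flushed.ifPresent(f -> System.out.println("flushed to " + f.getMetaUpdateDelete()));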