Use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project.
The class MasterMetadataUtil, method addNewTablet.
/**
 * Writes the metadata entry for a newly created tablet: its directory, logical time,
 * optional flush/compact ids, assigned location, data file sizes, and bulk-load markers.
 *
 * @param context client context used to apply the metadata update
 * @param extent the tablet being added; its prev-row mutation seeds the update
 * @param path metadata directory entry for the tablet
 * @param location server hosting the tablet; may be null if unassigned
 * @param datafileSizes data files (and their sizes) belonging to the tablet
 * @param bulkLoadedFiles bulk-load transaction id to the files loaded under it
 * @param time logical time value for the tablet
 * @param lastFlushID written only when > 0
 * @param lastCompactID written only when > 0
 * @param zooLock lock proving this process may update the metadata table
 */
public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location, Map<FileRef, DataFileValue> datafileSizes, Map<Long, ? extends Collection<FileRef>> bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
  Mutation m = extent.getPrevRowUpdateMutation();
  TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
  TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
  if (lastFlushID > 0) {
    // Use an explicit charset; the bare getBytes() used previously depended on the
    // platform default, unlike the UTF_8 encoding used for the other columns.
    TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(Long.toString(lastFlushID).getBytes(UTF_8)));
  }
  if (lastCompactID > 0) {
    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(Long.toString(lastCompactID).getBytes(UTF_8)));
  }
  if (location != null) {
    location.putLocation(m);
    location.clearFutureLocation(m);
  }
  for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
    m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
  }
  for (Entry<Long, ? extends Collection<FileRef>> entry : bulkLoadedFiles.entrySet()) {
    // One Value per transaction id; Mutation.put copies the bytes, so the same
    // instance can be reused for every file loaded under this tid (the previous
    // per-file copy via new Value(tidBytes) was redundant).
    Value tidBytes = new Value(Long.toString(entry.getKey()).getBytes(UTF_8));
    for (FileRef ref : entry.getValue()) {
      m.put(TabletsSection.BulkFileColumnFamily.NAME, ref.meta(), tidBytes);
    }
  }
  MetadataTableUtil.update(context, zooLock, m, extent);
}
Use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project.
The class MasterMetadataUtil, method getUpdateForTabletDataFile.
/**
* Create an update that updates a tablet
*
* @return A Mutation to update a tablet from the given information
*/
/**
 * Builds the mutation that records a newly minor-compacted data file for a tablet,
 * along with bookkeeping: logical time, last location, retired write-ahead logs,
 * scan-file references, an optional merge-file deletion, and the flush id.
 *
 * @return a Mutation capturing all of the above for the given tablet
 */
private static Mutation getUpdateForTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
  Mutation mutation = new Mutation(extent.getMetadataEntry());

  // Only record the file (and refresh time/location) when it actually holds entries.
  if (dfv.getNumEntries() > 0) {
    mutation.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mutation, new Value(time.getBytes(UTF_8)));

    // stuff in this location
    TServerInstance currentServer = getTServerInstance(address, zooLock);
    currentServer.putLastLocation(mutation);

    // erase the old location
    if (lastLocation != null && !lastLocation.equals(currentServer)) {
      lastLocation.clearLastLocation(mutation);
    }
  }

  if (unusedWalLogs != null) {
    for (String walEntry : unusedWalLogs) {
      mutation.putDelete(LogColumnFamily.NAME, new Text(walEntry));
    }
  }

  for (FileRef scanFile : filesInUseByScans) {
    mutation.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
  }

  if (mergeFile != null) {
    mutation.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
  }

  TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(mutation, new Value(Long.toString(flushId).getBytes(UTF_8)));

  return mutation;
}
Use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project.
The class MetadataTableUtil, method getBulkFilesLoaded.
/**
 * Reads the bulk-file column family for a tablet and groups the loaded files by the
 * bulk-load transaction id stored in each entry's value.
 *
 * @param context client context used to scan the (root or regular) metadata table
 * @param extent tablet whose bulk-loaded files are wanted
 * @return map from bulk-load transaction id to the files loaded under it
 * @throws IOException if the volume manager cannot be obtained
 */
public static Map<Long, ? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException {
  Text metadataRow = extent.getMetadataEntry();
  Map<Long, List<FileRef>> result = new HashMap<>();
  VolumeManager fs = VolumeManagerImpl.get();
  // A metadata tablet's entries live in the root table; everything else in metadata.
  try (Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : scanner) {
      // The value is the bulk-load transaction id as a decimal string.
      Long tid = Long.parseLong(entry.getValue().toString());
      // computeIfAbsent replaces the previous get/null-check/put sequence.
      result.computeIfAbsent(tid, k -> new ArrayList<>()).add(new FileRef(fs, entry.getKey()));
    }
  }
  return result;
}
Use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project.
The class MetadataTableUtil, method removeScanFiles.
/**
 * Deletes the given scan-file references from a tablet's metadata entry.
 *
 * @param extent tablet whose scan-file entries are removed
 * @param scanFiles files whose scan references should be deleted
 * @param context client context used to apply the update
 * @param zooLock lock proving this process may update the metadata table
 */
public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, ClientContext context, ZooLock zooLock) {
  Mutation mutation = new Mutation(extent.getMetadataEntry());
  for (FileRef scanFile : scanFiles) {
    mutation.putDelete(ScanFileColumnFamily.NAME, scanFile.meta());
  }
  update(context, zooLock, mutation, extent);
}
Use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project.
The class MetadataTableUtil, method finishSplit.
/**
 * Finalizes a tablet split in the metadata table: clears the split-in-progress
 * markers (split ratio, old prev row, chopped flag), records the low tablet's data
 * file sizes, and removes data files that now belong only to the high tablet.
 *
 * @param metadataEntry metadata row of the tablet being finalized
 * @param datafileSizes data files (and sizes) to record for this tablet
 * @param highDatafilesToRemove data file entries to delete from this tablet
 * @param context client context used to apply the update
 * @param zooLock lock proving this process may update the metadata table
 */
public static void finishSplit(Text metadataEntry, Map<FileRef, DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, final ClientContext context, ZooLock zooLock) {
  Mutation mutation = new Mutation(metadataEntry);

  // Remove the markers that indicate a split was underway.
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(mutation);
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(mutation);
  ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(mutation);

  for (Entry<FileRef, DataFileValue> sizeEntry : datafileSizes.entrySet()) {
    mutation.put(DataFileColumnFamily.NAME, sizeEntry.getKey().meta(), new Value(sizeEntry.getValue().encode()));
  }

  for (FileRef highFile : highDatafilesToRemove) {
    mutation.putDelete(DataFileColumnFamily.NAME, highFile.meta());
  }

  update(context, zooLock, mutation, new KeyExtent(metadataEntry, (Text) null));
}
Aggregations