Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class ConfigurableCompactionStrategyTest, method testOutputOptions.
// file selection options are adequately tested by ShellServerIT
@Test
public void testOutputOptions() throws Exception {
  MajorCompactionRequest mcr = new MajorCompactionRequest(new KeyExtent(Table.ID.of("1"), null, null), MajorCompactionReason.USER, null);

  Map<FileRef, DataFileValue> files = new HashMap<>();
  files.put(new FileRef("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"), new DataFileValue(50000, 400));
  mcr.setFiles(files);

  // test setting no output options
  ConfigurableCompactionStrategy ccs = new ConfigurableCompactionStrategy();
  Map<String, String> opts = new HashMap<>();
  ccs.init(opts);

  CompactionPlan plan = ccs.getCompactionPlan(mcr);

  Assert.assertEquals(0, plan.writeParameters.getBlockSize());
  Assert.assertEquals(0, plan.writeParameters.getHdfsBlockSize());
  Assert.assertEquals(0, plan.writeParameters.getIndexBlockSize());
  Assert.assertEquals(0, plan.writeParameters.getReplication());
  Assert.assertEquals(null, plan.writeParameters.getCompressType());

  // test setting all output options
  ccs = new ConfigurableCompactionStrategy();
  CompactionSettings.OUTPUT_BLOCK_SIZE_OPT.put(opts, "64K");
  CompactionSettings.OUTPUT_COMPRESSION_OPT.put(opts, "snappy");
  CompactionSettings.OUTPUT_HDFS_BLOCK_SIZE_OPT.put(opts, "256M");
  CompactionSettings.OUTPUT_INDEX_BLOCK_SIZE_OPT.put(opts, "32K");
  CompactionSettings.OUTPUT_REPLICATION_OPT.put(opts, "5");
  ccs.init(opts);

  plan = ccs.getCompactionPlan(mcr);

  Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("64K"), plan.writeParameters.getBlockSize());
  Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("256M"), plan.writeParameters.getHdfsBlockSize());
  Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("32K"), plan.writeParameters.getIndexBlockSize());
  Assert.assertEquals(5, plan.writeParameters.getReplication());
  Assert.assertEquals("snappy", plan.writeParameters.getCompressType());
}
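The DataFileValue passed to setFiles above is just a (size, numEntries) pair that gets encoded as bytes for storage in the metadata table. A minimal sketch of that round trip, assuming only the constructors and encode() call that appear in these snippets:

import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueRoundTrip {
  public static void main(String[] args) {
    // same values as the test above: a 50000-byte file with 400 entries
    DataFileValue dfv = new DataFileValue(50000, 400);

    // encode() produces the byte representation stored in the metadata table
    byte[] encoded = dfv.encode();

    // the byte[] constructor reverses the encoding
    DataFileValue decoded = new DataFileValue(encoded);
    System.out.println(decoded.getSize() + " bytes, " + decoded.getNumEntries() + " entries");
  }
}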
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class Initialize, method createEntriesForTablet.
private static void createEntriesForTablet(TreeMap<Key, Value> map, Tablet tablet) {
  Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
  Text extent = new Text(KeyExtent.getMetadataEntry(tablet.tableId, tablet.endRow));
  addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dir.getBytes(UTF_8)));
  addEntry(map, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
  addEntry(map, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tablet.prevEndRow));
  for (String file : tablet.files) {
    addEntry(map, extent, new ColumnFQ(DataFileColumnFamily.NAME, new Text(file)), EMPTY_SIZE);
  }
}
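The EMPTY_SIZE placeholder above marks each initial file as zero bytes and zero entries; encodeAsValue() wraps the encoded bytes in a Value suitable for a metadata cell. A small sketch of reading such a placeholder back, assuming the DataFileValue(byte[]) constructor used in the round-trip example above:

import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class EmptySizeCheck {
  public static void main(String[] args) {
    // the same placeholder createEntriesForTablet writes for each file
    Value emptySize = new DataFileValue(0, 0).encodeAsValue();

    // Value.get() returns the underlying bytes, which decode back to (0, 0)
    DataFileValue decoded = new DataFileValue(emptySize.get());
    System.out.println(decoded.getSize() + ", " + decoded.getNumEntries());
  }
}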
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MasterMetadataUtil, method addNewTablet.
public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location,
    Map<FileRef, DataFileValue> datafileSizes, Map<Long, ? extends Collection<FileRef>> bulkLoadedFiles,
    String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
  Mutation m = extent.getPrevRowUpdateMutation();
  TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
  TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
  if (lastFlushID > 0)
    TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
  if (lastCompactID > 0)
    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
  if (location != null) {
    location.putLocation(m);
    location.clearFutureLocation(m);
  }
  for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
    m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
  }
  for (Entry<Long, ? extends Collection<FileRef>> entry : bulkLoadedFiles.entrySet()) {
    Value tidBytes = new Value(Long.toString(entry.getKey()).getBytes());
    for (FileRef ref : entry.getValue()) {
      m.put(TabletsSection.BulkFileColumnFamily.NAME, ref.meta(), new Value(tidBytes));
    }
  }
  MetadataTableUtil.update(context, zooLock, m, extent);
}
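Callers of addNewTablet supply the datafileSizes map keyed by FileRef. A minimal sketch of building that argument, reusing the single-String FileRef constructor from the test above; the import path for the server-side FileRef class and the file path itself are assumptions:

import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.fs.FileRef; // assumed package for the server-side FileRef

public class BuildDatafileSizes {
  public static void main(String[] args) {
    Map<FileRef, DataFileValue> datafileSizes = new HashMap<>();
    // hypothetical file path; 50000 bytes and 400 entries as in the test above
    datafileSizes.put(new FileRef("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"),
        new DataFileValue(50000, 400));
    System.out.println(datafileSizes);
  }
}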
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MasterMetadataUtil, method getUpdateForTabletDataFile.
/**
 * Create an update for a tablet's metadata entry.
 *
 * @return a Mutation that updates a tablet from the given information
 */
private static Mutation getUpdateForTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv,
    String time, Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs,
    TServerInstance lastLocation, long flushId) {
  Mutation m = new Mutation(extent.getMetadataEntry());
  if (dfv.getNumEntries() > 0) {
    m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
    // record this server as the tablet's last location
    TServerInstance self = getTServerInstance(address, zooLock);
    self.putLastLocation(m);
    // erase the old location
    if (lastLocation != null && !lastLocation.equals(self))
      lastLocation.clearLastLocation(m);
  }
  if (unusedWalLogs != null) {
    for (String entry : unusedWalLogs) {
      m.putDelete(LogColumnFamily.NAME, new Text(entry));
    }
  }
  for (FileRef scanFile : filesInUseByScans) {
    m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
  }
  if (mergeFile != null)
    m.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
  TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(Long.toString(flushId).getBytes(UTF_8)));
  return m;
}
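Note the dfv.getNumEntries() > 0 guard: an empty flush skips the file, time, and last-location updates entirely. A small sketch of the same conditional mutation-building pattern using only the public client API; the row and column values here are hypothetical:

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.hadoop.io.Text;

public class ConditionalFileEntry {
  public static void main(String[] args) {
    DataFileValue dfv = new DataFileValue(0, 0);  // an empty flush
    Mutation m = new Mutation(new Text("1;row")); // hypothetical metadata row

    // mirror the guard above: only record the data file if it has entries
    if (dfv.getNumEntries() > 0) {
      m.put(new Text("file"), new Text("/t-009/F00001.rf"), new Value(dfv.encode()));
    }
    System.out.println("columns updated: " + m.size());
  }
}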
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MetadataTableUtil, method finishSplit.
public static void finishSplit(Text metadataEntry, Map<FileRef, DataFileValue> datafileSizes,
    List<FileRef> highDatafilesToRemove, final ClientContext context, ZooLock zooLock) {
  Mutation m = new Mutation(metadataEntry);
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
  ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
  for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
    m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
  }
  for (FileRef pathToRemove : highDatafilesToRemove) {
    m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
  }
  update(context, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
}
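The inverse of all the encode() calls in these snippets is reading DataFileValue back out of metadata entries. A hedged sketch, assuming a Scanner already restricted to the data-file column family:

import java.util.Map;

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class PrintDataFileSizes {
  // prints the size and entry count for each data-file entry the scanner yields
  static void printSizes(Scanner scanner) {
    for (Map.Entry<Key, Value> entry : scanner) {
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
      System.out.println(entry.getKey().getColumnQualifier() + " -> "
          + dfv.getSize() + " bytes, " + dfv.getNumEntries() + " entries");
    }
  }
}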