Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class DatafileManagerTest, method testReserveMergingMinorCompactionFile_MaxExceeded.
/*
 * Test that reserveMergingMinorCompactionFile reserves no file when every candidate exceeds the
 * max merge file size (table.compaction.minor.merge.file.size.max).
 */
@Test
public void testReserveMergingMinorCompactionFile_MaxExceeded() throws IOException {
  String maxMergeFileSize = "1000B";
  EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
  EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn(maxMergeFileSize);
  EasyMock.replay(tablet, tableConf);
  // every file is larger than the 1000B limit, so nothing is eligible for merging
  SortedMap<FileRef,DataFileValue> testFiles = createFileMap("largefile", "10M", "file2", "100M",
      "file3", "100M", "file4", "100M", "file5", "100M");
  DatafileManager dfm = new DatafileManager(tablet, testFiles);
  FileRef mergeFile = dfm.reserveMergingMinorCompactionFile();
  EasyMock.verify(tablet, tableConf);
  assertNull(mergeFile);
}
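Both tests rely on a createFileMap helper that the excerpt does not show. Below is a minimal sketch of what such a helper could look like, assuming alternating name/size pairs and the 2.0-era ConfigurationTypeHelper size parser; the HDFS path prefix and the entry count of 1 are illustrative assumptions, not the real fixture.
// Hypothetical reconstruction of the test helper; the actual method in
// DatafileManagerTest may differ in path layout and entry counts.
private SortedMap<FileRef,DataFileValue> createFileMap(String... pairs) {
  SortedMap<FileRef,DataFileValue> files = new TreeMap<>();
  for (int i = 0; i < pairs.length; i += 2) {
    // parse size strings such as "100B" or "10M" into a byte count
    long size = ConfigurationTypeHelper.getFixedMemoryAsBytes(pairs[i + 1]);
    files.put(new FileRef("hdfs://nn:8020/accumulo/tables/5/t-0001/" + pairs[i]),
        new DataFileValue(size, 1));
  }
  return files;
}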
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class DatafileManagerTest, method testReserveMergingMinorCompactionFile.
/*
 * Test that the smallest file is chosen for a merging minor compaction.
 */
@Test
public void testReserveMergingMinorCompactionFile() throws IOException {
  String maxMergeFileSize = "1000B";
  EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
  EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn(maxMergeFileSize);
  EasyMock.replay(tablet, tableConf);
  // only smallfile (100B) fits under the 1000B limit
  SortedMap<FileRef,DataFileValue> testFiles = createFileMap("smallfile", "100B", "file2", "100M",
      "file3", "100M", "file4", "100M", "file5", "100M");
  DatafileManager dfm = new DatafileManager(tablet, testFiles);
  FileRef mergeFile = dfm.reserveMergingMinorCompactionFile();
  EasyMock.verify(tablet, tableConf);
  assertEquals("smallfile", mergeFile.path().getName());
}
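For context on what these maps carry: a DataFileValue packs a file's size in bytes and its entry count, and round-trips through the encoding stored in the metadata table's file column. A small sketch; the numbers are illustrative.
// size in bytes and number of entries, as tracked per data file
DataFileValue dfv = new DataFileValue(100, 1);
byte[] encoded = dfv.encode();                  // compact textual form, e.g. "100,1"
DataFileValue decoded = new DataFileValue(encoded);
assert decoded.getSize() == 100;
assert decoded.getNumEntries() == 1;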
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class TwoTierCompactionStrategyTest, method testDefaultCompaction.
@Test
public void testDefaultCompaction() throws IOException {
  ttcs.init(opts);
  conf = DefaultConfiguration.getInstance();
  KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
  mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf);
  Map<FileRef,DataFileValue> fileMap = createFileMap("f1", "10M", "f2", "10M", "f3", "10M",
      "f4", "10M", "f5", "100M", "f6", "100M", "f7", "100M", "f8", "100M");
  mcr.setFiles(fileMap);
  Assert.assertTrue(ttcs.shouldCompact(mcr));
  Assert.assertEquals(8, mcr.getFiles().size());
  List<FileRef> filesToCompact = ttcs.getCompactionPlan(mcr).inputFiles;
  Assert.assertEquals(fileMap.keySet(), new HashSet<>(filesToCompact));
  Assert.assertEquals(8, filesToCompact.size());
  // no file crossed the large-file threshold, so no override compression type is set
  Assert.assertNull(ttcs.getCompactionPlan(mcr).writeParameters.getCompressType());
}
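The opts map passed to ttcs.init is built elsewhere in the test fixture. A plausible setup is sketched below, assuming TwoTierCompactionStrategy's option-key constants; the threshold and codec values are illustrative, not the fixture's actual values.
// hypothetical fixture setup; the keys are assumed to be
// TwoTierCompactionStrategy's documented constants
Map<String,String> opts = new HashMap<>();
// files at or above this size would be written with the "large" codec
opts.put(TwoTierCompactionStrategy.LARGE_FILE_COMPRESSION_THRESHOLD, "500M");
opts.put(TwoTierCompactionStrategy.LARGE_FILE_COMPRESSION_TYPE, "gz");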
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MasterMetadataUtil, method replaceDatafiles.
public static void replaceDatafiles(ClientContext context, KeyExtent extent,
    Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
    DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock,
    boolean insertDeleteFlags) throws IOException {
  if (insertDeleteFlags) {
    // add delete flags for those paths before the data file reference is removed
    MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context);
  }
  // replace data file references to old mapfiles with the new mapfiles
  Mutation m = new Mutation(extent.getMetadataEntry());
  for (FileRef pathToRemove : datafilesToDelete) {
    m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
  }
  for (FileRef scanFile : scanFiles) {
    m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
  }
  if (size.getNumEntries() > 0) {
    m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
  }
  if (compactionId != null) {
    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
  }
  TServerInstance self = getTServerInstance(address, zooLock);
  self.putLastLocation(m);
  // remove the old location
  if (lastLocation != null && !lastLocation.equals(self)) {
    lastLocation.clearLastLocation(m);
  }
  MetadataTableUtil.update(context, zooLock, m, extent);
}
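To make the mutation concrete, this is roughly the metadata row it produces when a compaction replaces two files with one; the column families follow the metadata schema (file, scan, srv, last), while the row and values are illustrative.
// illustrative metadata row for a tablet after replaceDatafiles:
//   5;m  file:hdfs://.../old1.rf  -> (deleted)
//   5;m  file:hdfs://.../old2.rf  -> (deleted)
//   5;m  file:hdfs://.../new.rf   -> "12345,678"  (DataFileValue.encode(): size,entries)
//   5;m  scan:hdfs://.../old1.rf  -> ""           (in-use scan marker, empty value)
//   5;m  srv:compact              -> "42"         (compaction id)
//   5;m  last:<session>           -> "host:port"  (last location)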
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MasterMetadataUtil, method fixSplit.
private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry,
    Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time,
    long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
  if (metadataPrevEndRow == null) {
    // something is wrong; a split tablet should always have a prev end row
    throw new AccumuloException(
        "Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
  }
  // check to see if the prev tablet exists in the metadata table
  Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
  try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
    VolumeManager fs = VolumeManagerImpl.get();
    if (!scanner2.iterator().hasNext()) {
      log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
      MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
    } else {
      log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
      List<FileRef> highDatafilesToRemove = new ArrayList<>();
      SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<>();
      SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<>();
      SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<>();
      try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        Key rowKey = new Key(metadataEntry);
        scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
        for (Entry<Key,Value> entry : scanner3) {
          if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
            origDatafileSizes.put(new FileRef(fs, entry.getKey()),
                new DataFileValue(entry.getValue().get()));
          }
        }
      }
      MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(),
          origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
      MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove,
          context, lock);
      return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
    }
  }
}
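The finish-split branch delegates the bookkeeping to MetadataTableUtil.splitDatafiles, which apportions each file's DataFileValue between the low and high tablets. A simplified sketch of that step, assuming no per-file estimates are available (the empty map passed above) so everything falls back to splitRatio; the real method also decides which files can be dropped from the high tablet entirely.
// simplified sketch, not the real MetadataTableUtil.splitDatafiles
for (Map.Entry<FileRef,DataFileValue> entry : origDatafileSizes.entrySet()) {
  DataFileValue dfv = entry.getValue();
  long lowSize = (long) Math.floor(dfv.getSize() * splitRatio);
  long lowEntries = (long) Math.floor(dfv.getNumEntries() * splitRatio);
  // the low tablet gets splitRatio of the size and entries, the high tablet the rest
  lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries));
  highDatafileSizes.put(entry.getKey(),
      new DataFileValue(dfv.getSize() - lowSize, dfv.getNumEntries() - lowEntries));
}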