Example use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project: class Tablet, method prepareForMinC.
/**
 * Prepares this tablet for a minor compaction: snapshots the in-memory map,
 * rotates the write-ahead-log set, and (outside of recovery) reserves a small
 * existing file to merge into the flush.
 */
private synchronized MinorCompactionTask prepareForMinC(long flushId, MinorCompactionReason mincReason) {
  CommitSession oldCommitSession = getTabletMemory().prepareForMinC();

  // Rotate WAL tracking: logs in use so far become "other", and a fresh set
  // collects logs written from this point on.
  otherLogs = currentLogs;
  currentLogs = new ConcurrentSkipListSet<>();

  // Recovery flushes never merge an existing file; otherwise the datafile
  // manager may nominate one small file to fold into this compaction.
  FileRef mergeFile = (mincReason == MinorCompactionReason.RECOVERY) ? null
      : getDatafileManager().reserveMergingMinorCompactionFile();

  double tracePercent = tabletServer.getConfiguration().getFraction(Property.TSERV_MINC_TRACE_PERCENT);

  return new MinorCompactionTask(this, mergeFile, oldCommitSession, flushId, mincReason, tracePercent);
}
Example use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project: class Tablet, method getFirstAndLastKeys.
/**
 * Reads the first and last key of each given data file directly from the file.
 *
 * @param allFiles data files to inspect (sizes in the map values are not used)
 * @return a map from each file to its (first key, last key) pair
 * @throws IOException if a file cannot be opened or read
 */
private Map<FileRef, Pair<Key, Key>> getFirstAndLastKeys(SortedMap<FileRef, DataFileValue> allFiles) throws IOException {
  Map<FileRef, Pair<Key, Key>> firstLastKeys = new HashMap<>();
  FileOperations fileOps = FileOperations.getInstance();
  VolumeManager volMgr = getTabletServer().getFileSystem();

  // Only the file references are needed; the DataFileValue entries are unused.
  for (FileRef dataFile : allFiles.keySet()) {
    FileSystem volumeFs = volMgr.getVolumeByPath(dataFile.path()).getFileSystem();
    // try-with-resources guarantees the reader is closed even if a read fails.
    try (FileSKVIterator reader = fileOps.newReaderBuilder()
        .forFile(dataFile.path().toString(), volumeFs, volumeFs.getConf())
        .withTableConfiguration(this.getTableConfiguration())
        .seekToBeginning()
        .build()) {
      firstLastKeys.put(dataFile, new Pair<>(reader.getFirstKey(), reader.getLastKey()));
    }
  }
  return firstLastKeys;
}
Example use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project: class DatafileManagerTest, method testReserveMergingMinorCompactionFileDisabled.
/*
 * Verifies that a max merge file size of zero disables the size limit, so the
 * smallest file is still selected for a merging minor compaction.
 */
@Test
public void testReserveMergingMinorCompactionFileDisabled() throws IOException {
  // Stub the table configuration to report a disabled (zero) max merge size.
  EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
  EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn("0");
  EasyMock.replay(tablet, tableConf);

  SortedMap<FileRef, DataFileValue> files = createFileMap("smallishfile", "10M", "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
  FileRef chosen = new DatafileManager(tablet, files).reserveMergingMinorCompactionFile();

  EasyMock.verify(tablet, tableConf);
  // With the limit disabled, the smallest file is the merge candidate.
  assertEquals("smallishfile", chosen.path().getName());
}
Example use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project: class CompactionPlanTest, method testOverlappingInputAndDelete.
@Test
public void testOverlappingInputAndDelete() {
  // A plan whose delete set overlaps its input set must fail validation.
  FileRef file1 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
  FileRef file2 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/2.rf");

  CompactionPlan plan = new CompactionPlan();
  plan.inputFiles.add(file1);
  plan.deleteFiles.add(file1); // file1 appears in both sets — invalid
  plan.deleteFiles.add(file2);

  exception.expect(IllegalStateException.class);
  plan.validate(ImmutableSet.of(file1, file2));
}
Example use of org.apache.accumulo.server.fs.FileRef in the Apache Accumulo project: class TwoTierCompactionStrategyTest, method testLargeCompaction.
/**
 * Verifies that four 2G files trigger a compaction that includes all files and
 * uses the large-file compression type.
 */
@Test
public void testLargeCompaction() throws IOException {
  ttcs.init(opts);
  conf = DefaultConfiguration.getInstance();
  KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
  mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf);
  Map<FileRef, DataFileValue> fileMap = createFileMap("f1", "2G", "f2", "2G", "f3", "2G", "f4", "2G");
  mcr.setFiles(fileMap);

  Assert.assertTrue(ttcs.shouldCompact(mcr));
  Assert.assertEquals(4, mcr.getFiles().size());

  // Compute the plan once; the previous version re-ran getCompactionPlan for
  // each assertion, redundantly re-executing the strategy.
  CompactionPlan plan = ttcs.getCompactionPlan(mcr);
  List<FileRef> filesToCompact = plan.inputFiles;
  Assert.assertEquals(fileMap.keySet(), new HashSet<>(filesToCompact));
  Assert.assertEquals(4, filesToCompact.size());
  Assert.assertEquals(largeCompressionType, plan.writeParameters.getCompressType());
}
End of aggregated FileRef usage examples.