Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
Class CompactableImplTest, method newECM.
private static ExternalCompactionMetadata newECM(Set<StoredTabletFile> jobFiles,
    Set<StoredTabletFile> nextFiles, CompactionKind kind, boolean propagateDeletes,
    boolean initiallySelectedAll, Long compactionId) {
  TabletFile compactTmpName = newFile("C00000A.rf_tmp");
  String compactorId = "cid";
  short priority = 9;
  CompactionExecutorId ceid = CompactionExecutorIdImpl.externalId("ecs1");
  return new ExternalCompactionMetadata(jobFiles, nextFiles, compactTmpName, compactorId, kind,
      priority, ceid, propagateDeletes, initiallySelectedAll, compactionId);
}
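For context, a caller in the same test class might exercise this helper along the following lines. This is a hypothetical sketch: the file names, the CompactionKind value, and the assumption that newFile(...) returns StoredTabletFile instances are illustrative, not taken from the source.

  // Hypothetical usage of the helper above, assuming newFile(...) is the
  // test's own factory for StoredTabletFile instances.
  Set<StoredTabletFile> jobFiles = Set.of(newFile("F00001.rf"), newFile("F00002.rf"));
  Set<StoredTabletFile> nextFiles = Set.of(newFile("F00003.rf"));
  ExternalCompactionMetadata ecm =
      newECM(jobFiles, nextFiles, CompactionKind.USER, true, false, 5L);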
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
Class CompactionPlanTest, method testInputNotInAllFiles.
@Test
public void testInputNotInAllFiles() {
  CompactionPlan cp1 = new CompactionPlan();
  StoredTabletFile fr1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
  StoredTabletFile fr2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
  StoredTabletFile fr3 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
  cp1.inputFiles.add(fr1);
  cp1.inputFiles.add(fr2);
  cp1.inputFiles.add(fr3);
  Set<StoredTabletFile> allFiles = Set.of(fr1, fr2);
  assertThrows(IllegalStateException.class, () -> cp1.validate(allFiles));
}
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
Class CompactionPlanTest, method testDeleteNotInAllFiles.
@Test
public void testDeleteNotInAllFiles() {
  CompactionPlan cp1 = new CompactionPlan();
  StoredTabletFile fr1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
  StoredTabletFile fr2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
  StoredTabletFile fr3 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
  cp1.deleteFiles.add(fr1);
  cp1.deleteFiles.add(fr2);
  cp1.deleteFiles.add(fr3);
  Set<StoredTabletFile> allFiles = Set.of(fr1, fr2);
  assertThrows(IllegalStateException.class, () -> cp1.validate(allFiles));
}
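Both tests above drive CompactionPlan.validate, which requires every planned input and delete file to be a member of the tablet's current file set and throws IllegalStateException when one is missing (fr3 in each test). A minimal sketch of the passing case, using only the fields and constructor shown above:

  CompactionPlan ok = new CompactionPlan();
  StoredTabletFile f1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
  StoredTabletFile f2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
  ok.inputFiles.add(f1);
  ok.deleteFiles.add(f2);
  // Every referenced file is present in the set passed in, so no exception is thrown.
  ok.validate(Set.of(f1, f2));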
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
Class SplitRecoveryIT, method runSplitRecoveryTest.
private void runSplitRecoveryTest(ServerContext context, int failPoint, String mr,
    int extentToSplit, ServiceLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  SortedMap<StoredTabletFile, DataFileValue> splitMapFiles = null;
  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    String dirName = "dir_" + i;
    String tdir =
        context.getTablesDirs().iterator().next() + "/" + extent.tableId() + "/" + dirName;
    MetadataTableUtil.addTablet(extent, dirName, context, TimeType.LOGICAL, zl);
    SortedMap<TabletFile, DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new TabletFile(new Path(tdir + "/" + RFile.EXTENSION + "_000_000")),
        new DataFileValue(1000017 + i, 10000 + i));
    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(context, Constants.BULK_ARBITRATOR_TYPE, tid);
    // Persist the file entries; updateTabletDataFile returns the stored (metadata) form
    // of the files, keyed by StoredTabletFile.
    SortedMap<StoredTabletFile, DataFileValue> storedFiles =
        new TreeMap<>(MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles,
            new MetadataTime(0, TimeType.LOGICAL), context, zl));
    if (i == extentToSplit) {
      splitMapFiles = storedFiles;
    }
  }
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.tableId(), extent.endRow(), midRow);
  KeyExtent low = new KeyExtent(extent.tableId(), midRow, extent.prevEndRow());
  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow,
      "localhost:1234", failPoint, zl);
}
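A hypothetical invocation follows; the table id, rows, and fail point are chosen purely for illustration, and context and zl are assumed to come from the test harness. The extents passed in are contiguous key ranges of one table, and the chosen one is split at midRow.

  TableId tid = TableId.of("1");
  KeyExtent e1 = new KeyExtent(tid, new Text("m"), null); // rows up to and including "m"
  KeyExtent e2 = new KeyExtent(tid, null, new Text("m")); // rows after "m"
  // Split e1 at row "g", simulating a failure at an illustrative fail point.
  runSplitRecoveryTest(context, 1, "g", 0, zl, e1, e2);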
Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.
Class Compactor, method createCompactionJob.
/**
 * Create compaction runnable
 *
 * @param job compaction job
 * @param totalInputEntries object to capture total entries
 * @param totalInputBytes object to capture input file size
 * @param started started latch
 * @param stopped stopped latch
 * @param err reference to error
 * @return Runnable compaction job
 */
protected Runnable createCompactionJob(final TExternalCompactionJob job,
    final LongAdder totalInputEntries, final LongAdder totalInputBytes,
    final CountDownLatch started, final CountDownLatch stopped,
    final AtomicReference<Throwable> err) {
  return new Runnable() {
    @Override
    public void run() {
      // It's only expected that a single compaction runs at a time. Multiple compactions
      // running at a time could cause odd behavior like out of order and unexpected thrift
      // calls to the coordinator. This is a sanity check to ensure the expectation is met.
      // Should this check ever fail, it means there is a bug elsewhere.
      Preconditions.checkState(compactionRunning.compareAndSet(false, true));
      try {
        LOG.info("Starting up compaction runnable for job: {}", job);
        TCompactionStatusUpdate update = new TCompactionStatusUpdate(TCompactionState.STARTED,
            "Compaction started", -1, -1, -1);
        updateCompactionState(job, update);
        final AccumuloConfiguration tConfig;
        var extent = KeyExtent.fromThrift(job.getExtent());
        if (!job.getOverrides().isEmpty()) {
          tConfig = new ConfigurationCopy(getContext().getTableConfiguration(extent.tableId()));
          job.getOverrides().forEach((k, v) -> ((ConfigurationCopy) tConfig).set(k, v));
          LOG.debug("Overriding table properties with {}", job.getOverrides());
        } else {
          tConfig = getContext().getTableConfiguration(extent.tableId());
        }
        final TabletFile outputFile = new TabletFile(new Path(job.getOutputFile()));
        // Rebuild the input file map from the thrift job, keyed by StoredTabletFile,
        // while accumulating totals for the caller.
        final Map<StoredTabletFile, DataFileValue> files = new TreeMap<>();
        job.getFiles().forEach(f -> {
          files.put(new StoredTabletFile(f.getMetadataFileEntry()),
              new DataFileValue(f.getSize(), f.getEntries(), f.getTimestamp()));
          totalInputEntries.add(f.getEntries());
          totalInputBytes.add(f.getSize());
        });
        final List<IteratorSetting> iters = new ArrayList<>();
        job.getIteratorSettings().getIterators()
            .forEach(tis -> iters.add(SystemIteratorUtil.toIteratorSetting(tis)));
        ExtCEnv cenv = new ExtCEnv(JOB_HOLDER, queueName);
        FileCompactor compactor = new FileCompactor(getContext(), extent, files, outputFile,
            job.isPropagateDeletes(), cenv, iters, tConfig);
        LOG.trace("Starting compactor");
        started.countDown();
        org.apache.accumulo.server.compaction.CompactionStats stat = compactor.call();
        TCompactionStats cs = new TCompactionStats();
        cs.setEntriesRead(stat.getEntriesRead());
        cs.setEntriesWritten(stat.getEntriesWritten());
        cs.setFileSize(stat.getFileSize());
        JOB_HOLDER.setStats(cs);
        LOG.info("Compaction completed successfully {} ", job.getExternalCompactionId());
        // Update state when completed
        TCompactionStatusUpdate update2 = new TCompactionStatusUpdate(TCompactionState.SUCCEEDED,
            "Compaction completed successfully", -1, -1, -1);
        updateCompactionState(job, update2);
      } catch (Exception e) {
        LOG.error("Compaction failed", e);
        err.set(e);
      } finally {
        stopped.countDown();
        Preconditions.checkState(compactionRunning.compareAndSet(true, false));
      }
    }
  };
}
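A sketch of how a caller might wire up the runnable's coordination objects, based only on the signature above; the executor and the already-built TExternalCompactionJob are assumptions, not part of the source.

  LongAdder totalEntries = new LongAdder();
  LongAdder totalBytes = new LongAdder();
  CountDownLatch started = new CountDownLatch(1);
  CountDownLatch stopped = new CountDownLatch(1);
  AtomicReference<Throwable> err = new AtomicReference<>();
  // 'executor' and 'job' are assumed to exist in the surrounding code.
  executor.execute(createCompactionJob(job, totalEntries, totalBytes, started, stopped, err));
  started.await(); // the FileCompactor has been constructed and is running
  stopped.await(); // the runnable has finished, successfully or not
  if (err.get() != null) {
    // surface the failure captured by the runnable
  }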