Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class ManagerMetadataUtil, method addNewTablet.
public static void addNewTablet(ServerContext context, KeyExtent extent, String dirName,
    TServerInstance location, Map<StoredTabletFile,DataFileValue> datafileSizes,
    Map<Long,? extends Collection<TabletFile>> bulkLoadedFiles, MetadataTime time,
    long lastFlushID, long lastCompactID, ServiceLock zooLock) {
  TabletMutator tablet = context.getAmple().mutateTablet(extent);
  tablet.putPrevEndRow(extent.prevEndRow());
  tablet.putZooLock(zooLock);
  tablet.putDirName(dirName);
  tablet.putTime(time);

  if (lastFlushID > 0)
    tablet.putFlushId(lastFlushID);

  if (lastCompactID > 0)
    tablet.putCompactionId(lastCompactID);

  if (location != null) {
    tablet.putLocation(location, LocationType.CURRENT);
    tablet.deleteLocation(location, LocationType.FUTURE);
  }

  datafileSizes.forEach(tablet::putFile);

  for (Entry<Long,? extends Collection<TabletFile>> entry : bulkLoadedFiles.entrySet()) {
    for (TabletFile ref : entry.getValue()) {
      tablet.putBulkFile(ref, entry.getKey());
    }
  }

  tablet.mutate();
}
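This method shows Ample's fluent TabletMutator pattern: puts are staged on the mutator and written to the metadata table in a single commit when mutate() is called. A minimal sketch of the same pattern, assuming a ServerContext ctx and a KeyExtent extent are already in scope (both names are illustrative, not part of the snippet above):

// Minimal sketch of the Ample mutator pattern; ctx and extent are assumed
// to exist in the caller. Puts are staged locally and only persisted to the
// metadata table when mutate() runs.
TabletMutator mutator = ctx.getAmple().mutateTablet(extent);
mutator.putDirName("dir_0"); // illustrative directory name
mutator.putTime(new MetadataTime(0, TimeType.LOGICAL));
mutator.mutate(); // single commit of all staged changes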
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class CompactableImplTest, method newECM.
private static ExternalCompactionMetadata newECM(Set<StoredTabletFile> jobFiles,
    Set<StoredTabletFile> nextFiles, CompactionKind kind, boolean propagateDeletes,
    boolean initiallySelectedAll, Long compactionId) {
  TabletFile compactTmpName = newFile("C00000A.rf_tmp");
  String compactorId = "cid";
  short priority = 9;
  CompactionExecutorId ceid = CompactionExecutorIdImpl.externalId("ecs1");
  return new ExternalCompactionMetadata(jobFiles, nextFiles, compactTmpName, compactorId,
      kind, priority, ceid, propagateDeletes, initiallySelectedAll, compactionId);
}
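newFile is a helper defined elsewhere in the test class. A plausible reconstruction, assuming it simply wraps a fully qualified tablet-file path in a TabletFile (the volume, table id, and tablet directory below are made up for illustration):

// Hypothetical reconstruction of the test helper: wrap a fully qualified
// rfile path in a TabletFile. Volume, table id, and tablet dir are invented.
private static TabletFile newFile(String fileName) {
  return new TabletFile(new Path("hdfs://nn1/accumulo/tables/2a/t-0001/" + fileName));
}

The _tmp suffix on C00000A.rf_tmp marks the file as an in-progress compaction output, which is renamed to its final rfile name only once the compaction commits.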
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class SplitRecoveryIT, method runSplitRecoveryTest.
private void runSplitRecoveryTest(ServerContext context, int failPoint, String mr,
    int extentToSplit, ServiceLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  SortedMap<StoredTabletFile,DataFileValue> splitMapFiles = null;

  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    String dirName = "dir_" + i;
    String tdir =
        context.getTablesDirs().iterator().next() + "/" + extent.tableId() + "/" + dirName;
    MetadataTableUtil.addTablet(extent, dirName, context, TimeType.LOGICAL, zl);
    SortedMap<TabletFile,DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new TabletFile(new Path(tdir + "/" + RFile.EXTENSION + "_000_000")),
        new DataFileValue(1000017 + i, 10000 + i));

    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(context, Constants.BULK_ARBITRATOR_TYPE, tid);
    SortedMap<StoredTabletFile,DataFileValue> storedFiles =
        new TreeMap<>(MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles,
            new MetadataTime(0, TimeType.LOGICAL), context, zl));
    if (i == extentToSplit) {
      splitMapFiles = storedFiles;
    }
  }

  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.tableId(), extent.endRow(), midRow);
  KeyExtent low = new KeyExtent(extent.tableId(), midRow, extent.prevEndRow());

  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow,
      "localhost:1234", failPoint, zl);
}
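The test passes through both file types: TabletFile wraps the filesystem path used to reference a new file, while StoredTabletFile represents that entry as persisted in the metadata table (updateTabletDataFile performs the conversion above). A minimal sketch of the two constructions, with an illustrative path and assuming the metadata entry is the fully qualified path string:

// Illustrative only: a candidate file referenced by its qualified path, and
// the same reference rebuilt from its metadata table entry string.
Path p = new Path("hdfs://nn1/accumulo/tables/2a/t-0001/F0000070.rf");
TabletFile candidate = new TabletFile(p);
StoredTabletFile stored = new StoredTabletFile(p.toString());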
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class Compactor, method createCompactionJob.
/**
 * Create compaction runnable
 *
 * @param job compaction job
 * @param totalInputEntries object to capture total entries
 * @param totalInputBytes object to capture input file size
 * @param started started latch
 * @param stopped stopped latch
 * @param err reference to error
 * @return Runnable compaction job
 */
protected Runnable createCompactionJob(final TExternalCompactionJob job,
    final LongAdder totalInputEntries, final LongAdder totalInputBytes,
    final CountDownLatch started, final CountDownLatch stopped,
    final AtomicReference<Throwable> err) {
  return new Runnable() {
    @Override
    public void run() {
      // It is only expected that a single compaction runs at a time. Multiple compactions
      // running at a time could cause odd behavior, like out-of-order and unexpected thrift
      // calls to the coordinator. This is a sanity check to ensure the expectation is met.
      // Should this check ever fail, it means there is a bug elsewhere.
      Preconditions.checkState(compactionRunning.compareAndSet(false, true));
      try {
        LOG.info("Starting up compaction runnable for job: {}", job);
        TCompactionStatusUpdate update =
            new TCompactionStatusUpdate(TCompactionState.STARTED, "Compaction started", -1, -1, -1);
        updateCompactionState(job, update);

        final AccumuloConfiguration tConfig;
        var extent = KeyExtent.fromThrift(job.getExtent());
        if (!job.getOverrides().isEmpty()) {
          tConfig = new ConfigurationCopy(getContext().getTableConfiguration(extent.tableId()));
          job.getOverrides().forEach((k, v) -> ((ConfigurationCopy) tConfig).set(k, v));
          LOG.debug("Overriding table properties with {}", job.getOverrides());
        } else {
          tConfig = getContext().getTableConfiguration(extent.tableId());
        }

        final TabletFile outputFile = new TabletFile(new Path(job.getOutputFile()));

        final Map<StoredTabletFile,DataFileValue> files = new TreeMap<>();
        job.getFiles().forEach(f -> {
          files.put(new StoredTabletFile(f.getMetadataFileEntry()),
              new DataFileValue(f.getSize(), f.getEntries(), f.getTimestamp()));
          totalInputEntries.add(f.getEntries());
          totalInputBytes.add(f.getSize());
        });

        final List<IteratorSetting> iters = new ArrayList<>();
        job.getIteratorSettings().getIterators()
            .forEach(tis -> iters.add(SystemIteratorUtil.toIteratorSetting(tis)));

        ExtCEnv cenv = new ExtCEnv(JOB_HOLDER, queueName);
        FileCompactor compactor = new FileCompactor(getContext(), extent, files, outputFile,
            job.isPropagateDeletes(), cenv, iters, tConfig);

        LOG.trace("Starting compactor");
        started.countDown();

        org.apache.accumulo.server.compaction.CompactionStats stat = compactor.call();
        TCompactionStats cs = new TCompactionStats();
        cs.setEntriesRead(stat.getEntriesRead());
        cs.setEntriesWritten(stat.getEntriesWritten());
        cs.setFileSize(stat.getFileSize());
        JOB_HOLDER.setStats(cs);

        LOG.info("Compaction completed successfully {} ", job.getExternalCompactionId());
        // Update state when completed
        TCompactionStatusUpdate update2 = new TCompactionStatusUpdate(TCompactionState.SUCCEEDED,
            "Compaction completed successfully", -1, -1, -1);
        updateCompactionState(job, update2);
      } catch (Exception e) {
        LOG.error("Compaction failed", e);
        err.set(e);
      } finally {
        stopped.countDown();
        Preconditions.checkState(compactionRunning.compareAndSet(true, false));
      }
    }
  };
}
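The compareAndSet pair that brackets the job body is a reusable single-runner invariant: fail fast if two compactions ever overlap, instead of letting thrift calls to the coordinator interleave. A stripped-down sketch of the same guard, independent of the Compactor class (all names here are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

// Stand-alone version of the single-runner guard used above. Throws
// IllegalStateException on overlap rather than queueing or running twice.
class SingleRunGuard {
  private final AtomicBoolean running = new AtomicBoolean(false);

  void runExclusively(Runnable work) {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException("a run is already in progress");
    }
    try {
      work.run();
    } finally {
      running.compareAndSet(true, false);
    }
  }
}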
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class ThriftClientHandler, method bulkImport.
@Override
public List<TKeyExtent> bulkImport(TInfo tinfo, TCredentials credentials, final long tid,
    final Map<TKeyExtent,Map<String,MapFileInfo>> files, final boolean setTime)
    throws ThriftSecurityException {
  if (!security.canPerformSystemActions(credentials)) {
    throw new ThriftSecurityException(credentials.getPrincipal(),
        SecurityErrorCode.PERMISSION_DENIED);
  }
  try {
    return transactionWatcher.run(Constants.BULK_ARBITRATOR_TYPE, tid, () -> {
      List<TKeyExtent> failures = new ArrayList<>();
      for (Entry<TKeyExtent,Map<String,MapFileInfo>> entry : files.entrySet()) {
        TKeyExtent tke = entry.getKey();
        Map<String,MapFileInfo> fileMap = entry.getValue();
        Map<TabletFile,MapFileInfo> fileRefMap = new HashMap<>();
        for (Entry<String,MapFileInfo> mapping : fileMap.entrySet()) {
          Path path = new Path(mapping.getKey());
          FileSystem ns = context.getVolumeManager().getFileSystemByPath(path);
          path = ns.makeQualified(path);
          fileRefMap.put(new TabletFile(path), mapping.getValue());
        }
        Tablet importTablet = server.getOnlineTablet(KeyExtent.fromThrift(tke));
        if (importTablet == null) {
          failures.add(tke);
        } else {
          try {
            importTablet.importMapFiles(tid, fileRefMap, setTime);
          } catch (IOException ioe) {
            log.info("files {} not imported to {}: {}", fileMap.keySet(),
                KeyExtent.fromThrift(tke), ioe.getMessage());
            failures.add(tke);
          }
        }
      }
      return failures;
    });
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
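Before a TabletFile is built, each client-supplied path string is qualified against the filesystem that owns it, so scheme-less or relative paths resolve consistently across volumes. A minimal stand-alone sketch of that qualification step, assuming a default Hadoop Configuration (the input path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyPathExample {
  public static void main(String[] args) throws Exception {
    // Qualify a possibly scheme-less path the same way bulkImport does
    // before wrapping it in a TabletFile.
    Path raw = new Path("/accumulo/tables/2a/t-0001/I0000001.rf");
    FileSystem fs = FileSystem.get(new Configuration());
    Path qualified = fs.makeQualified(raw); // adds scheme and authority, e.g. hdfs://nn1/...
    System.out.println(qualified);
  }
}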