Usage of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
Example: class MetadataTableUtil, method checkClone.
/**
 * Verifies a table-clone operation by walking the source and clone metadata in
 * lock-step and confirming that each clone tablet references only files that
 * still exist in the corresponding source tablet(s). Tablets already marked
 * with a "cloned OK" entry are skipped; tablets whose files diverged have
 * their clone metadata deleted and rewritten from the current source tablets.
 *
 * @return the number of clone tablets whose metadata had to be rewritten
 *         (0 means the clone is consistent and fully marked OK)
 * @throws TabletDeletedException if source tablets merged/split underneath the
 *         clone in a way that cannot be reconciled (caller retries the check)
 */
@VisibleForTesting
public static int checkClone(String testTableName, TableId srcTableId, TableId tableId,
    AccumuloClient client, BatchWriter bw)
    throws TableNotFoundException, MutationsRejectedException {
  // Both scanners yield tablets ordered by end row; the merge logic below
  // depends on that ordering to pair clone tablets with source tablets.
  Iterator<TabletMetadata> srcIter = createCloneScanner(testTableName, srcTableId, client).iterator();
  Iterator<TabletMetadata> cloneIter = createCloneScanner(testTableName, tableId, client).iterator();
  if (!cloneIter.hasNext() || !srcIter.hasNext())
    throw new RuntimeException(" table deleted during clone? srcTableId = " + srcTableId
        + " tableId=" + tableId);
  int rewrites = 0;
  while (cloneIter.hasNext()) {
    TabletMetadata cloneTablet = cloneIter.next();
    Text cloneEndRow = cloneTablet.getEndRow();
    HashSet<TabletFile> cloneFiles = new HashSet<>();
    // A non-null "cloned" marker means this tablet already passed a previous
    // check; its files need not be compared again.
    boolean cloneSuccessful = cloneTablet.getCloned() != null;
    if (!cloneSuccessful)
      cloneFiles.addAll(cloneTablet.getFiles());
    List<TabletMetadata> srcTablets = new ArrayList<>();
    TabletMetadata srcTablet = srcIter.next();
    srcTablets.add(srcTablet);
    Text srcEndRow = srcTablet.getEndRow();
    int cmp = compareEndRows(cloneEndRow, srcEndRow);
    // cmp < 0 means the source iterator is already past the clone tablet's end
    // row — source tablets must have merged away during the clone.
    if (cmp < 0)
      throw new TabletDeletedException(
          "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
    HashSet<TabletFile> srcFiles = new HashSet<>();
    if (!cloneSuccessful)
      srcFiles.addAll(srcTablet.getFiles());
    // One clone tablet may span several source tablets (source split after the
    // clone began); keep consuming source tablets until end rows line up.
    while (cmp > 0) {
      srcTablet = srcIter.next();
      srcTablets.add(srcTablet);
      srcEndRow = srcTablet.getEndRow();
      cmp = compareEndRows(cloneEndRow, srcEndRow);
      if (cmp < 0)
        throw new TabletDeletedException(
            "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
      if (!cloneSuccessful)
        srcFiles.addAll(srcTablet.getFiles());
    }
    if (cloneSuccessful)
      continue;
    if (srcFiles.containsAll(cloneFiles)) {
      // write out marker that this tablet was successfully cloned
      Mutation m = new Mutation(cloneTablet.getExtent().toMetaRow());
      m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK"));
      bw.addMutation(m);
    } else {
      // delete existing cloned tablet entry
      Mutation m = new Mutation(cloneTablet.getExtent().toMetaRow());
      for (Entry<Key, Value> entry : cloneTablet.getKeyValues().entrySet()) {
        Key k = entry.getKey();
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
      }
      bw.addMutation(m);
      // Re-seed the clone tablet's metadata from the current source tablets so
      // the next pass can re-check it.
      for (TabletMetadata st : srcTablets)
        bw.addMutation(createCloneMutation(srcTableId, tableId, st.getKeyValues()));
      rewrites++;
    }
  }
  bw.flush();
  return rewrites;
}
Usage of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
Example: class CompactableImpl, method compact.
/**
 * Runs a local (in-tserver) major compaction for the given job. Files are
 * reserved up front; if the reservation fails the job is silently dropped.
 * The finally block always releases the reservation and records timing stats,
 * whether the compaction succeeded, was canceled, or failed.
 */
@Override
public void compact(CompactionServiceId service, CompactionJob job, RateLimiter readLimiter,
    RateLimiter writeLimiter, long queuedTime) {
  Optional<CompactionInfo> reservation = reserveFilesForCompaction(service, job);
  if (reservation.isEmpty())
    return;
  var info = reservation.get();

  StoredTabletFile producedFile = null;
  long began = System.currentTimeMillis();
  CompactionKind compactionKind = job.getKind();
  CompactionStats compactionStats = new CompactionStats();
  try {
    TabletLogger.compacting(getExtent(), job, info.localCompactionCfg);
    tablet.incrementStatusMajor();

    var check = new CompactionCheck(service, compactionKind, info.checkCompactionId);
    TabletFile tmpFile = tablet.getNextMapFilenameForMajc(info.propagateDeletes);
    var env = new MajCEnv(compactionKind, check, readLimiter, writeLimiter, info.propagateDeletes);

    // Restrict the datafile map to just the files reserved for this job.
    SortedMap<StoredTabletFile, DataFileValue> datafiles = tablet.getDatafiles();
    HashMap<StoredTabletFile, DataFileValue> inputs = new HashMap<>();
    info.jobFiles.forEach(f -> inputs.put(f, datafiles.get(f)));

    compactionStats = CompactableUtils.compact(tablet, job, info, env, inputs, tmpFile);

    producedFile = CompactableUtils.bringOnline(tablet.getDatafileManager(), info,
        compactionStats, inputs, datafiles, compactionKind, tmpFile);

    TabletLogger.compacted(getExtent(), job, producedFile);
  } catch (CompactionCanceledException cce) {
    log.debug("Compaction canceled {} ", getExtent());
  } catch (Exception e) {
    // Ensure the finally block records this as a failed compaction.
    producedFile = null;
    throw new RuntimeException(e);
  } finally {
    completeCompaction(job, info.jobFiles, producedFile);
    // A null produced file flags the attempt as unsuccessful in the timer.
    tablet.updateTimer(MAJOR, queuedTime, began, compactionStats.getEntriesRead(),
        producedFile == null);
  }
}
Usage of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
Example: class CompactableImpl, method reserveExternalCompaction.
/**
 * Reserves this tablet's files for an external compaction and persists the
 * external-compaction metadata so the work survives a tserver restart.
 * Returns null when the files could not be reserved. On any failure after
 * reservation, the reservation is released before rethrowing.
 */
@Override
public ExternalCompactionJob reserveExternalCompaction(CompactionServiceId service,
    CompactionJob job, String compactorId, ExternalCompactionId externalCompactionId) {
  // External compactions are not supported for metadata tablets.
  Preconditions.checkState(!tablet.getExtent().isMeta());

  Optional<CompactionInfo> reservation = reserveFilesForCompaction(service, job);
  if (reservation.isEmpty())
    return null;
  var info = reservation.get();

  try {
    Map<String, String> overrides =
        CompactableUtils.getOverrides(job.getKind(), tablet, info.localHelper, job.getFiles());

    TabletFile tmpFile = tablet.getNextMapFilenameForMajc(info.propagateDeletes);

    // Record the external compaction in the metadata table before handing the
    // job out, so a restarted tserver can recover it.
    ExternalCompactionInfo extInfo = new ExternalCompactionInfo();
    extInfo.meta = new ExternalCompactionMetadata(info.jobFiles,
        Sets.difference(info.selectedFiles, info.jobFiles), tmpFile, compactorId, job.getKind(),
        job.getPriority(), job.getExecutor(), info.propagateDeletes, info.initiallySelectedAll,
        info.checkCompactionId);
    tablet.getContext().getAmple().mutateTablet(getExtent())
        .putExternalCompaction(externalCompactionId, extInfo.meta).mutate();
    extInfo.job = job;
    externalCompactions.put(externalCompactionId, extInfo);

    // Restrict the datafile map to just the files reserved for this job.
    SortedMap<StoredTabletFile, DataFileValue> datafiles = tablet.getDatafiles();
    HashMap<StoredTabletFile, DataFileValue> inputs = new HashMap<>();
    info.jobFiles.forEach(f -> inputs.put(f, datafiles.get(f)));

    TabletLogger.compacting(getExtent(), job, info.localCompactionCfg);

    return new ExternalCompactionJob(inputs, info.propagateDeletes, tmpFile, getExtent(),
        externalCompactionId, job.getKind(), info.iters, info.checkCompactionId, overrides);
  } catch (Exception e) {
    // Undo the in-memory registration and release the file reservation.
    externalCompactions.remove(externalCompactionId);
    completeCompaction(job, info.jobFiles, null);
    throw new RuntimeException(e);
  }
}
Usage of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
Example: class ThriftClientHandler, method loadFiles.
/**
 * Thrift endpoint that imports bulk-loaded files into the online tablets named
 * in {@code tabletImports}. Import is best-effort per tablet: an IOException
 * on one tablet is logged and does not abort the others.
 *
 * @param tid bulk transaction id, guarded by the transaction watcher
 * @param dir directory the relative file names in {@code tabletImports} resolve against
 * @param setTime whether imported entries get a tablet-assigned timestamp
 * @throws ThriftSecurityException if the caller lacks system permission
 */
@Override
public void loadFiles(TInfo tinfo, TCredentials credentials, long tid, String dir,
    Map<TKeyExtent, Map<String, MapFileInfo>> tabletImports, boolean setTime)
    throws ThriftSecurityException {
  if (!security.canPerformSystemActions(credentials)) {
    throw new ThriftSecurityException(credentials.getPrincipal(),
        SecurityErrorCode.PERMISSION_DENIED);
  }
  transactionWatcher.runQuietly(Constants.BULK_ARBITRATOR_TYPE, tid, () -> {
    tabletImports.forEach((tke, fileMap) -> {
      // Resolve each relative file name against its volume and fully qualify it.
      Map<TabletFile, MapFileInfo> newFileMap = new HashMap<>();
      for (Entry<String, MapFileInfo> mapping : fileMap.entrySet()) {
        Path path = new Path(dir, mapping.getKey());
        FileSystem ns = context.getVolumeManager().getFileSystemByPath(path);
        path = ns.makeQualified(path);
        newFileMap.put(new TabletFile(path), mapping.getValue());
      }
      var files = newFileMap.keySet().stream().map(TabletFile::getPathStr).collect(toList());
      server.updateBulkImportState(files, BulkImportState.INITIAL);

      // Skip extents this server is not currently hosting.
      Tablet importTablet = server.getOnlineTablet(KeyExtent.fromThrift(tke));
      if (importTablet != null) {
        try {
          server.updateBulkImportState(files, BulkImportState.PROCESSING);
          importTablet.importMapFiles(tid, newFileMap, setTime);
        } catch (IOException ioe) {
          // Best-effort per tablet: log and continue with the remaining imports.
          // Pass the exception itself so the stack trace is preserved, rather
          // than logging only getMessage().
          log.debug("files {} not imported to {}", fileMap.keySet(), KeyExtent.fromThrift(tke),
              ioe);
        } finally {
          server.removeBulkImportState(files);
        }
      }
    });
  });
}
Usage of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
Example: class Upgrader9to10, method upgradeRootTabletMetadata.
/**
 * Improvements to the metadata and root tables were made in this version. See pull request
 * <a href="https://github.com/apache/accumulo/pull/1174">#1174</a> for more details.
 *
 * <p>
 * Migrates the root tablet's metadata out of individual ZooKeeper nodes into the
 * new serialized root-tablet metadata format, then deletes the legacy nodes.
 * The whole method is written to be idempotent: the migration only runs when the
 * new metadata node is still absent/empty, and the legacy nodes are deleted only
 * after the new data has been written.
 */
private void upgradeRootTabletMetadata(ServerContext context) {
  // A null/empty value means the upgrade has not happened yet (or did not
  // complete); otherwise skip straight to the cleanup below.
  String rootMetaSer = getFromZK(context, ZROOT_TABLET);
  if (rootMetaSer == null || rootMetaSer.isEmpty()) {
    // Gather the pieces of root tablet state from their legacy ZK locations.
    String dir = getFromZK(context, ZROOT_TABLET_PATH);
    List<LogEntry> logs = getRootLogEntries(context);
    TServerInstance last = getLocation(context, ZROOT_TABLET_LAST_LOCATION);
    TServerInstance future = getLocation(context, ZROOT_TABLET_FUTURE_LOCATION);
    TServerInstance current = getLocation(context, ZROOT_TABLET_LOCATION);

    // Assemble the new-format metadata; locations are optional and only
    // written when the legacy node held a value.
    UpgradeMutator tabletMutator = new UpgradeMutator(context);
    tabletMutator.putPrevEndRow(RootTable.EXTENT.prevEndRow());
    tabletMutator.putDirName(upgradeDirColumn(dir));
    if (last != null)
      tabletMutator.putLocation(last, LocationType.LAST);
    if (future != null)
      tabletMutator.putLocation(future, LocationType.FUTURE);
    if (current != null)
      tabletMutator.putLocation(current, LocationType.CURRENT);
    logs.forEach(tabletMutator::putWal);

    // Re-qualify the root tablet's files and carry them over with their sizes.
    Map<String, DataFileValue> files = cleanupRootTabletFiles(context.getVolumeManager(), dir);
    files.forEach((path, dfv) -> tabletMutator.putFile(new TabletFile(new Path(path)), dfv));

    tabletMutator.putTime(computeRootTabletTime(context, files.keySet()));
    tabletMutator.mutate();
  }
  // Create the root GC-candidates node if missing; SKIP makes this safe to
  // repeat on a re-run of the upgrade.
  try {
    context.getZooReaderWriter().putPersistentData(
        context.getZooKeeperRoot() + ZROOT_TABLET_GC_CANDIDATES,
        new RootGcCandidates().toJson().getBytes(UTF_8), NodeExistsPolicy.SKIP);
  } catch (KeeperException | InterruptedException e) {
    throw new RuntimeException(e);
  }
  // this operation must be idempotent, so deleting after updating is very important
  delete(context, ZROOT_TABLET_CURRENT_LOGS);
  delete(context, ZROOT_TABLET_FUTURE_LOCATION);
  delete(context, ZROOT_TABLET_LAST_LOCATION);
  delete(context, ZROOT_TABLET_LOCATION);
  delete(context, ZROOT_TABLET_WALOGS);
  delete(context, ZROOT_TABLET_PATH);
}
Aggregations