Search in sources :

Example 21 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

The class MetadataTableUtil, method checkClone.

/**
 * Verifies that the tablets of a cloned table ({@code tableId}) reference a consistent set of
 * files from the source table ({@code srcTableId}), and repairs any clone tablets whose file set
 * diverged (e.g. because the source table split or compacted during the clone).
 *
 * <p>Walks both tables' tablet metadata in parallel. A clone tablet may span several source
 * tablets; its files must be a subset of the union of those source tablets' files. Consistent
 * tablets are marked with a "cloned" column; inconsistent ones have their metadata entries
 * deleted and rewritten from the current source tablets.
 *
 * @param testTableName scanner target override used by tests; presumably null in production —
 *        TODO confirm against createCloneScanner
 * @param srcTableId table being cloned from
 * @param tableId the clone table
 * @param client client used to scan metadata
 * @param bw writer used to mutate the clone's metadata entries
 * @return number of clone tablets that were rewritten (0 means the clone is fully consistent)
 * @throws TableNotFoundException if a metadata scanner cannot be created
 * @throws MutationsRejectedException if a metadata mutation is rejected
 */
@VisibleForTesting
public static int checkClone(String testTableName, TableId srcTableId, TableId tableId, AccumuloClient client, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
    Iterator<TabletMetadata> srcIter = createCloneScanner(testTableName, srcTableId, client).iterator();
    Iterator<TabletMetadata> cloneIter = createCloneScanner(testTableName, tableId, client).iterator();
    // Every table has at least one tablet; an empty scan means the table vanished mid-clone.
    if (!cloneIter.hasNext() || !srcIter.hasNext())
        throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId + " tableId=" + tableId);
    int rewrites = 0;
    while (cloneIter.hasNext()) {
        TabletMetadata cloneTablet = cloneIter.next();
        Text cloneEndRow = cloneTablet.getEndRow();
        HashSet<TabletFile> cloneFiles = new HashSet<>();
        // A non-null "cloned" column means this tablet was already verified on a previous pass.
        boolean cloneSuccessful = cloneTablet.getCloned() != null;
        if (!cloneSuccessful)
            cloneFiles.addAll(cloneTablet.getFiles());
        // Collect every source tablet whose range falls within this clone tablet's range.
        List<TabletMetadata> srcTablets = new ArrayList<>();
        TabletMetadata srcTablet = srcIter.next();
        srcTablets.add(srcTablet);
        Text srcEndRow = srcTablet.getEndRow();
        int cmp = compareEndRows(cloneEndRow, srcEndRow);
        // cmp < 0: source end row overshoots the clone's — a source tablet boundary disappeared
        // (merge/delete) while cloning, so the comparison cannot proceed.
        if (cmp < 0)
            throw new TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
        HashSet<TabletFile> srcFiles = new HashSet<>();
        if (!cloneSuccessful)
            srcFiles.addAll(srcTablet.getFiles());
        // cmp > 0: the clone tablet spans further; keep consuming source tablets (the source
        // likely split after the clone point) and accumulate their files.
        while (cmp > 0) {
            srcTablet = srcIter.next();
            srcTablets.add(srcTablet);
            srcEndRow = srcTablet.getEndRow();
            cmp = compareEndRows(cloneEndRow, srcEndRow);
            if (cmp < 0)
                throw new TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
            if (!cloneSuccessful)
                srcFiles.addAll(srcTablet.getFiles());
        }
        // Already verified earlier; the loop above still had to advance srcIter to stay aligned.
        if (cloneSuccessful)
            continue;
        if (srcFiles.containsAll(cloneFiles)) {
            // write out marker that this tablet was successfully cloned
            Mutation m = new Mutation(cloneTablet.getExtent().toMetaRow());
            m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK"));
            bw.addMutation(m);
        } else {
            // delete existing cloned tablet entry
            Mutation m = new Mutation(cloneTablet.getExtent().toMetaRow());
            for (Entry<Key, Value> entry : cloneTablet.getKeyValues().entrySet()) {
                Key k = entry.getKey();
                m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
            }
            bw.addMutation(m);
            // Recreate the clone's metadata from the current source tablets covering this range.
            for (TabletMetadata st : srcTablets) bw.addMutation(createCloneMutation(srcTableId, tableId, st.getKeyValues()));
            rewrites++;
        }
    }
    bw.flush();
    return rewrites;
}
Also used : ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) TabletDeletedException(org.apache.accumulo.core.metadata.schema.TabletDeletedException) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Example 22 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

The class CompactableImpl, method compact.

/**
 * Runs a reserved compaction job for this tablet: reserves the job's files, compacts them into a
 * temporary file, brings the result online, and always releases the reservation and updates the
 * major-compaction timer in the finally block.
 */
@Override
public void compact(CompactionServiceId service, CompactionJob job, RateLimiter readLimiter, RateLimiter writeLimiter, long queuedTime) {
    // Reserve the job's files; if another compaction already claimed them, there is nothing to do.
    Optional<CompactionInfo> reservation = reserveFilesForCompaction(service, job);
    if (reservation.isEmpty()) {
        return;
    }
    var info = reservation.get();
    StoredTabletFile committedFile = null;
    long startTime = System.currentTimeMillis();
    CompactionKind kind = job.getKind();
    CompactionStats stats = new CompactionStats();
    try {
        TabletLogger.compacting(getExtent(), job, info.localCompactionCfg);
        tablet.incrementStatusMajor();
        var check = new CompactionCheck(service, kind, info.checkCompactionId);
        TabletFile tmpFile = tablet.getNextMapFilenameForMajc(info.propagateDeletes);
        var env = new MajCEnv(kind, check, readLimiter, writeLimiter, info.propagateDeletes);
        // Map each reserved file to its size/entry metadata from the tablet's current datafiles.
        SortedMap<StoredTabletFile, DataFileValue> datafiles = tablet.getDatafiles();
        HashMap<StoredTabletFile, DataFileValue> jobFileSizes = new HashMap<>();
        for (var reserved : info.jobFiles) {
            jobFileSizes.put(reserved, datafiles.get(reserved));
        }
        stats = CompactableUtils.compact(tablet, job, info, env, jobFileSizes, tmpFile);
        committedFile = CompactableUtils.bringOnline(tablet.getDatafileManager(), info, stats, jobFileSizes, datafiles, kind, tmpFile);
        TabletLogger.compacted(getExtent(), job, committedFile);
    } catch (CompactionCanceledException cce) {
        // Cancellation is expected (e.g. tablet closing); log at debug and fall through to cleanup.
        log.debug("Compaction canceled {} ", getExtent());
    } catch (Exception e) {
        // Ensure the finally block records a failed compaction before rethrowing.
        committedFile = null;
        throw new RuntimeException(e);
    } finally {
        completeCompaction(job, info.jobFiles, committedFile);
        // A null committedFile marks the attempt as unsuccessful in the timer stats.
        tablet.updateTimer(MAJOR, queuedTime, startTime, stats.getEntriesRead(), committedFile == null);
    }
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CompactionStats(org.apache.accumulo.server.compaction.CompactionStats) UncheckedIOException(java.io.UncheckedIOException) CompactionCanceledException(org.apache.accumulo.server.compaction.FileCompactor.CompactionCanceledException) IOException(java.io.IOException) NoNodeException(org.apache.zookeeper.KeeperException.NoNodeException) CompactionKind(org.apache.accumulo.core.spi.compaction.CompactionKind) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) CompactionCanceledException(org.apache.accumulo.server.compaction.FileCompactor.CompactionCanceledException)

Example 23 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

The class CompactableImpl, method reserveExternalCompaction.

/**
 * Reserves this tablet's files for an external compaction, persists the external compaction
 * metadata, and returns the job description for the compactor. Returns null when the files could
 * not be reserved. On any failure after reservation, the reservation is rolled back before the
 * exception is rethrown.
 */
@Override
public ExternalCompactionJob reserveExternalCompaction(CompactionServiceId service, CompactionJob job, String compactorId, ExternalCompactionId externalCompactionId) {
    // External compactions are not supported for metadata tablets.
    Preconditions.checkState(!tablet.getExtent().isMeta());
    Optional<CompactionInfo> reservation = reserveFilesForCompaction(service, job);
    if (reservation.isEmpty()) {
        return null;
    }
    var info = reservation.get();
    try {
        Map<String, String> overrides = CompactableUtils.getOverrides(job.getKind(), tablet, info.localHelper, job.getFiles());
        TabletFile tmpFile = tablet.getNextMapFilenameForMajc(info.propagateDeletes);
        ExternalCompactionInfo extInfo = new ExternalCompactionInfo();
        extInfo.meta = new ExternalCompactionMetadata(info.jobFiles, Sets.difference(info.selectedFiles, info.jobFiles), tmpFile, compactorId, job.getKind(), job.getPriority(), job.getExecutor(), info.propagateDeletes, info.initiallySelectedAll, info.checkCompactionId);
        // Persist the external compaction metadata in the tablet's metadata before handing out the job.
        tablet.getContext().getAmple().mutateTablet(getExtent()).putExternalCompaction(externalCompactionId, extInfo.meta).mutate();
        extInfo.job = job;
        externalCompactions.put(externalCompactionId, extInfo);
        // Pair each reserved file with its current size/entry metadata.
        SortedMap<StoredTabletFile, DataFileValue> datafiles = tablet.getDatafiles();
        HashMap<StoredTabletFile, DataFileValue> jobFileSizes = new HashMap<>();
        for (var reserved : info.jobFiles) {
            jobFileSizes.put(reserved, datafiles.get(reserved));
        }
        TabletLogger.compacting(getExtent(), job, info.localCompactionCfg);
        return new ExternalCompactionJob(jobFileSizes, info.propagateDeletes, tmpFile, getExtent(), externalCompactionId, job.getKind(), info.iters, info.checkCompactionId, overrides);
    } catch (Exception e) {
        // Roll back: forget the external compaction and release the file reservation.
        externalCompactions.remove(externalCompactionId);
        completeCompaction(job, info.jobFiles, null);
        throw new RuntimeException(e);
    }
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ExternalCompactionJob(org.apache.accumulo.tserver.compactions.ExternalCompactionJob) UncheckedIOException(java.io.UncheckedIOException) CompactionCanceledException(org.apache.accumulo.server.compaction.FileCompactor.CompactionCanceledException) IOException(java.io.IOException) NoNodeException(org.apache.zookeeper.KeeperException.NoNodeException) ExternalCompactionMetadata(org.apache.accumulo.core.metadata.schema.ExternalCompactionMetadata) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile)

Example 24 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

The class ThriftClientHandler, method loadFiles.

/**
 * Bulk-import RPC: qualifies each relative file path against the bulk import directory's volume
 * and imports the files into the corresponding online tablets. Requires system-level permission.
 * Import state is tracked on the server and cleared in a finally block even if an import fails.
 */
@Override
public void loadFiles(TInfo tinfo, TCredentials credentials, long tid, String dir, Map<TKeyExtent, Map<String, MapFileInfo>> tabletImports, boolean setTime) throws ThriftSecurityException {
    if (!security.canPerformSystemActions(credentials)) {
        throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
    }
    transactionWatcher.runQuietly(Constants.BULK_ARBITRATOR_TYPE, tid, () -> {
        tabletImports.forEach((extent, relFiles) -> {
            // Resolve each relative name to a fully qualified path on its owning volume.
            Map<TabletFile, MapFileInfo> qualifiedFiles = new HashMap<>();
            relFiles.forEach((name, info) -> {
                Path path = new Path(dir, name);
                FileSystem fs = context.getVolumeManager().getFileSystemByPath(path);
                qualifiedFiles.put(new TabletFile(fs.makeQualified(path)), info);
            });
            List<String> pathStrings = new ArrayList<>();
            for (TabletFile tf : qualifiedFiles.keySet()) {
                pathStrings.add(tf.getPathStr());
            }
            server.updateBulkImportState(pathStrings, BulkImportState.INITIAL);
            Tablet onlineTablet = server.getOnlineTablet(KeyExtent.fromThrift(extent));
            // Skip tablets not hosted here; the coordinator will retry elsewhere — TODO confirm.
            if (onlineTablet != null) {
                try {
                    server.updateBulkImportState(pathStrings, BulkImportState.PROCESSING);
                    onlineTablet.importMapFiles(tid, qualifiedFiles, setTime);
                } catch (IOException ioe) {
                    // Best-effort: log and continue with the remaining tablets.
                    log.debug("files {} not imported to {}: {}", relFiles.keySet(), KeyExtent.fromThrift(extent), ioe.getMessage());
                } finally {
                    server.removeBulkImportState(pathStrings);
                }
            }
        });
    });
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) FileSystem(org.apache.hadoop.fs.FileSystem) TabletFile(org.apache.accumulo.core.metadata.TabletFile) MapFileInfo(org.apache.accumulo.core.dataImpl.thrift.MapFileInfo) Tablet(org.apache.accumulo.tserver.tablet.Tablet) IOException(java.io.IOException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException)

Example 25 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

The class Upgrader9to10, method upgradeRootTabletMetadata.

/**
 * Upgrades the root tablet's metadata from the legacy per-field ZooKeeper nodes to the improved
 * metadata/root table layout introduced in this version. See pull request
 * <a href="https://github.com/apache/accumulo/pull/1174">#1174</a> for more details.
 */
private void upgradeRootTabletMetadata(ServerContext context) {
    String serializedRootMeta = getFromZK(context, ZROOT_TABLET);
    // An absent/empty new-format node means the migration has not happened yet.
    if (serializedRootMeta == null || serializedRootMeta.isEmpty()) {
        // Gather the legacy state from the individual ZooKeeper nodes.
        String dir = getFromZK(context, ZROOT_TABLET_PATH);
        List<LogEntry> walEntries = getRootLogEntries(context);
        TServerInstance lastLoc = getLocation(context, ZROOT_TABLET_LAST_LOCATION);
        TServerInstance futureLoc = getLocation(context, ZROOT_TABLET_FUTURE_LOCATION);
        TServerInstance currentLoc = getLocation(context, ZROOT_TABLET_LOCATION);
        // Write the equivalent new-format root tablet metadata in one mutation.
        UpgradeMutator mutator = new UpgradeMutator(context);
        mutator.putPrevEndRow(RootTable.EXTENT.prevEndRow());
        mutator.putDirName(upgradeDirColumn(dir));
        if (lastLoc != null) {
            mutator.putLocation(lastLoc, LocationType.LAST);
        }
        if (futureLoc != null) {
            mutator.putLocation(futureLoc, LocationType.FUTURE);
        }
        if (currentLoc != null) {
            mutator.putLocation(currentLoc, LocationType.CURRENT);
        }
        walEntries.forEach(mutator::putWal);
        Map<String, DataFileValue> files = cleanupRootTabletFiles(context.getVolumeManager(), dir);
        files.forEach((path, dfv) -> mutator.putFile(new TabletFile(new Path(path)), dfv));
        mutator.putTime(computeRootTabletTime(context, files.keySet()));
        mutator.mutate();
    }
    // Initialize the GC-candidates node; SKIP leaves it untouched if it already exists.
    try {
        context.getZooReaderWriter().putPersistentData(context.getZooKeeperRoot() + ZROOT_TABLET_GC_CANDIDATES, new RootGcCandidates().toJson().getBytes(UTF_8), NodeExistsPolicy.SKIP);
    } catch (KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
    // this operation must be idempotent, so deleting after updating is very important
    delete(context, ZROOT_TABLET_CURRENT_LOGS);
    delete(context, ZROOT_TABLET_FUTURE_LOCATION);
    delete(context, ZROOT_TABLET_LAST_LOCATION);
    delete(context, ZROOT_TABLET_LOCATION);
    delete(context, ZROOT_TABLET_WALOGS);
    delete(context, ZROOT_TABLET_PATH);
}
Also used : Path(org.apache.hadoop.fs.Path) RootGcCandidates(org.apache.accumulo.server.metadata.RootGcCandidates) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance) TabletFile(org.apache.accumulo.core.metadata.TabletFile) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) KeeperException(org.apache.zookeeper.KeeperException)

Aggregations

TabletFile (org.apache.accumulo.core.metadata.TabletFile)36 StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile)20 IOException (java.io.IOException)17 Path (org.apache.hadoop.fs.Path)15 ArrayList (java.util.ArrayList)14 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)14 Key (org.apache.accumulo.core.data.Key)13 FileSystem (org.apache.hadoop.fs.FileSystem)13 HashMap (java.util.HashMap)12 TreeMap (java.util.TreeMap)10 Value (org.apache.accumulo.core.data.Value)8 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)8 HashSet (java.util.HashSet)6 AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)6 FileSKVIterator (org.apache.accumulo.core.file.FileSKVIterator)6 SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator)6 Text (org.apache.hadoop.io.Text)6 Map (java.util.Map)5 PartialKey (org.apache.accumulo.core.data.PartialKey)5 List (java.util.List)4