
Example 31 with VolumeManager

use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

the class FileUtilTest method testCleanupIndexOpWithDfsDir.

@Test
public void testCleanupIndexOpWithDfsDir() throws IOException {
    // Create a "unique" tmp directory for each volume
    File tmp1 = new File(accumuloDir, "tmp");
    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
    Path tmpPath1 = new Path(tmp1.toURI());
    HashMap<Property, String> testProps = new HashMap<>();
    testProps.put(INSTANCE_DFS_DIR, accumuloDir.getAbsolutePath());
    VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
    FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
    Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), HashMap (java.util.HashMap), File (java.io.File), Property (org.apache.accumulo.core.conf.Property), Test (org.junit.Test)
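
The core of this test is the pairing of a local-filesystem VolumeManager with a temporary directory that must end up deleted. Below is a minimal stand-alone sketch of that pairing, using only calls that appear in these examples (VolumeManagerImpl.getLocal, exists, delete); the base directory path is a placeholder.

import java.io.File;
import java.io.IOException;

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.hadoop.fs.Path;

public class LocalVolumeCleanupSketch {
    public static void main(String[] args) throws IOException {
        // placeholder base directory; any writable local path would do
        File baseDir = new File("/tmp/accumulo-volume-example");
        File tmpDir = new File(baseDir, "tmp");
        if (!tmpDir.mkdirs() && !tmpDir.isDirectory()) {
            throw new IOException("could not create " + tmpDir);
        }
        // a VolumeManager backed by the local file system, as in the test above
        VolumeManager fs = VolumeManagerImpl.getLocal(baseDir.getAbsolutePath());
        // delete the (empty) temporary directory through the VolumeManager
        Path tmpPath = new Path(tmpDir.toURI());
        if (fs.exists(tmpPath)) {
            fs.delete(tmpPath);
        }
    }
}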

Example 32 with VolumeManager

use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

the class BulkImport method call.

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    log.debug(" tid {} sourceDir {}", tid, sourceDir);
    Utils.getReadLock(tableId, tid).lock();
    // check that the error directory exists and is empty
    VolumeManager fs = master.getFileSystem();
    Path errorPath = new Path(errorDir);
    FileStatus errorStatus = null;
    try {
        errorStatus = fs.getFileStatus(errorPath);
    } catch (FileNotFoundException ex) {
    // ignored
    }
    if (errorStatus == null)
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " does not exist");
    if (!errorStatus.isDirectory())
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not a directory");
    if (fs.listStatus(errorPath).length != 0)
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not empty");
    ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    master.updateBulkImportStatus(sourceDir, BulkImportState.MOVING);
    // move the files into the directory
    try {
        String bulkDir = prepareBulkImport(master, fs, sourceDir, tableId);
        log.debug(" tid {} bulkDir {}", tid, bulkDir);
        return new LoadFiles(tableId, sourceDir, bulkDir, errorDir, setTime);
    } catch (IOException ex) {
        log.error("error preparing the bulk import directory", ex);
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY, sourceDir + ": " + ex);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), FileStatus (org.apache.hadoop.fs.FileStatus), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
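
The three pre-flight checks above (the error directory must exist, must be a directory, and must be empty) form a small reusable pattern. The hedged sketch below factors them into a hypothetical helper, substituting a plain IllegalArgumentException for the AcceptableThriftTableOperationException the FATE operation throws.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

class ErrorDirValidator {
    // hypothetical helper; the real bulk import code throws a Thrift-specific exception
    static void validateEmptyDir(VolumeManager fs, String errorDir) throws IOException {
        Path errorPath = new Path(errorDir);
        FileStatus errorStatus = null;
        try {
            errorStatus = fs.getFileStatus(errorPath);
        } catch (FileNotFoundException ex) {
            // fall through and report the missing directory below
        }
        if (errorStatus == null)
            throw new IllegalArgumentException(errorDir + " does not exist");
        if (!errorStatus.isDirectory())
            throw new IllegalArgumentException(errorDir + " is not a directory");
        if (fs.listStatus(errorPath).length != 0)
            throw new IllegalArgumentException(errorDir + " is not empty");
    }
}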

Example 33 with VolumeManager

use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

the class TabletServerLogger method startLogMaker.

private synchronized void startLogMaker() {
    if (nextLogMaker != null) {
        return;
    }
    nextLogMaker = new SimpleThreadPool(1, "WALog creator");
    nextLogMaker.submit(new LoggingRunnable(log, new Runnable() {

        @Override
        public void run() {
            final ServerResources conf = tserver.getServerConfig();
            final VolumeManager fs = conf.getFileSystem();
            while (!nextLogMaker.isShutdown()) {
                DfsLogger alog = null;
                try {
                    log.debug("Creating next WAL");
                    alog = new DfsLogger(conf, syncCounter, flushCounter);
                    alog.open(tserver.getClientAddressString());
                    String fileName = alog.getFileName();
                    log.debug("Created next WAL " + fileName);
                    tserver.addNewLogMarker(alog);
                    while (!nextLog.offer(alog, 12, TimeUnit.HOURS)) {
                        log.info("Our WAL was not used for 12 hours: {}", fileName);
                    }
                } catch (Exception t) {
                    log.error("Failed to open WAL", t);
                    if (null != alog) {
                        // close the failed DfsLogger to release its resources
                        // before trying to create a new one.
                        try {
                            alog.close();
                        } catch (Exception e) {
                            log.error("Failed to close WAL after it failed to open", e);
                        }
                        // Try to avoid leaving a bunch of empty WALs lying around
                        try {
                            Path path = alog.getPath();
                            if (fs.exists(path)) {
                                fs.delete(path);
                            }
                        } catch (Exception e) {
                            log.warn("Failed to delete a WAL that failed to open", e);
                        }
                    }
                    try {
                        nextLog.offer(t, 12, TimeUnit.HOURS);
                    } catch (InterruptedException ex) {
                    // ignore
                    }
                }
            }
        }
    }));
}
Also used: Path (org.apache.hadoop.fs.Path), LoggingRunnable (org.apache.accumulo.fate.util.LoggingRunnable), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool), ServerResources (org.apache.accumulo.tserver.log.DfsLogger.ServerResources), IOException (java.io.IOException)
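
The notable part of this example is its failure path: when a WAL fails to open, the DfsLogger is closed and its partially written file is removed so empty WALs do not pile up. Below is a reduced sketch of just the file-removal step, written as a hypothetical helper that takes the path returned by DfsLogger.getPath().

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;

class WalCleanupSketch {
    // hypothetical helper: best-effort removal of a WAL file whose open failed;
    // walPath would come from DfsLogger.getPath() as in the example above
    static void deleteFailedWal(VolumeManager fs, Path walPath, Logger log) {
        try {
            if (fs.exists(walPath)) {
                fs.delete(walPath);
            }
        } catch (Exception e) {
            // deletion is best effort; just log and let the caller retry the open
            log.warn("Failed to delete a WAL that failed to open", e);
        }
    }
}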

Example 34 with VolumeManager

use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

the class LogReader method main.

/**
 * Dump a Log File (Map or Sequence) to stdout. Will read from HDFS or local file system.
 *
 * @param args
 *          - first argument is the file to print
 */
public static void main(String[] args) throws IOException {
    Opts opts = new Opts();
    opts.parseArgs(LogReader.class.getName(), args);
    VolumeManager fs = VolumeManagerImpl.get();
    Matcher rowMatcher = null;
    KeyExtent ke = null;
    Text row = null;
    if (opts.files.isEmpty()) {
        new JCommander(opts).usage();
        return;
    }
    if (opts.row != null)
        row = new Text(opts.row);
    if (opts.extent != null) {
        String[] sa = opts.extent.split(";");
        ke = new KeyExtent(Table.ID.of(sa[0]), new Text(sa[1]), new Text(sa[2]));
    }
    if (opts.regexp != null) {
        Pattern pattern = Pattern.compile(opts.regexp);
        rowMatcher = pattern.matcher("");
    }
    Set<Integer> tabletIds = new HashSet<>();
    for (String file : opts.files) {
        Path path = new Path(file);
        LogFileKey key = new LogFileKey();
        LogFileValue value = new LogFileValue();
        if (fs.isFile(path)) {
            try (final FSDataInputStream fsinput = fs.open(path)) {
                // read log entries from a simple hdfs file
                DFSLoggerInputStreams streams;
                try {
                    streams = DfsLogger.readHeaderAndReturnStream(fsinput, SiteConfiguration.getInstance());
                } catch (LogHeaderIncompleteException e) {
                    log.warn("Could not read header for {} . Ignoring...", path);
                    continue;
                }
                try (DataInputStream input = streams.getDecryptingInputStream()) {
                    while (true) {
                        try {
                            key.readFields(input);
                            value.readFields(input);
                        } catch (EOFException ex) {
                            break;
                        }
                        printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
                    }
                }
            }
        } else {
            // read the log entries sorted in a map file
            MultiReader input = new MultiReader(fs, path);
            while (input.next(key, value)) {
                printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
            }
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), Pattern (java.util.regex.Pattern), Matcher (java.util.regex.Matcher), MultiReader (org.apache.accumulo.tserver.log.MultiReader), Text (org.apache.hadoop.io.Text), DataInputStream (java.io.DataInputStream), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), LogHeaderIncompleteException (org.apache.accumulo.tserver.log.DfsLogger.LogHeaderIncompleteException), JCommander (com.beust.jcommander.JCommander), DFSLoggerInputStreams (org.apache.accumulo.tserver.log.DfsLogger.DFSLoggerInputStreams), EOFException (java.io.EOFException), HashSet (java.util.HashSet)
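
LogReader handles two on-disk layouts: a single WAL file (read through DfsLogger's header-aware, possibly decrypting stream) and a sorted map-file directory (read through MultiReader). The sketch below strips the example down to that dispatch; the import locations for LogFileKey and LogFileValue are assumed from the tserver logger package, and the per-entry filtering is reduced to a println.

import java.io.DataInputStream;
import java.io.EOFException;

import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.tserver.log.DfsLogger;
import org.apache.accumulo.tserver.log.DfsLogger.DFSLoggerInputStreams;
import org.apache.accumulo.tserver.log.MultiReader;
import org.apache.accumulo.tserver.logger.LogFileKey;
import org.apache.accumulo.tserver.logger.LogFileValue;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;

class WalDumpSketch {
    // hypothetical reduced version of LogReader.main: prints every key/value pair
    static void dump(VolumeManager fs, Path path) throws Exception {
        LogFileKey key = new LogFileKey();
        LogFileValue value = new LogFileValue();
        if (fs.isFile(path)) {
            // a single WAL file: strip the header, then read records until EOF
            try (FSDataInputStream fsinput = fs.open(path)) {
                DFSLoggerInputStreams streams =
                    DfsLogger.readHeaderAndReturnStream(fsinput, SiteConfiguration.getInstance());
                try (DataInputStream input = streams.getDecryptingInputStream()) {
                    while (true) {
                        try {
                            key.readFields(input);
                            value.readFields(input);
                        } catch (EOFException ex) {
                            break;
                        }
                        System.out.println(key + " " + value);
                    }
                }
            }
        } else {
            // a sorted map-file directory produced by log sorting
            MultiReader input = new MultiReader(fs, path);
            while (input.next(key, value)) {
                System.out.println(key + " " + value);
            }
        }
    }
}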

Example 35 with VolumeManager

use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

the class Tablet method _majorCompact.

private CompactionStats _majorCompact(MajorCompactionReason reason) throws IOException, CompactionCanceledException {
    long t1, t2, t3;
    Pair<Long, UserCompactionConfig> compactionId = null;
    CompactionStrategy strategy = null;
    Map<FileRef, Pair<Key, Key>> firstAndLastKeys = null;
    if (reason == MajorCompactionReason.USER) {
        try {
            compactionId = getCompactionID();
            strategy = createCompactionStrategy(compactionId.getSecond().getCompactionStrategy());
        } catch (NoNodeException e) {
            throw new RuntimeException(e);
        }
    } else if (reason == MajorCompactionReason.NORMAL || reason == MajorCompactionReason.IDLE) {
        strategy = Property.createTableInstanceFromPropertyName(tableConfiguration, Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class, new DefaultCompactionStrategy());
        strategy.init(Property.getCompactionStrategyOptions(tableConfiguration));
    } else if (reason == MajorCompactionReason.CHOP) {
        firstAndLastKeys = getFirstAndLastKeys(getDatafileManager().getDatafileSizes());
    } else {
        throw new IllegalArgumentException("Unknown compaction reason " + reason);
    }
    if (strategy != null) {
        BlockCache sc = tabletResources.getTabletServerResourceManager().getSummaryCache();
        BlockCache ic = tabletResources.getTabletServerResourceManager().getIndexCache();
        MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, getTabletServer().getFileSystem(), tableConfiguration, sc, ic);
        request.setFiles(getDatafileManager().getDatafileSizes());
        strategy.gatherInformation(request);
    }
    Map<FileRef, DataFileValue> filesToCompact = null;
    int maxFilesToCompact = tableConfiguration.getCount(Property.TSERV_MAJC_THREAD_MAXOPEN);
    CompactionStats majCStats = new CompactionStats();
    CompactionPlan plan = null;
    boolean propogateDeletes = false;
    boolean updateCompactionID = false;
    synchronized (this) {
        // plan all that work that needs to be done in the sync block... then do the actual work
        // outside the sync block
        t1 = System.currentTimeMillis();
        majorCompactionState = CompactionState.WAITING_TO_START;
        getTabletMemory().waitForMinC();
        t2 = System.currentTimeMillis();
        majorCompactionState = CompactionState.IN_PROGRESS;
        notifyAll();
        VolumeManager fs = getTabletServer().getFileSystem();
        if (extent.isRootTablet()) {
            // very important that we call this before doing major compaction,
            // otherwise deleted compacted files could possibly be brought back
            // at some point if the file they were compacted to was legitimately
            // removed by a major compaction
            RootFiles.cleanupReplacement(fs, fs.listStatus(this.location), false);
        }
        SortedMap<FileRef, DataFileValue> allFiles = getDatafileManager().getDatafileSizes();
        List<FileRef> inputFiles = new ArrayList<>();
        if (reason == MajorCompactionReason.CHOP) {
            // enforce rules: files with keys outside our range need to be compacted
            inputFiles.addAll(findChopFiles(extent, firstAndLastKeys, allFiles.keySet()));
        } else {
            MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, tableConfiguration);
            request.setFiles(allFiles);
            plan = strategy.getCompactionPlan(request);
            if (plan != null) {
                plan.validate(allFiles.keySet());
                inputFiles.addAll(plan.inputFiles);
            }
        }
        if (inputFiles.isEmpty()) {
            if (reason == MajorCompactionReason.USER) {
                if (compactionId.getSecond().getIterators().isEmpty()) {
                    log.debug("No-op major compaction by USER on 0 input files because no iterators present.");
                    lastCompactID = compactionId.getFirst();
                    updateCompactionID = true;
                } else {
                    log.debug("Major compaction by USER on 0 input files with iterators.");
                    filesToCompact = new HashMap<>();
                }
            } else {
                return majCStats;
            }
        } else {
            // If no original files will exist at the end of the compaction, we do not have to propogate deletes
            Set<FileRef> droppedFiles = new HashSet<>();
            droppedFiles.addAll(inputFiles);
            if (plan != null)
                droppedFiles.addAll(plan.deleteFiles);
            propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
            log.debug("Major compaction plan: {} propogate deletes : {}", plan, propogateDeletes);
            filesToCompact = new HashMap<>(allFiles);
            filesToCompact.keySet().retainAll(inputFiles);
            getDatafileManager().reserveMajorCompactingFiles(filesToCompact.keySet());
        }
        t3 = System.currentTimeMillis();
    }
    try {
        log.debug(String.format("MajC initiate lock %.2f secs, wait %.2f secs", (t3 - t2) / 1000.0, (t2 - t1) / 1000.0));
        if (updateCompactionID) {
            MetadataTableUtil.updateTabletCompactID(extent, compactionId.getFirst(), tabletServer, getTabletServer().getLock());
            return majCStats;
        }
        if (!propogateDeletes && compactionId == null) {
            // compacting everything, so update the compaction id in metadata
            try {
                compactionId = getCompactionID();
                if (compactionId.getSecond().getCompactionStrategy() != null) {
                    compactionId = null;
                // TODO maybe return unless chop?
                }
            } catch (NoNodeException e) {
                throw new RuntimeException(e);
            }
        }
        List<IteratorSetting> compactionIterators = new ArrayList<>();
        if (compactionId != null) {
            if (reason == MajorCompactionReason.USER) {
                if (getCompactionCancelID() >= compactionId.getFirst()) {
                    // compaction was canceled
                    return majCStats;
                }
                compactionIterators = compactionId.getSecond().getIterators();
                synchronized (this) {
                    if (lastCompactID >= compactionId.getFirst())
                        // already compacted
                        return majCStats;
                }
            }
        }
        // ACCUMULO-3645 run loop at least once, even if filesToCompact.isEmpty()
        do {
            int numToCompact = maxFilesToCompact;
            if (filesToCompact.size() > maxFilesToCompact && filesToCompact.size() < 2 * maxFilesToCompact) {
                // on the second to last compaction pass, compact the minimum amount of files possible
                numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
            }
            Set<FileRef> smallestFiles = removeSmallest(filesToCompact, numToCompact);
            FileRef fileName = getNextMapFilename((filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
            FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");
            AccumuloConfiguration tableConf = createTableConfiguration(tableConfiguration, plan);
            Span span = Trace.start("compactFiles");
            try {
                CompactionEnv cenv = new CompactionEnv() {

                    @Override
                    public boolean isCompactionEnabled() {
                        return Tablet.this.isCompactionEnabled();
                    }

                    @Override
                    public IteratorScope getIteratorScope() {
                        return IteratorScope.majc;
                    }

                    @Override
                    public RateLimiter getReadLimiter() {
                        return getTabletServer().getMajorCompactionReadLimiter();
                    }

                    @Override
                    public RateLimiter getWriteLimiter() {
                        return getTabletServer().getMajorCompactionWriteLimiter();
                    }
                };
                HashMap<FileRef, DataFileValue> copy = new HashMap<>(getDatafileManager().getDatafileSizes());
                if (!copy.keySet().containsAll(smallestFiles))
                    throw new IllegalStateException("Cannot find data file values for " + smallestFiles);
                copy.keySet().retainAll(smallestFiles);
                log.debug("Starting MajC {} ({}) {} --> {} {}", extent, reason, copy.keySet(), compactTmpName, compactionIterators);
                // always propagate deletes, unless last batch
                boolean lastBatch = filesToCompact.isEmpty();
                Compactor compactor = new Compactor(tabletServer, this, copy, null, compactTmpName, lastBatch ? propogateDeletes : true, cenv, compactionIterators, reason.ordinal(), tableConf);
                CompactionStats mcs = compactor.call();
                span.data("files", "" + smallestFiles.size());
                span.data("read", "" + mcs.getEntriesRead());
                span.data("written", "" + mcs.getEntriesWritten());
                majCStats.add(mcs);
                if (lastBatch && plan != null && plan.deleteFiles != null) {
                    smallestFiles.addAll(plan.deleteFiles);
                }
                getDatafileManager().bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName, filesToCompact.size() == 0 && compactionId != null ? compactionId.getFirst() : null, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
                // a pass that writes zero entries has its output file deleted, so do not
                // add the deleted file as an input to the next pass
                if (filesToCompact.size() > 0 && mcs.getEntriesWritten() > 0) {
                    filesToCompact.put(fileName, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
                }
            } finally {
                span.stop();
            }
        } while (filesToCompact.size() > 0);
        return majCStats;
    } finally {
        synchronized (Tablet.this) {
            getDatafileManager().clearMajorCompactingFile();
        }
    }
}
Also used: VolumeManager (org.apache.accumulo.server.fs.VolumeManager), CompactionPlan (org.apache.accumulo.tserver.compaction.CompactionPlan), NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException), DefaultCompactionStrategy (org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy), HashMap (java.util.HashMap), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), Span (org.apache.accumulo.core.trace.Span), MajorCompactionRequest (org.apache.accumulo.tserver.compaction.MajorCompactionRequest), FileRef (org.apache.accumulo.server.fs.FileRef), Pair (org.apache.accumulo.core.util.Pair), HashSet (java.util.HashSet), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), CompactionStrategy (org.apache.accumulo.tserver.compaction.CompactionStrategy), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), UserCompactionConfig (org.apache.accumulo.server.master.tableOps.UserCompactionConfig), CompactionEnv (org.apache.accumulo.tserver.tablet.Compactor.CompactionEnv), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), AtomicLong (java.util.concurrent.atomic.AtomicLong), BlockCache (org.apache.accumulo.core.file.blockfile.cache.BlockCache)
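
One detail that is easy to miss is the batching arithmetic in the do/while loop: each pass compacts at most maxFilesToCompact files into one output that is fed back in, and when the remaining count falls strictly between maxFilesToCompact and 2 * maxFilesToCompact the pass is shrunk to size - maxFilesToCompact + 1 so the final pass can be as full as possible. The following is a hypothetical stand-alone sketch of that schedule; it assumes every pass writes at least one entry, so each non-final pass adds one output file back.

class CompactionBatchSketch {
    // hypothetical stand-alone version of the batching logic in _majorCompact
    static void printPasses(int fileCount, int maxFilesToCompact) {
        int remaining = fileCount;
        int pass = 1;
        while (remaining > 0) {
            int numToCompact = maxFilesToCompact;
            if (remaining > maxFilesToCompact && remaining < 2 * maxFilesToCompact) {
                // shrink the second-to-last pass so the last pass is as full as possible
                numToCompact = remaining - maxFilesToCompact + 1;
            }
            int batch = Math.min(numToCompact, remaining);
            // each non-final pass writes one output file that joins the next pass
            remaining = remaining - batch + (remaining > batch ? 1 : 0);
            System.out.println("pass " + pass++ + ": compact " + batch + " files, "
                + remaining + " remaining");
        }
    }

    public static void main(String[] args) {
        // e.g. 25 files with a cap of 10 per pass -> passes of 10, 7, 10
        printPasses(25, 10);
    }
}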

Aggregations

VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 57
Path (org.apache.hadoop.fs.Path): 30
IOException (java.io.IOException): 17
Test (org.junit.Test): 17
Key (org.apache.accumulo.core.data.Key): 14
HashMap (java.util.HashMap): 13
Value (org.apache.accumulo.core.data.Value): 13
Scanner (org.apache.accumulo.core.client.Scanner): 12
ArrayList (java.util.ArrayList): 11
FileRef (org.apache.accumulo.server.fs.FileRef): 10
Connector (org.apache.accumulo.core.client.Connector): 9
Instance (org.apache.accumulo.core.client.Instance): 9
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 7
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 7
AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 7
ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory): 7
File (java.io.File): 6
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 6
FileStatus (org.apache.hadoop.fs.FileStatus): 6
Text (org.apache.hadoop.io.Text): 6