
Example 1 with InterruptibleIterator

Use of org.apache.accumulo.core.iterators.system.InterruptibleIterator in the Apache Accumulo project.

The class ScanDataSource, method createIterator:

private SortedKeyValueIterator<Key, Value> createIterator() throws IOException {
    Map<FileRef, DataFileValue> files;
    SamplerConfigurationImpl samplerConfig = options.getSamplerConfigurationImpl();
    synchronized (tablet) {
        if (memIters != null)
            throw new IllegalStateException("Tried to create new scan iterator w/o releasing memory");
        if (tablet.isClosed())
            throw new TabletClosedException();
        if (interruptFlag.get())
            throw new IterationInterruptedException(tablet.getExtent().toString() + " " + interruptFlag.hashCode());
        // only acquire the file manager when we know the tablet is open
        if (fileManager == null) {
            fileManager = tablet.getTabletResources().newScanFileManager();
            tablet.addActiveScans(this);
        }
        if (fileManager.getNumOpenFiles() != 0)
            throw new IllegalStateException("Tried to create new scan iterator w/o releasing files");
        // set this before trying to get iterators in case
        // getIterators() throws an exception
        expectedDeletionCount = tablet.getDataSourceDeletions();
        memIters = tablet.getTabletMemory().getIterators(samplerConfig);
        Pair<Long, Map<FileRef, DataFileValue>> reservation = tablet.getDatafileManager().reserveFilesForScan();
        fileReservationId = reservation.getFirst();
        files = reservation.getSecond();
    }
    // every source iterator, file-backed or in-memory, is interruptible;
    // hand them all the same shared flag so a closing tablet can cancel the scan
    Collection<InterruptibleIterator> mapfiles = fileManager.openFiles(files, options.isIsolated(), samplerConfig);
    for (SortedKeyValueIterator<Key, Value> skvi : Iterables.concat(mapfiles, memIters)) {
        ((InterruptibleIterator) skvi).setInterruptFlag(interruptFlag);
    }
    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(mapfiles.size() + memIters.size());
    iters.addAll(mapfiles);
    iters.addAll(memIters);
    MultiIterator multiIter = new MultiIterator(iters, tablet.getExtent());
    TabletIteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.scan, tablet.getTableConfiguration(), fileManager, files, options.getAuthorizations(), samplerConfig);
    statsIterator = new StatsIterator(multiIter, TabletServer.seekCount, tablet.getScannedCounter());
    SortedKeyValueIterator<Key, Value> visFilter = IteratorUtil.setupSystemScanIterators(statsIterator, options.getColumnSet(), options.getAuthorizations(), options.getDefaultLabels());
    if (!loadIters) {
        return visFilter;
    } else {
        List<IterInfo> iterInfos;
        Map<String, Map<String, String>> iterOpts;
        ParsedIteratorConfig pic = tablet.getTableConfiguration().getParsedIteratorConfig(IteratorScope.scan);
        if (options.getSsiList().isEmpty() && options.getSsio().isEmpty()) {
            // No scan-time iterator options were set, so the pre-parsed table iterator options can be used as-is.
            iterInfos = pic.getIterInfo();
            iterOpts = pic.getOpts();
        } else {
            // Scan-time iterator options were set, so they need to be merged with the pre-parsed table iterator options.
            iterOpts = new HashMap<>(pic.getOpts().size() + options.getSsio().size());
            iterInfos = new ArrayList<>(pic.getIterInfo().size() + options.getSsiList().size());
            IteratorUtil.mergeIteratorConfig(iterInfos, iterOpts, pic.getIterInfo(), pic.getOpts(), options.getSsiList(), options.getSsio());
        }
        String context;
        if (options.getClassLoaderContext() != null) {
            log.trace("Loading iterators for scan with scan context: {}", options.getClassLoaderContext());
            context = options.getClassLoaderContext();
        } else {
            context = pic.getContext();
            if (context != null) {
                log.trace("Loading iterators for scan with table context: {}", options.getClassLoaderContext());
            } else {
                log.trace("Loading iterators for scan");
            }
        }
        return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(visFilter, iterInfos, iterOpts, iterEnv, true, context));
    }
}
Also used: SamplerConfigurationImpl (org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl), ArrayList (java.util.ArrayList), InterruptibleIterator (org.apache.accumulo.core.iterators.system.InterruptibleIterator), IterInfo (org.apache.accumulo.core.data.thrift.IterInfo), FileRef (org.apache.accumulo.server.fs.FileRef), TabletIteratorEnvironment (org.apache.accumulo.tserver.TabletIteratorEnvironment), IterationInterruptedException (org.apache.accumulo.core.iterators.IterationInterruptedException), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), MultiIterator (org.apache.accumulo.core.iterators.system.MultiIterator), SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator), StatsIterator (org.apache.accumulo.core.iterators.system.StatsIterator), Value (org.apache.accumulo.core.data.Value), ParsedIteratorConfig (org.apache.accumulo.server.conf.TableConfiguration.ParsedIteratorConfig), HashMap (java.util.HashMap), Map (java.util.Map), Key (org.apache.accumulo.core.data.Key)
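
The pattern worth noting in this example is that every source iterator, whether it reads from memory or from files, receives the same shared interrupt flag, so a single flag flip (for instance when the tablet closes) cancels the whole merged scan cooperatively. Below is a minimal, self-contained sketch of that pattern; the Interruptible interface and InterruptibleScan wrapper are simplified stand-ins invented for illustration, not Accumulo's actual classes.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

// Simplified, hypothetical stand-in for Accumulo's InterruptibleIterator contract.
interface Interruptible {
    void setInterruptFlag(AtomicBoolean flag);
}

// Wraps any entry iterator and checks the shared flag on every advance,
// mirroring how createIterator hands one flag to all of its source iterators.
class InterruptibleScan<K, V> implements Iterator<Map.Entry<K, V>>, Interruptible {

    private final Iterator<Map.Entry<K, V>> source;
    private AtomicBoolean interruptFlag = new AtomicBoolean(false);

    InterruptibleScan(Iterator<Map.Entry<K, V>> source) {
        this.source = source;
    }

    @Override
    public void setInterruptFlag(AtomicBoolean flag) {
        this.interruptFlag = flag;
    }

    private void checkInterrupted() {
        if (interruptFlag.get())
            throw new IllegalStateException("scan interrupted");
    }

    @Override
    public boolean hasNext() {
        checkInterrupted();
        return source.hasNext();
    }

    @Override
    public Map.Entry<K, V> next() {
        checkInterrupted();
        return source.next();
    }
}

Sharing one AtomicBoolean across all sources means cancellation needs no per-iterator bookkeeping: the close path sets the flag once, and whichever iterator advances next raises the exception.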

Example 2 with InterruptibleIterator

Use of org.apache.accumulo.core.iterators.system.InterruptibleIterator in the Apache Accumulo project.

The class InMemoryMap, method delete:

public void delete(long waitTime) {
    synchronized (this) {
        if (deleted)
            throw new IllegalStateException("Double delete");
        deleted = true;
    }
    long t1 = System.currentTimeMillis();
    while (activeIters.size() > 0 && System.currentTimeMillis() - t1 < waitTime) {
        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    }
    if (activeIters.size() > 0) {
        // dump memmap exactly as is to a tmp file on disk, and switch scans to that temp file
        try {
            Configuration conf = CachedConfiguration.getInstance();
            FileSystem fs = FileSystem.getLocal(conf);
            String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;
            Configuration newConf = new Configuration(conf);
            newConf.setInt("io.seqfile.compress.blocksize", 100000);
            AccumuloConfiguration siteConf = SiteConfiguration.getInstance();
            if (getOrCreateSampler() != null) {
                siteConf = createSampleConfig(siteConf);
            }
            FileSKVWriter out = new RFileOperations().newWriterBuilder().forFile(tmpFile, fs, newConf).withTableConfiguration(siteConf).build();
            InterruptibleIterator iter = map.skvIterator(null);
            HashSet<ByteSequence> allfams = new HashSet<>();
            // write each configured locality group, remembering its column families
            // so the default locality group below can exclude them
            for (Entry<String, Set<ByteSequence>> entry : lggroups.entrySet()) {
                allfams.addAll(entry.getValue());
                out.startNewLocalityGroup(entry.getKey(), entry.getValue());
                iter.seek(new Range(), entry.getValue(), true);
                dumpLocalityGroup(out, iter);
            }
            // everything not covered by a named locality group goes in the default group
            out.startDefaultLocalityGroup();
            iter.seek(new Range(), allfams, false);
            dumpLocalityGroup(out, iter);
            out.close();
            log.debug("Created mem dump file {}", tmpFile);
            memDumpFile = tmpFile;
            synchronized (activeIters) {
                for (MemoryIterator mi : activeIters) {
                    mi.switchNow();
                }
            }
            // rely on unix behavior that file will be deleted when last
            // reader closes it
            fs.delete(new Path(memDumpFile), true);
        } catch (IOException ioe) {
            log.error("Failed to create mem dump file", ioe);
            while (activeIters.size() > 0) {
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            }
        }
    }
    SimpleMap tmpMap = map;
    synchronized (this) {
        map = null;
    }
    tmpMap.delete();
}
Also used: Path (org.apache.hadoop.fs.Path), Set (java.util.Set), HashSet (java.util.HashSet), Configuration (org.apache.hadoop.conf.Configuration), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), SiteConfiguration (org.apache.accumulo.core.conf.SiteConfiguration), CachedConfiguration (org.apache.accumulo.core.util.CachedConfiguration), FileSKVWriter (org.apache.accumulo.core.file.FileSKVWriter), IOException (java.io.IOException), Range (org.apache.accumulo.core.data.Range), InterruptibleIterator (org.apache.accumulo.core.iterators.system.InterruptibleIterator), RFileOperations (org.apache.accumulo.core.file.rfile.RFileOperations), FileSystem (org.apache.hadoop.fs.FileSystem), ByteSequence (org.apache.accumulo.core.data.ByteSequence)
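
The shape of delete(long waitTime) is a general two-phase teardown: first wait a bounded time for active readers to drain, and only if some remain, pay for the dump-and-switch so memory can still be released. Below is a minimal sketch of that idiom; GracefulTeardown, dumpToDiskAndSwitchReaders, and releaseMemory are hypothetical placeholders standing in for the RFile mem dump and the map release, not Accumulo's actual API.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical sketch of the two-phase teardown idiom used by InMemoryMap.delete.
class GracefulTeardown {

    private final AtomicInteger activeReaders = new AtomicInteger();

    void teardown(long waitMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + waitMillis;
        // phase 1: cheap path, just wait for readers to finish on their own
        while (activeReaders.get() > 0 && System.currentTimeMillis() < deadline) {
            TimeUnit.MILLISECONDS.sleep(50);
        }
        if (activeReaders.get() > 0) {
            // phase 2: readers are still active, so persist the state they need
            // and switch them to the on-disk copy before freeing memory
            dumpToDiskAndSwitchReaders();
        }
        releaseMemory();
    }

    private void dumpToDiskAndSwitchReaders() {
        // placeholder: write a dump file and point active readers at it,
        // as delete() does with the RFile mem dump and MemoryIterator.switchNow()
    }

    private void releaseMemory() {
        // placeholder: null out and delete the in-memory structure
    }
}

Deleting the dump file right after the switch works because, as the comment in delete() notes, a Unix filesystem keeps a file readable for processes that already hold it open.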

Aggregations

InterruptibleIterator (org.apache.accumulo.core.iterators.system.InterruptibleIterator): 2 uses
IOException (java.io.IOException): 1 use
ArrayList (java.util.ArrayList): 1 use
HashMap (java.util.HashMap): 1 use
HashSet (java.util.HashSet): 1 use
Map (java.util.Map): 1 use
Set (java.util.Set): 1 use
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 1 use
SiteConfiguration (org.apache.accumulo.core.conf.SiteConfiguration): 1 use
ByteSequence (org.apache.accumulo.core.data.ByteSequence): 1 use
Key (org.apache.accumulo.core.data.Key): 1 use
Range (org.apache.accumulo.core.data.Range): 1 use
Value (org.apache.accumulo.core.data.Value): 1 use
IterInfo (org.apache.accumulo.core.data.thrift.IterInfo): 1 use
FileSKVWriter (org.apache.accumulo.core.file.FileSKVWriter): 1 use
RFileOperations (org.apache.accumulo.core.file.rfile.RFileOperations): 1 use
IterationInterruptedException (org.apache.accumulo.core.iterators.IterationInterruptedException): 1 use
SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator): 1 use
MultiIterator (org.apache.accumulo.core.iterators.system.MultiIterator): 1 use
StatsIterator (org.apache.accumulo.core.iterators.system.StatsIterator): 1 use