use of org.apache.accumulo.core.iterators.IterationInterruptedException in project accumulo by apache.
the class MapFileIterator method seek.
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
    if (columnFamilies.size() != 0 || inclusive) {
        throw new IllegalArgumentException("I do not know how to filter column families");
    }
    if (range == null)
        throw new IllegalArgumentException("Cannot seek to null range");
    if (interruptFlag != null && interruptFlag.get())
        throw new IterationInterruptedException();
    Key key = range.getStartKey();
    if (key == null) {
        key = new Key();
    }
    reader.seek(key);
    while (hasTop() && range.beforeStartKey(getTopKey())) {
        next();
    }
}
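A minimal, hypothetical sketch of the guard used at the top of seek(): if the shared interrupt flag has been set, the scan aborts before touching the underlying file. The class and method names below are illustrative and are not part of Accumulo.

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.accumulo.core.iterators.IterationInterruptedException;

// Hypothetical helper; mirrors the flag check in MapFileIterator.seek().
final class InterruptCheck {

    private InterruptCheck() {}

    static void checkInterrupted(AtomicBoolean interruptFlag) {
        // a null flag means the iterator was never made interruptible
        if (interruptFlag != null && interruptFlag.get()) {
            throw new IterationInterruptedException();
        }
    }
}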
use of org.apache.accumulo.core.iterators.IterationInterruptedException in project accumulo by apache.
the class ScanDataSource method createIterator.
private SortedKeyValueIterator<Key, Value> createIterator() throws IOException {
    Map<FileRef, DataFileValue> files;
    SamplerConfigurationImpl samplerConfig = options.getSamplerConfigurationImpl();
    synchronized (tablet) {
        if (memIters != null)
            throw new IllegalStateException("Tried to create new scan iterator w/o releasing memory");
        if (tablet.isClosed())
            throw new TabletClosedException();
        if (interruptFlag.get())
            throw new IterationInterruptedException(tablet.getExtent().toString() + " " + interruptFlag.hashCode());
        // only acquire the file manager when we know the tablet is open
        if (fileManager == null) {
            fileManager = tablet.getTabletResources().newScanFileManager();
            tablet.addActiveScans(this);
        }
        if (fileManager.getNumOpenFiles() != 0)
            throw new IllegalStateException("Tried to create new scan iterator w/o releasing files");
        // set this before trying to get iterators in case
        // getIterators() throws an exception
        expectedDeletionCount = tablet.getDataSourceDeletions();
        memIters = tablet.getTabletMemory().getIterators(samplerConfig);
        Pair<Long, Map<FileRef, DataFileValue>> reservation = tablet.getDatafileManager().reserveFilesForScan();
        fileReservationId = reservation.getFirst();
        files = reservation.getSecond();
    }
    Collection<InterruptibleIterator> mapfiles = fileManager.openFiles(files, options.isIsolated(), samplerConfig);
    for (SortedKeyValueIterator<Key, Value> skvi : Iterables.concat(mapfiles, memIters))
        ((InterruptibleIterator) skvi).setInterruptFlag(interruptFlag);
    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(mapfiles.size() + memIters.size());
    iters.addAll(mapfiles);
    iters.addAll(memIters);
    MultiIterator multiIter = new MultiIterator(iters, tablet.getExtent());
    TabletIteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.scan, tablet.getTableConfiguration(), fileManager, files, options.getAuthorizations(), samplerConfig);
    statsIterator = new StatsIterator(multiIter, TabletServer.seekCount, tablet.getScannedCounter());
    SortedKeyValueIterator<Key, Value> visFilter = IteratorUtil.setupSystemScanIterators(statsIterator, options.getColumnSet(), options.getAuthorizations(), options.getDefaultLabels());
    if (!loadIters) {
        return visFilter;
    } else {
        List<IterInfo> iterInfos;
        Map<String, Map<String, String>> iterOpts;
        ParsedIteratorConfig pic = tablet.getTableConfiguration().getParsedIteratorConfig(IteratorScope.scan);
        if (options.getSsiList().size() == 0 && options.getSsio().size() == 0) {
            // No scan time iterator options were set, so can just use the pre-parsed table iterator options.
            iterInfos = pic.getIterInfo();
            iterOpts = pic.getOpts();
        } else {
            // Scan time iterator options were set, so need to merge those with pre-parsed table iterator options.
            iterOpts = new HashMap<>(pic.getOpts().size() + options.getSsio().size());
            iterInfos = new ArrayList<>(pic.getIterInfo().size() + options.getSsiList().size());
            IteratorUtil.mergeIteratorConfig(iterInfos, iterOpts, pic.getIterInfo(), pic.getOpts(), options.getSsiList(), options.getSsio());
        }
        String context;
        if (options.getClassLoaderContext() != null) {
            log.trace("Loading iterators for scan with scan context: {}", options.getClassLoaderContext());
            context = options.getClassLoaderContext();
        } else {
            context = pic.getContext();
            if (context != null) {
                log.trace("Loading iterators for scan with table context: {}", context);
            } else {
                log.trace("Loading iterators for scan");
            }
        }
        return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(visFilter, iterInfos, iterOpts, iterEnv, true, context));
    }
}
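The loop over Iterables.concat(mapfiles, memIters) is what makes the whole iterator stack interruptible: every source shares one AtomicBoolean, so setting it once aborts both in-memory and file-backed iteration. Below is a small sketch of that propagation step, assuming the 1.x package location of InterruptibleIterator; the utility class itself is hypothetical.

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.system.InterruptibleIterator;

// Hypothetical utility; assumes every source iterator implements InterruptibleIterator,
// as ScanDataSource.createIterator() does for its map files and memory iterators.
final class InterruptFlagPropagation {

    private InterruptFlagPropagation() {}

    static void propagate(Iterable<SortedKeyValueIterator<Key, Value>> sources, AtomicBoolean flag) {
        for (SortedKeyValueIterator<Key, Value> skvi : sources) {
            // setting flag to true later will make each source throw IterationInterruptedException
            ((InterruptibleIterator) skvi).setInterruptFlag(flag);
        }
    }
}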
use of org.apache.accumulo.core.iterators.IterationInterruptedException in project accumulo by apache.
the class Scanner method read.
public ScanBatch read() throws IOException, TabletClosedException {
    ScanDataSource dataSource = null;
    Batch results = null;
    try {
        try {
            scannerSemaphore.acquire();
        } catch (InterruptedException e) {
            sawException = true;
        }
        // sawException may have occurred within close, so we cannot assume that an interrupted exception was its cause
        if (sawException)
            throw new IllegalStateException("Tried to use scanner after exception occurred.");
        if (scanClosed)
            throw new IllegalStateException("Tried to use scanner after it was closed.");
        if (options.isIsolated()) {
            if (isolatedDataSource == null)
                isolatedDataSource = new ScanDataSource(tablet, options);
            dataSource = isolatedDataSource;
        } else {
            dataSource = new ScanDataSource(tablet, options);
        }
        SortedKeyValueIterator<Key, Value> iter;
        if (options.isIsolated()) {
            if (isolatedIter == null)
                isolatedIter = new SourceSwitchingIterator(dataSource, true);
            else
                isolatedDataSource.reattachFileManager();
            iter = isolatedIter;
        } else {
            iter = new SourceSwitchingIterator(dataSource, false);
        }
        results = tablet.nextBatch(iter, range, options.getNum(), options.getColumnSet(), options.getBatchTimeOut(), options.isIsolated());
        if (results.getResults() == null) {
            range = null;
            return new ScanBatch(new ArrayList<>(), false);
        } else if (results.getContinueKey() == null) {
            return new ScanBatch(results.getResults(), false);
        } else {
            range = new Range(results.getContinueKey(), !results.isSkipContinueKey(), range.getEndKey(), range.isEndKeyInclusive());
            return new ScanBatch(results.getResults(), true);
        }
    } catch (IterationInterruptedException iie) {
        sawException = true;
        if (tablet.isClosed())
            throw new TabletClosedException(iie);
        else
            throw iie;
    } catch (IOException ioe) {
        if (tablet.shutdownInProgress()) {
            log.debug("IOException while shutdown in progress ", ioe);
            // assume IOException was caused by execution of HDFS shutdown hook
            throw new TabletClosedException(ioe);
        }
        sawException = true;
        dataSource.close(true);
        throw ioe;
    } catch (RuntimeException re) {
        sawException = true;
        throw re;
    } finally {
        // return mapfiles, even when an exception is thrown
        if (null != dataSource && !options.isIsolated()) {
            dataSource.close(false);
        } else if (null != dataSource) {
            dataSource.detachFileManager();
        }
        if (results != null && results.getResults() != null)
            tablet.updateQueryStats(results.getResults().size(), results.getNumBytes());
        scannerSemaphore.release();
    }
}
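Two details above are easy to miss: IterationInterruptedException is a RuntimeException, and once any exception escapes read() the sawException flag makes every later call fail fast. A stripped-down, hypothetical sketch of that guard follows; the Callable stands in for the body of read() and nothing here is Accumulo API.

import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical guard; IterationInterruptedException extends RuntimeException,
// so it is covered by the RuntimeException catch below.
final class SawExceptionGuard {

    private final AtomicBoolean sawException = new AtomicBoolean(false);

    <T> T guardedRead(Callable<T> readBody) throws Exception {
        if (sawException.get()) {
            throw new IllegalStateException("Tried to use scanner after exception occurred.");
        }
        try {
            return readBody.call();
        } catch (RuntimeException e) {
            // remember the failure so subsequent reads are rejected immediately
            sawException.set(true);
            throw e;
        }
    }
}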
use of org.apache.accumulo.core.iterators.IterationInterruptedException in project accumulo by apache.
the class InMemoryMapTest method runInterruptSampleTest.
private void runInterruptSampleTest(boolean deepCopy, boolean delete, boolean dcAfterDelete) throws Exception {
    SamplerConfigurationImpl sampleConfig1 = new SamplerConfigurationImpl(RowSampler.class.getName(), ImmutableMap.of("hasher", "murmur3_32", "modulus", "2"));
    Sampler sampler = SamplerFactory.newSampler(sampleConfig1, DefaultConfiguration.getInstance());
    ConfigurationCopy config1 = newConfig(tempFolder.newFolder().getAbsolutePath());
    for (Entry<String, String> entry : sampleConfig1.toTablePropertiesMap().entrySet()) {
        config1.set(entry.getKey(), entry.getValue());
    }
    InMemoryMap imm = new InMemoryMap(config1);
    TreeMap<Key, Value> expectedSample = new TreeMap<>();
    TreeMap<Key, Value> expectedAll = new TreeMap<>();
    for (int r = 0; r < 1000; r++) {
        String row = String.format("r%06d", r);
        mutate(imm, row, "cf1:cq1", 5, "v" + (2 * r), sampler, expectedSample, expectedAll);
        mutate(imm, row, "cf2:cq2", 5, "v" + ((2 * r) + 1), sampler, expectedSample, expectedAll);
    }
    assertTrue(expectedSample.size() > 0);
    MemoryIterator miter = imm.skvIterator(sampleConfig1);
    AtomicBoolean iFlag = new AtomicBoolean(false);
    miter.setInterruptFlag(iFlag);
    SortedKeyValueIterator<Key, Value> iter = miter;
    if (delete && !dcAfterDelete) {
        imm.delete(0);
    }
    if (deepCopy) {
        iter = iter.deepCopy(new SampleIE(sampleConfig1));
    }
    if (delete && dcAfterDelete) {
        imm.delete(0);
    }
    assertEquals(expectedSample, readAll(iter));
    iFlag.set(true);
    try {
        readAll(iter);
        Assert.fail();
    } catch (IterationInterruptedException iie) {
        // expected: the interrupt flag was set, so iteration must abort
    }
    miter.close();
}
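readAll() is not shown in the excerpt; a plausible sketch of such a helper is below. It drains a SortedKeyValueIterator into a TreeMap, which is exactly where the test expects IterationInterruptedException to surface once iFlag is set. The real helper in InMemoryMapTest may differ.

import java.io.IOException;
import java.util.Collections;
import java.util.TreeMap;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

// Plausible stand-in for the test's readAll() helper.
final class ReadAllSketch {

    private ReadAllSketch() {}

    static TreeMap<Key, Value> readAll(SortedKeyValueIterator<Key, Value> iter) throws IOException {
        TreeMap<Key, Value> entries = new TreeMap<>();
        // seek over the full range with no column family filtering
        iter.seek(new Range(), Collections.emptySet(), false);
        while (iter.hasTop()) {
            entries.put(new Key(iter.getTopKey()), new Value(iter.getTopValue().get()));
            iter.next();
        }
        return entries;
    }
}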
use of org.apache.accumulo.core.iterators.IterationInterruptedException in project accumulo by apache.
the class LookupTask method run.
@Override
public void run() {
    MultiScanSession session = (MultiScanSession) server.getSession(scanID);
    String oldThreadName = Thread.currentThread().getName();
    try {
        if (isCancelled() || session == null)
            return;
        TableConfiguration acuTableConf = server.getTableConfiguration(session.threadPoolExtent);
        long maxResultsSize = acuTableConf.getAsBytes(Property.TABLE_SCAN_MAXMEM);
        runState.set(ScanRunState.RUNNING);
        Thread.currentThread().setName("Client: " + session.client + " User: " + session.getUser() + " Start: " + session.startTime + " Table: ");
        long bytesAdded = 0;
        long maxScanTime = 4000;
        long startTime = System.currentTimeMillis();
        List<KVEntry> results = new ArrayList<>();
        Map<KeyExtent, List<Range>> failures = new HashMap<>();
        List<KeyExtent> fullScans = new ArrayList<>();
        KeyExtent partScan = null;
        Key partNextKey = null;
        boolean partNextKeyInclusive = false;
        Iterator<Entry<KeyExtent, List<Range>>> iter = session.queries.entrySet().iterator();
        // check the time so that the read ahead thread is not monopolized
        while (iter.hasNext() && bytesAdded < maxResultsSize && (System.currentTimeMillis() - startTime) < maxScanTime) {
            Entry<KeyExtent, List<Range>> entry = iter.next();
            iter.remove();
            // check that tablet server is serving requested tablet
            Tablet tablet = server.getOnlineTablet(entry.getKey());
            if (tablet == null) {
                failures.put(entry.getKey(), entry.getValue());
                continue;
            }
            Thread.currentThread().setName("Client: " + session.client + " User: " + session.getUser() + " Start: " + session.startTime + " Tablet: " + entry.getKey().toString());
            LookupResult lookupResult;
            try {
                // canceled
                if (isCancelled())
                    interruptFlag.set(true);
                lookupResult = tablet.lookup(entry.getValue(), session.columnSet, session.auths, results, maxResultsSize - bytesAdded, session.ssiList, session.ssio, interruptFlag, session.samplerConfig, session.batchTimeOut, session.context);
                // if the tablet was closed, it is possible that the interrupt flag was set;
                // do not want it set for the next lookup
                interruptFlag.set(false);
            } catch (IOException e) {
                log.warn("lookup failed for tablet " + entry.getKey(), e);
                throw new RuntimeException(e);
            }
            bytesAdded += lookupResult.bytesAdded;
            if (lookupResult.unfinishedRanges.size() > 0) {
                if (lookupResult.closed) {
                    failures.put(entry.getKey(), lookupResult.unfinishedRanges);
                } else {
                    session.queries.put(entry.getKey(), lookupResult.unfinishedRanges);
                    partScan = entry.getKey();
                    partNextKey = lookupResult.unfinishedRanges.get(0).getStartKey();
                    partNextKeyInclusive = lookupResult.unfinishedRanges.get(0).isStartKeyInclusive();
                }
            } else {
                fullScans.add(entry.getKey());
            }
        }
        long finishTime = System.currentTimeMillis();
        session.totalLookupTime += (finishTime - startTime);
        session.numEntries += results.size();
        // convert everything to thrift before adding result
        List<TKeyValue> retResults = new ArrayList<>();
        for (KVEntry entry : results)
            retResults.add(new TKeyValue(entry.getKey().toThrift(), ByteBuffer.wrap(entry.getValue().get())));
        Map<TKeyExtent, List<TRange>> retFailures = Translator.translate(failures, Translators.KET, new Translator.ListTranslator<>(Translators.RT));
        List<TKeyExtent> retFullScans = Translator.translate(fullScans, Translators.KET);
        TKeyExtent retPartScan = null;
        TKey retPartNextKey = null;
        if (partScan != null) {
            retPartScan = partScan.toThrift();
            retPartNextKey = partNextKey.toThrift();
        }
        // add results to queue
        addResult(new MultiScanResult(retResults, retFailures, retFullScans, retPartScan, retPartNextKey, partNextKeyInclusive, session.queries.size() != 0));
    } catch (IterationInterruptedException iie) {
        if (!isCancelled()) {
            log.warn("Iteration interrupted, when scan not cancelled", iie);
            addResult(iie);
        }
    } catch (SampleNotPresentException e) {
        addResult(e);
    } catch (Throwable e) {
        log.warn("exception while doing multi-scan ", e);
        addResult(e);
    } finally {
        Thread.currentThread().setName(oldThreadName);
        runState.set(ScanRunState.FINISHED);
    }
}
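The IterationInterruptedException handler above encodes a small policy: an interruption caused by cancelling the lookup is expected and dropped, while an unexpected one is logged and handed back to the client as the scan's result. A hypothetical sketch of that policy in isolation; ResultSink stands in for the session's addResult and is not Accumulo API.

import org.apache.accumulo.core.iterators.IterationInterruptedException;
import org.slf4j.Logger;

// Hypothetical helper; isolates the cancelled-vs-unexpected decision from LookupTask.run().
final class InterruptionPolicy {

    interface ResultSink {
        void addResult(Object result);
    }

    static void handle(IterationInterruptedException iie, boolean cancelled, ResultSink sink, Logger log) {
        if (cancelled) {
            return; // the scan was cancelled on purpose; the interruption is expected
        }
        log.warn("Iteration interrupted, when scan not cancelled", iie);
        sink.addResult(iie);
    }
}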