Example usage of java.nio.channels.ClosedByInterruptException in the Apache Hive project: class LlapInputFormat, method getRecordReader.
@Override
public RecordReader<NullWritable, VectorizedRowBatch> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
  // Delegate straight to the source format if the split is not LLAP-compatible (e.g. not an OrcSplit).
  RecordReader<NullWritable, VectorizedRowBatch> fallback = checkLlapSplit(split, job, reporter);
  if (fallback != null) {
    return fallback;
  }
  FileSplit fileSplit = (FileSplit) split;
  reporter.setStatus(fileSplit.toString());
  try {
    // At this entry point the projected column IDs are assumed to be logical table
    // columns; null means "read all columns".
    List<Integer> includedCols = ColumnProjectionUtils.isReadAllColumns(job)
        ? null : ColumnProjectionUtils.getReadColumnIDs(job);
    LlapRecordReader llapReader = LlapRecordReader.create(job, fileSplit, includedCols, hostName,
        cvp, executor, sourceInputFormat, sourceSerDe, reporter, daemonConf);
    if (llapReader == null) {
      // Reader-specific incompatibility (e.g. SMB or schema evolution): fall back to the source format.
      return sourceInputFormat.getRecordReader(split, job, reporter);
    }
    RecordReader<NullWritable, VectorizedRowBatch> reader = llapReader;
    if (!Utilities.getIsVectorized(job)) {
      // Non-vectorized operator pipeline: only usable when the row wrapper is enabled
      // and wrapping actually succeeds.
      reader = HiveConf.getBoolVar(job, ConfVars.LLAP_IO_ROW_WRAPPER_ENABLED)
          ? wrapLlapReader(includedCols, llapReader, split) : null;
      if (reader == null) {
        // Cannot wrap this reader for the non-vectorized pipeline.
        return sourceInputFormat.getRecordReader(split, job, reporter);
      }
    }
    // Kick off the background read before handing the reader to the caller.
    llapReader.start();
    return reader;
  } catch (Exception ex) {
    Throwable rootCause = JavaUtils.findRootCause(ex);
    // When the query limit has been reached, an interrupt-induced failure is expected: swallow it
    // and return an empty reader instead of failing the task.
    if (checkLimitReached(job)
        && (rootCause instanceof InterruptedException || rootCause instanceof ClosedByInterruptException)) {
      LlapIoImpl.LOG.info("Ignoring exception while getting record reader as limit is reached", rootCause);
      return new NullRowsRecordReader(job, split);
    } else {
      throw new IOException(ex);
    }
  }
}
Example usage of java.nio.channels.ClosedByInterruptException in the Apache Hive project: class BuddyAllocator, method preallocateArenaBuffer.
/**
 * Preallocates a single arena-sized buffer: a READ_WRITE memory mapping backed by a temp
 * file under {@code cacheDir} when {@code isMapped} is set, otherwise a plain direct or
 * heap {@link ByteBuffer} depending on {@code isDirect}.
 *
 * @param arenaSize size of the arena buffer in bytes
 * @return the newly allocated buffer
 * @throws ClosedByInterruptException if the thread was interrupted while creating the mapping
 */
private ByteBuffer preallocateArenaBuffer(int arenaSize) throws ClosedByInterruptException {
  if (isMapped) {
    RandomAccessFile rwf = null;
    File rf = null;
    // Memory-mapped arenas only make sense for direct buffers.
    Preconditions.checkArgument(isDirect, "All memory mapped allocations have to be direct buffers");
    try {
      rf = File.createTempFile("arena-", ".cache", cacheDir.toFile());
      rwf = new RandomAccessFile(rf, "rw");
      // truncate (TODO: posix_fallocate?)
      rwf.setLength(arenaSize);
      // see discussion in YARN-5551 for the memory accounting discussion
      return rwf.getChannel().map(MapMode.READ_WRITE, 0, arenaSize);
    } catch (ClosedByInterruptException cbi) {
      LlapIoImpl.LOG.warn("Interrupted while trying to allocate memory mapped arena", cbi);
      // Eager cleanup of the (possibly unmapped) arena file; rf is nulled afterwards so the
      // finally block does not attempt a second delete. NOTE(review): the original rationale
      // was that finally "may not execute on thread interrupts" — in standard Java, finally
      // does run; the eager cleanup here is harmless either way.
      IOUtils.closeQuietly(rwf);
      if (rf != null) {
        rf.delete();
        rf = null;
      }
      throw cbi;
    } catch (IOException ioe) {
      LlapIoImpl.LOG.warn("Failed trying to allocate memory mapped arena", ioe);
      // Surface a failed mapping the same way a failed memory allocation would surface.
      throw new OutOfMemoryError("Failed trying to allocate memory mapped arena: " + ioe.getMessage());
    } finally {
      // A mapping, once established, is not dependent upon the file channel that was used to
      // create it: close the channel and delete the backing file while keeping the map alive.
      IOUtils.closeQuietly(rwf);
      if (rf != null) {
        rf.delete();
      }
    }
  }
  // Non-mapped path: plain direct or heap allocation.
  return isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
}
Example usage of java.nio.channels.ClosedByInterruptException in the Apache Ignite project: class FileHandleManagerImpl, method nextHandle.
/**
 * {@inheritDoc}
 */
@Override
public FileWriteHandle nextHandle(SegmentIO fileIO, RecordSerializer serializer) throws IOException {
  // In mmap mode the ring buffer wraps a fresh mapping of the new segment;
  // otherwise the current handle's buffer is reset and reused.
  final SegmentedRingByteBuffer ringBuf;
  if (mmap)
    ringBuf = new SegmentedRingByteBuffer(fileIO.map((int) maxWalSegmentSize), metrics);
  else
    ringBuf = currentHandle().buf.reset();
  try {
    return new FileWriteHandleImpl(cctx, fileIO, ringBuf, serializer, metrics, walWriter, 0, mode, mmap, false, fsyncDelay, maxWalSegmentSize);
  } catch (ClosedByInterruptException e) {
    // Channel was closed by thread interruption: release the buffer and report failure via null.
    if (ringBuf != null)
      ringBuf.free();
  }
  return null;
}
Example usage of java.nio.channels.ClosedByInterruptException in the Apache Ignite project: class FilePageStore, method reinit.
/**
 * Reinit page store after file channel was closed by thread interruption.
 *
 * Re-opens the underlying file under the write lock, retrying while the open itself keeps
 * failing with {@link ClosedByInterruptException}, and restores the calling thread's
 * interrupt status once a working channel is installed.
 *
 * @param fileIO Old fileIO.
 */
private void reinit(FileIO fileIO) throws IOException {
  // Nothing to reinit before the store has been initialized.
  if (!inited)
    return;
  // Cheap pre-check: another thread may already have swapped in a fresh channel.
  if (fileIO != this.fileIO)
    return;
  lock.writeLock().lock();
  try {
    // Re-check under the lock: a concurrent reinit may have won the race.
    if (fileIO != this.fileIO)
      return;
    try {
      boolean interrupted = false;
      while (true) {
        try {
          // Null first so the outer IOException handler only closes a channel
          // that was actually (re)opened in this iteration.
          fileIO = null;
          File cfgFile = pathProvider.apply().toFile();
          fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE);
          fileExists = true;
          checkFile(fileIO, cfgFile);
          this.fileIO = fileIO;
          // Restore the interrupt flag that was cleared while retrying.
          if (interrupted)
            Thread.currentThread().interrupt();
          break;
        } catch (ClosedByInterruptException e) {
          // The open itself was killed by an interrupt: clear the flag and retry.
          interrupted = true;
          Thread.interrupted();
        }
      }
    } catch (IOException e) {
      // Close the half-opened channel (if any) without losing the original failure.
      try {
        if (fileIO != null)
          fileIO.close();
      } catch (IOException e0) {
        e.addSuppressed(e0);
      }
      throw e;
    }
  } finally {
    lock.writeLock().unlock();
  }
}
Example usage of java.nio.channels.ClosedByInterruptException in the Apache Ignite project: class FilePageStore, method readWithFailover.
/**
 * Reads fully into the destination buffer, transparently retrying through
 * channel-closed failures by reinitializing the underlying file channel.
 *
 * @param destBuf Destination buffer.
 * @param position Position.
 * @return Number of read bytes.
 */
private int readWithFailover(ByteBuffer destBuf, long position) throws IOException {
  boolean wasInterrupted = false;
  final int startPos = destBuf.position();
  for (;;) {
    FileIO io = this.fileIO;
    if (io == null)
      throw new IOException("FileIO has stopped");
    try {
      assert destBuf.remaining() > 0;
      int bytesRead = io.readFully(destBuf, position);
      // Restore the interrupt flag that was cleared during retries.
      if (wasInterrupted)
        Thread.currentThread().interrupt();
      return bytesRead;
    } catch (ClosedChannelException e) {
      // Rewind whatever the failed attempt consumed, then retry on a fresh channel.
      destBuf.position(startPos);
      if (e instanceof ClosedByInterruptException) {
        wasInterrupted = true;
        Thread.interrupted();
      }
      reinit(io);
    }
  }
}
Aggregations