Use of org.apache.hyracks.dataflow.common.io.RunFileReader in the Apache project asterixdb: class MaterializerTaskState, method writeOut.
/**
 * Replays the materialized run file into the downstream {@code writer}.
 *
 * @param writer the downstream frame writer; opened here and always closed in the finally block
 * @param frame  a reusable frame that receives each stored frame before being forwarded
 * @param failed true if the upstream producer failed; the failure is propagated via
 *               {@link IFrameWriter#fail()} and no data is replayed
 * @throws HyracksDataException on any read or write failure
 */
public void writeOut(IFrameWriter writer, IFrame frame, boolean failed) throws HyracksDataException {
    RunFileReader in = out.createReader();
    writer.open();
    try {
        if (failed) {
            // Upstream already failed: signal failure downstream and skip replay.
            // The outer finally still closes the writer and releases this
            // consumer's reference on the shared run file.
            writer.fail();
            return;
        }
        in.open();
        try {
            // Pump every materialized frame into the downstream writer.
            while (in.nextFrame(frame)) {
                writer.nextFrame(frame.getBuffer());
            }
        } finally {
            in.close();
        }
    } catch (Exception e) {
        // Tell the consumer we failed before rethrowing so it can clean up.
        writer.fail();
        throw e;
    } finally {
        try {
            writer.close();
        } finally {
            // numConsumers is a shared countdown; the last consumer to finish
            // deletes the backing run file.
            if (numConsumers.decrementAndGet() == 0) {
                out.getFileReference().delete();
            }
        }
    }
}
Use of org.apache.hyracks.dataflow.common.io.RunFileReader in the Apache project asterixdb: class OptimizedHybridHashJoin, method loadSpilledPartitionToMem.
/**
 * Attempts to bring the spilled partition {@code pid} back into memory by reading
 * its run file and inserting every tuple into the in-memory buffer manager.
 *
 * @param pid the partition id being reloaded
 * @param wr  the run file writer whose file holds the spilled tuples
 * @return true if the whole partition fit in memory; false if insertion failed
 *         (the partially filled frames are released and the run file is kept)
 * @throws HyracksDataException on read or buffer errors
 */
private boolean loadSpilledPartitionToMem(int pid, RunFileWriter wr) throws HyracksDataException {
    RunFileReader reader = wr.createReader();
    try {
        reader.open();
        if (reloadBuffer == null) {
            reloadBuffer = new VSizeFrame(ctx);
        }
        while (reader.nextFrame(reloadBuffer)) {
            accessorBuild.reset(reloadBuffer.getBuffer());
            for (int tid = 0; tid < accessorBuild.getTupleCount(); tid++) {
                if (!bufferManager.insertTuple(pid, accessorBuild, tid, tempPtr)) {
                    // Insertion can fail even with free budget (e.g. fragmentation);
                    // give back the frames we grabbed and report failure. Note the
                    // run file is NOT marked for deletion in this path.
                    bufferManager.clearPartition(pid);
                    return false;
                }
            }
        }
        // Everything fit in memory, so the on-disk copy is no longer needed.
        reader.setDeleteAfterClose(true);
    } finally {
        reader.close();
    }
    // Mark the partition as memory-resident and drop its writer reference.
    spilledStatus.set(pid, false);
    buildRFWriters[pid] = null;
    return true;
}
Use of org.apache.hyracks.dataflow.common.io.RunFileReader in the Apache project asterixdb: class NestedLoopJoin, method join.
/**
 * Caches the given outer frame; when the outer cache is full, first joins the
 * cached outer frames against the entire materialized inner run, then resets
 * the cache and retries the insertion.
 *
 * @param outerBuffer one frame from the outer (probe) input
 * @param writer      receives the joined result frames
 * @throws HyracksDataException if the frame cannot be cached even in an empty
 *                              buffer, or on any read/write failure
 */
public void join(ByteBuffer outerBuffer, IFrameWriter writer) throws HyracksDataException {
    if (outerBufferMngr.insertFrame(outerBuffer) >= 0) {
        // Frame cached successfully; nothing to flush yet.
        return;
    }
    // Outer cache is full: run the block join of every cached outer frame
    // against each frame of the materialized inner relation.
    RunFileReader innerReader = runFileWriter.createReader();
    try {
        innerReader.open();
        while (innerReader.nextFrame(innerBuffer)) {
            for (int frameIx = 0; frameIx < outerBufferMngr.getNumFrames(); frameIx++) {
                blockJoin(outerBufferMngr.getFrame(frameIx, tempInfo), innerBuffer.getBuffer(), writer);
            }
        }
    } finally {
        innerReader.close();
    }
    // Empty the cache and retry; a second failure means the single frame is
    // larger than the whole buffer budget.
    outerBufferMngr.reset();
    if (outerBufferMngr.insertFrame(outerBuffer) < 0) {
        throw new HyracksDataException("The given outer frame of size:" + outerBuffer.capacity() + " is too big to cache in the buffer. Please choose a larger buffer memory size");
    }
}
Use of org.apache.hyracks.dataflow.common.io.RunFileReader in the Apache project asterixdb: class NestedLoopJoin, method completeJoin.
/**
 * Flushes the join: matches whatever outer frames remain cached against the
 * full inner run, then forces out any partially filled output frame. The inner
 * run file is deleted when its reader closes.
 *
 * @param writer receives the final joined result frames
 * @throws HyracksDataException on any read or write failure
 */
public void completeJoin(IFrameWriter writer) throws HyracksDataException {
    RunFileReader innerReader = runFileWriter.createDeleteOnCloseReader();
    try {
        innerReader.open();
        while (innerReader.nextFrame(innerBuffer)) {
            for (int frameIx = 0; frameIx < outerBufferMngr.getNumFrames(); frameIx++) {
                blockJoin(outerBufferMngr.getFrame(frameIx, tempInfo), innerBuffer.getBuffer(), writer);
            }
        }
    } finally {
        // Reader was created delete-on-close, so this also removes the run file.
        innerReader.close();
    }
    // Push out the last (possibly partial) output frame.
    appender.write(writer, true);
}
Use of org.apache.hyracks.dataflow.common.io.RunFileReader in the Apache project asterixdb: class AbstractExternalSortRunMerger, method process.
/**
 * Drives the external-sort merge phase: if no runs were spilled, flushes the
 * in-memory sorter directly; otherwise performs multi-pass (generation-based)
 * merging of the spilled runs until one final merge can write to {@code writer}.
 * On any failure the final writer is failed; in all cases it is closed and
 * every remaining run reader is closed.
 *
 * @throws HyracksDataException wrapping any failure during merging
 */
public void process() throws HyracksDataException {
    IFrameWriter finalWriter = null;
    try {
        if (runs.isEmpty()) {
            // Nothing spilled: the sorted data (if any) is still in the sorter.
            finalWriter = prepareSkipMergingFinalResultWriter(writer);
            finalWriter.open();
            if (sorter != null) {
                try {
                    if (sorter.hasRemaining()) {
                        sorter.flush(finalWriter);
                    }
                } finally {
                    sorter.close();
                }
            }
        } else {
            /** recycle sort buffer */
            if (sorter != null) {
                sorter.close();
            }
            finalWriter = prepareFinalMergeResultWriter(writer);
            finalWriter.open();
            // One frame is reserved for output; the rest bound the merge fan-in.
            int maxMergeWidth = framesLimit - 1;
            inFrames = new ArrayList<>(maxMergeWidth);
            outputFrame = new VSizeFrame(ctx);
            List<GeneratedRunFileReader> partialRuns = new ArrayList<>(maxMergeWidth);
            // 'stop' marks the end of the current generation within 'runs';
            // currentGenerationRunAvailable tracks which of those runs are unmerged.
            int stop = runs.size();
            currentGenerationRunAvailable.set(0, stop);
            while (true) {
                // Pick as many runs of this generation as fit in the frame budget.
                int unUsed = selectPartialRuns(maxMergeWidth * ctx.getInitialFrameSize(), runs, partialRuns, currentGenerationRunAvailable, stop);
                prepareFrames(unUsed, inFrames, partialRuns);
                if (!currentGenerationRunAvailable.isEmpty() || stop < runs.size()) {
                    // More runs remain (this generation or the next): do an
                    // intermediate merge into a new run file.
                    GeneratedRunFileReader reader;
                    if (partialRuns.size() == 1) {
                        if (!currentGenerationRunAvailable.isEmpty()) {
                            // Only one run could be selected yet others remain:
                            // a single record exceeds the merge frame budget.
                            throw new HyracksDataException("The record is too big to put into the merging frame, please" + " allocate more sorting memory");
                        } else {
                            // Lone leftover run: carry it into the next generation as-is.
                            reader = partialRuns.get(0);
                        }
                    } else {
                        RunFileWriter mergeFileWriter = prepareIntermediateMergeRunFile();
                        IFrameWriter mergeResultWriter = prepareIntermediateMergeResultWriter(mergeFileWriter);
                        try {
                            mergeResultWriter.open();
                            merge(mergeResultWriter, partialRuns);
                        } catch (Throwable t) {
                            mergeResultWriter.fail();
                            throw t;
                        } finally {
                            mergeResultWriter.close();
                        }
                        reader = mergeFileWriter.createReader();
                    }
                    runs.add(reader);
                    if (currentGenerationRunAvailable.isEmpty()) {
                        // Current generation fully consumed: drop its entries and
                        // promote the newly produced runs to the next generation.
                        if (LOGGER.isLoggable(Level.FINE)) {
                            LOGGER.fine("generated runs:" + stop);
                        }
                        runs.subList(0, stop).clear();
                        currentGenerationRunAvailable.clear();
                        currentGenerationRunAvailable.set(0, runs.size());
                        stop = runs.size();
                    }
                } else {
                    // All remaining runs fit in one merge: write the final result.
                    if (LOGGER.isLoggable(Level.FINE)) {
                        LOGGER.fine("final runs:" + stop);
                    }
                    merge(finalWriter, partialRuns);
                    break;
                }
            }
        }
    } catch (Exception e) {
        if (finalWriter != null) {
            finalWriter.fail();
        }
        throw HyracksDataException.create(e);
    } finally {
        try {
            if (finalWriter != null) {
                finalWriter.close();
            }
        } finally {
            // Best-effort cleanup of any readers still in 'runs'.
            for (RunFileReader reader : runs) {
                try {
                    // close is idempotent.
                    reader.close();
                } catch (Exception e) {
                    if (LOGGER.isLoggable(Level.WARNING)) {
                        LOGGER.log(Level.WARNING, e.getMessage(), e);
                    }
                }
            }
        }
    }
}
Aggregations