Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by apache.
The class FeedTupleForwarder, method initialize:
@Override
public void initialize(IHyracksTaskContext ctx, IFrameWriter writer) throws HyracksDataException {
    if (!initialized) {
        this.frame = new VSizeFrame(ctx);
        this.writer = writer;
        this.appender = new FrameTupleAppender(frame);
        initialized = true;
    }
}
Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by apache.
The class FrameFullTupleForwarder, method initialize:
@Override
public void initialize(IHyracksTaskContext ctx, IFrameWriter writer) throws HyracksDataException {
    this.appender = new FrameTupleAppender();
    this.frame = new VSizeFrame(ctx);
    this.writer = writer;
    appender.reset(frame, true);
}
Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by apache.
The class RateControlledTupleForwarder, method initialize:
@Override
public void initialize(IHyracksTaskContext ctx, IFrameWriter writer) throws HyracksDataException {
    this.appender = new FrameTupleAppender();
    this.frame = new VSizeFrame(ctx);
    this.writer = writer;
    appender.reset(frame, true);
}
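All three forwarders above pair a VSizeFrame with a FrameTupleAppender and leave the frame empty at initialization; tuples are pushed through the appender afterwards, and the frame is written to the downstream IFrameWriter whenever it fills up. The sketch below illustrates that forwarding step under stated assumptions: the addTuple method name and its accessor/index parameters are hypothetical, and only the append and write calls already visible in the snippets on this page are used.

// Minimal sketch (assumed, not taken from asterixdb): append one tuple into the
// frame held by 'appender'; when the frame is full, write it to 'writer' and retry.
private void addTuple(FrameTupleAccessor accessor, int tupleIndex) throws HyracksDataException {
    if (!appender.append(accessor, tupleIndex)) {
        // Frame is full: flush it downstream (true resets the buffer for reuse).
        appender.write(writer, true);
        if (!appender.append(accessor, tupleIndex)) {
            // The tuple does not fit even in an empty frame.
            throw new HyracksDataException("Record too large for a single frame");
        }
    }
}

A flush or close on such a forwarder would end with one final appender.write call so that any partially filled frame still reaches the writer.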
Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by apache.
The class AbstractExternalSortRunMerger, method process:
public void process() throws HyracksDataException {
    IFrameWriter finalWriter = null;
    try {
        if (runs.isEmpty()) {
            finalWriter = prepareSkipMergingFinalResultWriter(writer);
            finalWriter.open();
            if (sorter != null) {
                try {
                    if (sorter.hasRemaining()) {
                        sorter.flush(finalWriter);
                    }
                } finally {
                    sorter.close();
                }
            }
        } else {
            /** recycle sort buffer */
            if (sorter != null) {
                sorter.close();
            }
            finalWriter = prepareFinalMergeResultWriter(writer);
            finalWriter.open();
            int maxMergeWidth = framesLimit - 1;
            inFrames = new ArrayList<>(maxMergeWidth);
            outputFrame = new VSizeFrame(ctx);
            List<GeneratedRunFileReader> partialRuns = new ArrayList<>(maxMergeWidth);
            int stop = runs.size();
            currentGenerationRunAvailable.set(0, stop);
            while (true) {
                int unUsed = selectPartialRuns(maxMergeWidth * ctx.getInitialFrameSize(), runs, partialRuns,
                        currentGenerationRunAvailable, stop);
                prepareFrames(unUsed, inFrames, partialRuns);
                if (!currentGenerationRunAvailable.isEmpty() || stop < runs.size()) {
                    GeneratedRunFileReader reader;
                    if (partialRuns.size() == 1) {
                        if (!currentGenerationRunAvailable.isEmpty()) {
                            throw new HyracksDataException("The record is too big to put into the merging frame, please"
                                    + " allocate more sorting memory");
                        } else {
                            reader = partialRuns.get(0);
                        }
                    } else {
                        RunFileWriter mergeFileWriter = prepareIntermediateMergeRunFile();
                        IFrameWriter mergeResultWriter = prepareIntermediateMergeResultWriter(mergeFileWriter);
                        try {
                            mergeResultWriter.open();
                            merge(mergeResultWriter, partialRuns);
                        } catch (Throwable t) {
                            mergeResultWriter.fail();
                            throw t;
                        } finally {
                            mergeResultWriter.close();
                        }
                        reader = mergeFileWriter.createReader();
                    }
                    runs.add(reader);
                    if (currentGenerationRunAvailable.isEmpty()) {
                        if (LOGGER.isLoggable(Level.FINE)) {
                            LOGGER.fine("generated runs:" + stop);
                        }
                        runs.subList(0, stop).clear();
                        currentGenerationRunAvailable.clear();
                        currentGenerationRunAvailable.set(0, runs.size());
                        stop = runs.size();
                    }
                } else {
                    if (LOGGER.isLoggable(Level.FINE)) {
                        LOGGER.fine("final runs:" + stop);
                    }
                    merge(finalWriter, partialRuns);
                    break;
                }
            }
        }
    } catch (Exception e) {
        if (finalWriter != null) {
            finalWriter.fail();
        }
        throw HyracksDataException.create(e);
    } finally {
        try {
            if (finalWriter != null) {
                finalWriter.close();
            }
        } finally {
            for (RunFileReader reader : runs) {
                try {
                    // close is idempotent.
                    reader.close();
                } catch (Exception e) {
                    if (LOGGER.isLoggable(Level.WARNING)) {
                        LOGGER.log(Level.WARNING, e.getMessage(), e);
                    }
                }
            }
        }
    }
}
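The memory budget in process() splits the framesLimit frames into one output frame (outputFrame) and up to framesLimit - 1 input frames per merge pass; selectPartialRuns is handed the byte budget maxMergeWidth * ctx.getInitialFrameSize(). A small numeric illustration with assumed values (not from the source):

// Hypothetical numbers, only to illustrate the budget split computed above.
int framesLimit = 33;                               // frames granted to the merger (assumed)
int initialFrameSize = 32 * 1024;                   // ctx.getInitialFrameSize(), assumed 32 KB
int maxMergeWidth = framesLimit - 1;                // 32 input runs; one frame kept for outputFrame
int budgetBytes = maxMergeWidth * initialFrameSize; // 1,048,576 bytes offered to selectPartialRuns

With these assumed numbers, and assuming each run needs only one initial-size frame, a pass merges at most 32 runs; a sort that spilled 1,000 runs would be reduced to roughly 32 intermediate runs in one generation and then merged into finalWriter in a single final pass.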
Use of org.apache.hyracks.api.comm.VSizeFrame in project asterixdb by apache.
The class OptimizedHybridHashJoin, method flushBigProbeObjectToDisk:
private void flushBigProbeObjectToDisk(int pid, FrameTupleAccessor accessorProbe, int i) throws HyracksDataException {
    if (bigProbeFrameAppender == null) {
        bigProbeFrameAppender = new FrameTupleAppender(new VSizeFrame(ctx));
    }
    RunFileWriter runFileWriter = getSpillWriterOrCreateNewOneIfNotExist(pid, SIDE.PROBE);
    if (!bigProbeFrameAppender.append(accessorProbe, i)) {
        throw new HyracksDataException("The given tuple is too big");
    }
    bigProbeFrameAppender.write(runFileWriter, true);
}
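A VSizeFrame can grow beyond the initial frame size, which is why a dedicated bigProbeFrameAppender is kept for probe tuples that do not fit into a normal partition frame; the frame is written to the spill file immediately (write(runFileWriter, true)) and reset for the next oversized tuple. A hypothetical caller-side check, only to show where this method fits (appendToProbeMemory is an assumed helper name, not asterixdb code):

// Hypothetical sketch of the call site: spill a probe tuple that the in-memory
// partition frame could not hold.
if (!appendToProbeMemory(pid, accessorProbe, i)) {       // assumed helper; in-memory append failed
    flushBigProbeObjectToDisk(pid, accessorProbe, i);    // route the oversized tuple to disk
}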