Example 1 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

From class IndirectRowSet, method makeSv2.

private static SelectionVector2 makeSv2(BufferAllocator allocator, VectorContainer container) {
    int rowCount = container.getRecordCount();
    SelectionVector2 sv2 = new SelectionVector2(allocator);
    if (!sv2.allocateNewSafe(rowCount)) {
        throw new OutOfMemoryException("Unable to allocate sv2 buffer");
    }
    for (int i = 0; i < rowCount; i++) {
        sv2.setIndex(i, (char) i);
    }
    sv2.setRecordCount(rowCount);
    container.buildSchema(SelectionVectorMode.TWO_BYTE);
    return sv2;
}
Also used: SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2), OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException)
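
A SelectionVector2 stores each index as a Java char, which is why setIndex casts the loop counter and the container is built in TWO_BYTE mode: a single SV2 can address at most 65536 rows. Below is a minimal standalone sketch of the same allocate-check-populate pattern; the helper name fillIdentitySv2 and the explicit bounds check are illustrative additions, not Drill source:

private static SelectionVector2 fillIdentitySv2(BufferAllocator allocator, int rowCount) {
    // SV2 entries are 2-byte (char) indexes, so the row count must fit in 16 bits.
    if (rowCount > Character.MAX_VALUE + 1) {
        throw new IllegalArgumentException("SV2 can address at most 65536 rows");
    }
    SelectionVector2 sv2 = new SelectionVector2(allocator);
    if (!sv2.allocateNewSafe(rowCount)) {
        // allocateNewSafe() signals failure by returning false; surface it as OOM.
        throw new OutOfMemoryException("Unable to allocate sv2 buffer");
    }
    for (int i = 0; i < rowCount; i++) {
        sv2.setIndex(i, (char) i);   // identity mapping: row i stays at position i
    }
    sv2.setRecordCount(rowCount);
    return sv2;
}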

Example 2 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

From class ExternalSortBatch, method newSV2.

private SelectionVector2 newSV2() throws OutOfMemoryException, InterruptedException {
    @SuppressWarnings("resource") SelectionVector2 sv2 = new SelectionVector2(oAllocator);
    if (!sv2.allocateNewSafe(incoming.getRecordCount())) {
        try {
            @SuppressWarnings("resource") final BatchGroup merged = mergeAndSpill(batchGroups);
            if (merged != null) {
                spilledBatchGroups.add(merged);
            } else {
                throw UserException.memoryError("Unable to allocate sv2 for %d records, and not enough batchGroups to spill.", incoming.getRecordCount())
                        .addContext("batchGroups.size", batchGroups.size())
                        .addContext("spilledBatchGroups.size", spilledBatchGroups.size())
                        .addContext("allocated memory", oAllocator.getAllocatedMemory())
                        .addContext("allocator limit", oAllocator.getLimit())
                        .build(logger);
            }
        } catch (SchemaChangeException e) {
            throw new RuntimeException(e);
        }
        int waitTime = 1;
        while (true) {
            try {
                Thread.sleep(waitTime * 1000);
            } catch (final InterruptedException e) {
                if (!context.shouldContinue()) {
                    throw e;
                }
            }
            waitTime *= 2;
            if (sv2.allocateNewSafe(incoming.getRecordCount())) {
                break;
            }
            if (waitTime >= 32) {
                throw new OutOfMemoryException("Unable to allocate sv2 buffer after repeated attempts");
            }
        }
    }
    for (int i = 0; i < incoming.getRecordCount(); i++) {
        sv2.setIndex(i, (char) i);
    }
    sv2.setRecordCount(incoming.getRecordCount());
    return sv2;
}
Also used: SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException), SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2), OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException)
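
The notable part of this variant is its backoff loop: when allocation fails even after a spill, it sleeps, doubles the wait, and retries until the wait reaches 32 seconds. A standalone sketch of the same discipline, with the allocation attempt abstracted as a BooleanSupplier (an illustrative stand-in, not Drill API):

static void retryWithBackoff(java.util.function.BooleanSupplier tryAllocate)
        throws InterruptedException {
    int waitTime = 1;  // seconds
    while (true) {
        Thread.sleep(waitTime * 1000L);  // wait before the next attempt
        waitTime *= 2;                   // back off: 1s, 2s, 4s, 8s, 16s, ...
        if (tryAllocate.getAsBoolean()) {
            return;                      // allocation finally succeeded
        }
        if (waitTime >= 32) {            // same cap as the loop above
            throw new OutOfMemoryException("Unable to allocate sv2 buffer after repeated attempts");
        }
    }
}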

Example 3 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

From class ExternalSortBatch, method newSV2.

/**
   * Allocate and initialize the selection vector used as the sort index.
   * Assumes that memory is available for the vector, because the memory
   * manager has already ensured that space is available.
   *
   * @return a new, populated selection vector 2
   */
private SelectionVector2 newSV2() {
    SelectionVector2 sv2 = new SelectionVector2(allocator);
    if (!sv2.allocateNewSafe(incoming.getRecordCount())) {
        throw UserException.resourceError(new OutOfMemoryException("Unable to allocate sv2 buffer")).build(logger);
    }
    for (int i = 0; i < incoming.getRecordCount(); i++) {
        sv2.setIndex(i, (char) i);
    }
    sv2.setRecordCount(incoming.getRecordCount());
    return sv2;
}
Also used: SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2), OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException)
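
This variant fails fast instead of retrying: the low-level OutOfMemoryException is wrapped in Drill's UserException builder so it surfaces as a resource error. As a hedged sketch, the same builder chain could also carry diagnostics, reusing the addContext calls shown in Example 2 (the keys below are illustrative):

// Illustrative variant of the throw above; the context keys are examples only.
throw UserException.resourceError(new OutOfMemoryException("Unable to allocate sv2 buffer"))
        .addContext("record count", incoming.getRecordCount())
        .addContext("allocated memory", allocator.getAllocatedMemory())
        .addContext("allocator limit", allocator.getLimit())
        .build(logger);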

Example 4 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by axbaretto.

From class HashAggTemplate, method checkGroupAndAggrValues.

// Check if a group is present in the hash table; if not, insert it in the hash table.
// The htIdxHolder contains the index of the group in the hash table container; this same
// index is also used for the aggregation values maintained by the hash aggregate.
private void checkGroupAndAggrValues(int incomingRowIdx) {
    assert incomingRowIdx >= 0;
    assert !earlyOutput;
    /**
     * for debugging
     *     Object tmp = (incoming).getValueAccessorById(0, BigIntVector.class).getValueVector();
     *     BigIntVector vv0 = null;
     *     BigIntHolder holder = null;
     *
     *     if (tmp != null) {
     *     vv0 = ((BigIntVector) tmp);
     *     holder = new BigIntHolder();
     *     holder.value = vv0.getAccessor().get(incomingRowIdx) ;
     *     }
     */
    /*
    if ( handlingSpills && ( incomingRowIdx == 0 ) ) {
      // for debugging -- show the first row from a spilled batch
      Object tmp0 = (incoming).getValueAccessorById(NullableVarCharVector.class, 0).getValueVector();
      Object tmp1 = (incoming).getValueAccessorById(NullableVarCharVector.class, 1).getValueVector();
      Object tmp2 = (incoming).getValueAccessorById(NullableBigIntVector.class, 2).getValueVector();

      if (tmp0 != null && tmp1 != null && tmp2 != null) {
        NullableVarCharVector vv0 = ((NullableVarCharVector) tmp0);
        NullableVarCharVector vv1 = ((NullableVarCharVector) tmp1);
        NullableBigIntVector  vv2 = ((NullableBigIntVector) tmp2);
        logger.debug("The first row = {} , {} , {}", vv0.getAccessor().get(incomingRowIdx), vv1.getAccessor().get(incomingRowIdx), vv2.getAccessor().get(incomingRowIdx));
      }
    }
    */
    // The hash code is computed once, then its lower bits are used to determine the
    // partition to use, and the higher bits determine the location in the hash table.
    int hashCode;
    try {
        // htables[0].updateBatches();
        hashCode = htables[0].getHashCode(incomingRowIdx);
    } catch (SchemaChangeException e) {
        throw new UnsupportedOperationException("Unexpected schema change", e);
    }
    // right shift hash code for secondary (or tertiary...) spilling
    for (int i = 0; i < cycleNum; i++) {
        hashCode >>>= bitsInMask;
    }
    int currentPartition = hashCode & partitionMask;
    hashCode >>>= bitsInMask;
    HashTable.PutStatus putStatus = null;
    long allocatedBeforeHTput = allocator.getAllocatedMemory();
    // Proactive spill - in case there is no reserve memory - spill and retry putting later
    if (reserveValueBatchMemory == 0 && canSpill) {
        logger.trace("Reserved memory runs short, trying to {} a partition and retry Hash Table put() again.", is1stPhase ? "early return" : "spill");
        // spill to free some memory
        doSpill(currentPartition);
        retrySameIndex = true;
        // to retry this put()
        return;
    }
    // ==========================================
    try {
        putStatus = htables[currentPartition].put(incomingRowIdx, htIdxHolder, hashCode);
    } catch (RetryAfterSpillException re) {
        if (!canSpill) {
            throw new OutOfMemoryException(getOOMErrorMsg("Can not spill"));
        }
        logger.trace("HT put failed with an OOM, trying to {} a partition and retry Hash Table put() again.", is1stPhase ? "early return" : "spill");
        // for debugging - in case there's a leak
        long memDiff = allocator.getAllocatedMemory() - allocatedBeforeHTput;
        if (memDiff > 0) {
            logger.warn("Leak: HashTable put() OOM left behind {} bytes allocated", memDiff);
        }
        // spill to free some memory
        doSpill(currentPartition);
        retrySameIndex = true;
        // to retry this put()
        return;
    } catch (OutOfMemoryException exc) {
        throw new OutOfMemoryException(getOOMErrorMsg("HT was: " + allocatedBeforeHTput), exc);
    } catch (SchemaChangeException e) {
        throw new UnsupportedOperationException("Unexpected schema change", e);
    }
    long allocatedBeforeAggCol = allocator.getAllocatedMemory();
    boolean needToCheckIfSpillIsNeeded = allocatedBeforeAggCol > allocatedBeforeHTput;
    // 
    if (putStatus == HashTable.PutStatus.NEW_BATCH_ADDED) {
        try {
            // try to preempt an OOM by using the reserve
            useReservedValuesMemory();
            // allocate a new (internal) values batch
            addBatchHolder(currentPartition);
            // restore the reserve, if possible
            restoreReservedMemory();
            // A reason to check for a spill - In case restore-reserve failed
            needToCheckIfSpillIsNeeded = (0 == reserveValueBatchMemory);
            // just allocated a planned batch
            if (plannedBatches > 0) {
                plannedBatches--;
            }
            long totalAddedMem = allocator.getAllocatedMemory() - allocatedBeforeHTput;
            long aggValuesAddedMem = allocator.getAllocatedMemory() - allocatedBeforeAggCol;
            logger.trace("MEMORY CHECK AGG: allocated now {}, added {}, total (with HT) added {}", allocator.getAllocatedMemory(), aggValuesAddedMem, totalAddedMem);
            // resize the batch estimates if needed (e.g., varchars may take more memory than estimated)
            if (totalAddedMem > estMaxBatchSize) {
                logger.trace("Adjusting Batch size estimate from {} to {}", estMaxBatchSize, totalAddedMem);
                estMaxBatchSize = totalAddedMem;
                needToCheckIfSpillIsNeeded = true;
            }
            if (aggValuesAddedMem > estValuesBatchSize) {
                logger.trace("Adjusting Values Batch size from {} to {}", estValuesBatchSize, aggValuesAddedMem);
                estValuesBatchSize = aggValuesAddedMem;
                needToCheckIfSpillIsNeeded = true;
            }
        } catch (OutOfMemoryException exc) {
            throw new OutOfMemoryException(getOOMErrorMsg("AGGR"), exc);
        }
    } else if (putStatus == HashTable.PutStatus.KEY_ADDED_LAST) {
        // If a batch just became full (i.e. another batch would be allocated soon) -- then need to
        // check (later, see below) if the memory limits are too close, and if so -- then spill !
        // planning to allocate one more batch
        plannedBatches++;
        needToCheckIfSpillIsNeeded = true;
    }
    // =================================================================
    // Locate the matching aggregate columns and perform the aggregation
    // =================================================================
    int currentIdx = htIdxHolder.value;
    BatchHolder bh = batchHolders[currentPartition].get((currentIdx >>> 16) & HashTable.BATCH_MASK);
    int idxWithinBatch = currentIdx & HashTable.BATCH_MASK;
    if (bh.updateAggrValues(incomingRowIdx, idxWithinBatch)) {
        numGroupedRecords++;
    }
    // ===================================================================================
    if (needToCheckIfSpillIsNeeded && canSpill && useMemoryPrediction) {
        spillIfNeeded(currentPartition);
    }
}
Also used: SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException), ChainedHashTable (org.apache.drill.exec.physical.impl.common.ChainedHashTable), HashTable (org.apache.drill.exec.physical.impl.common.HashTable), RetryAfterSpillException (org.apache.drill.common.exceptions.RetryAfterSpillException), OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException)
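
The partitioning arithmetic near the top of checkGroupAndAggrValues is easy to miss among the error handling: the low bits of one hash code select the spill partition, the remaining bits locate the entry in that partition's hash table, and each spill cycle shifts further right so fresh bits are used. A compact sketch of just that logic; the method name and the 8-partition choice are illustrative, not Drill internals:

// Split one hash code between partition selection and table lookup.
static int[] splitHashCode(int hashCode, int cycleNum) {
    final int bitsInMask = 3;                        // log2(partition count), e.g. 8 partitions
    final int partitionMask = (1 << bitsInMask) - 1; // 0b111
    for (int i = 0; i < cycleNum; i++) {
        hashCode >>>= bitsInMask;                    // secondary (or tertiary...) spill re-shifts
    }
    int partition = hashCode & partitionMask;        // low bits pick the partition
    int tableHash = hashCode >>> bitsInMask;         // higher bits address the hash table
    return new int[] { partition, tableHash };
}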

Example 5 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by axbaretto.

From class BroadcastSenderRootExec, method innerNext.

@Override
public boolean innerNext() {
    RecordBatch.IterOutcome out = next(incoming);
    logger.debug("Outcome of sender next {}", out);
    switch(out) {
        case OUT_OF_MEMORY:
            throw new OutOfMemoryException();
        case STOP:
        case NONE:
            for (int i = 0; i < tunnels.length; ++i) {
                FragmentWritableBatch b2 = FragmentWritableBatch.getEmptyLast(handle.getQueryId(),
                        handle.getMajorFragmentId(), handle.getMinorFragmentId(),
                        config.getOppositeMajorFragmentId(), receivingMinorFragments[i]);
                stats.startWait();
                try {
                    tunnels[i].sendRecordBatch(b2);
                } finally {
                    stats.stopWait();
                }
            }
            return false;
        case OK_NEW_SCHEMA:
        case OK:
            WritableBatch writableBatch = incoming.getWritableBatch().transfer(oContext.getAllocator());
            if (tunnels.length > 1) {
                writableBatch.retainBuffers(tunnels.length - 1);
            }
            for (int i = 0; i < tunnels.length; ++i) {
                FragmentWritableBatch batch = new FragmentWritableBatch(false, handle.getQueryId(),
                        handle.getMajorFragmentId(), handle.getMinorFragmentId(),
                        config.getOppositeMajorFragmentId(), receivingMinorFragments[i], writableBatch);
                updateStats(batch);
                stats.startWait();
                try {
                    tunnels[i].sendRecordBatch(batch);
                } finally {
                    stats.stopWait();
                }
            }
            return ok;
        case NOT_YET:
        default:
            throw new IllegalStateException();
    }
}
Also used: FragmentWritableBatch (org.apache.drill.exec.record.FragmentWritableBatch), RecordBatch (org.apache.drill.exec.record.RecordBatch), WritableBatch (org.apache.drill.exec.record.WritableBatch), OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException), DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint), MinorFragmentEndpoint (org.apache.drill.exec.physical.MinorFragmentEndpoint)
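
When the batch fans out to multiple tunnels, retainBuffers(tunnels.length - 1) adds one buffer reference per extra receiver, because each send ultimately releases one. A minimal sketch of that reference-counting discipline; RefCounted and its methods are generic stand-ins, not the Drill buffer API:

import java.util.List;
import java.util.function.Consumer;

// The batch starts with one reference and each send consumes one,
// so sharing with N receivers requires retaining N - 1 extra references.
interface RefCounted {
    void retain(int increment);
    void release();
}

static void broadcast(RefCounted batch, List<Consumer<RefCounted>> tunnels) {
    if (tunnels.size() > 1) {
        batch.retain(tunnels.size() - 1);  // one extra reference per additional tunnel
    }
    for (Consumer<RefCounted> tunnel : tunnels) {
        tunnel.accept(batch);              // each receiver releases its reference when done
    }
}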

Aggregations

OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException): 44 uses
DrillBuf (io.netty.buffer.DrillBuf): 12 uses
SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2): 10 uses
Test (org.junit.Test): 10 uses
IOException (java.io.IOException): 9 uses
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 8 uses
ByteBuf (io.netty.buffer.ByteBuf): 6 uses
BufferAllocator (org.apache.drill.exec.memory.BufferAllocator): 6 uses
LogFixture (org.apache.drill.test.LogFixture): 6 uses
LogFixtureBuilder (org.apache.drill.test.LogFixture.LogFixtureBuilder): 6 uses
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 6 uses
MemoryTest (org.apache.drill.categories.MemoryTest): 4 uses
RetryAfterSpillException (org.apache.drill.common.exceptions.RetryAfterSpillException): 4 uses
Accountant (org.apache.drill.exec.memory.Accountant): 4 uses
RecordBatchData (org.apache.drill.exec.physical.impl.sort.RecordBatchData): 3 uses
DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint): 3 uses
ValueVector (org.apache.drill.exec.vector.ValueVector): 3 uses
Stopwatch (com.google.common.base.Stopwatch): 2 uses
CompositeByteBuf (io.netty.buffer.CompositeByteBuf): 2 uses
CorruptedFrameException (io.netty.handler.codec.CorruptedFrameException): 2 uses