
Example 16 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

Class TestLenientAllocation, method testLenient:

/**
 * Use a test-time hack to force the allocator to be lenient,
 * regardless of whether we are in debug mode or not.
 */
@Test
public void testLenient() {
    LogFixtureBuilder logBuilder = LogFixture.builder().logger(Accountant.class, Level.WARN);
    try (LogFixture logFixture = logBuilder.build()) {
        // Test can't run without assertions
        assertTrue(AssertionUtil.isAssertionsEnabled());
        // Create a child allocator
        BufferAllocator allocator = fixture.allocator().newChildAllocator("test", 10 * 1024, 128 * 1024);
        ((Accountant) allocator).forceLenient();
        // Allocate most of the available memory
        DrillBuf buf1 = allocator.buffer(64 * 1024);
        // Oops, we did our math wrong; allocate too large a buffer.
        DrillBuf buf2 = allocator.buffer(128 * 1024);
        assertEquals(192 * 1024, allocator.getAllocatedMemory());
        // We keep making mistakes.
        DrillBuf buf3 = allocator.buffer(32 * 1024);
        // Right up to the hard limit
        DrillBuf buf4 = allocator.buffer(32 * 1024);
        assertEquals(256 * 1024, allocator.getAllocatedMemory());
        try {
            allocator.buffer(8);
            fail();
        } catch (OutOfMemoryException e) {
        // Expected
        }
        // Recover from our excesses
        buf2.close();
        buf3.close();
        buf4.close();
        assertEquals(64 * 1024, allocator.getAllocatedMemory());
        // We're back in the good graces of the allocator,
        // can allocate more.
        DrillBuf buf5 = allocator.buffer(8);
        // Clean up
        buf1.close();
        buf5.close();
        allocator.close();
    }
}
Also used: LogFixture(org.apache.drill.test.LogFixture), Accountant(org.apache.drill.exec.memory.Accountant), LogFixtureBuilder(org.apache.drill.test.LogFixture.LogFixtureBuilder), OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException), BufferAllocator(org.apache.drill.exec.memory.BufferAllocator), DrillBuf(io.netty.buffer.DrillBuf), SubOperatorTest(org.apache.drill.test.SubOperatorTest), Test(org.junit.Test)
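
The test above relies on Drill's allocator hierarchy throwing OutOfMemoryException once a hard limit is exceeded. The following is a minimal, hypothetical sketch (ChildAllocatorSketch is not a Drill class) of the same allocate-catch-release pattern outside a test fixture, assuming only the RootAllocator, newChildAllocator, buffer and close APIs that appear in these examples.

import io.netty.buffer.DrillBuf;
import org.apache.drill.exec.exception.OutOfMemoryException;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocator;

public class ChildAllocatorSketch {
    public static void main(String[] args) {
        // Root capped at 256 KiB; the child reserves 10 KiB and may grow to 128 KiB.
        try (BufferAllocator root = new RootAllocator(256 * 1024);
             BufferAllocator child = root.newChildAllocator("sketch", 10 * 1024, 128 * 1024)) {
            DrillBuf buf = null;
            try {
                buf = child.buffer(64 * 1024);   // fits under the child's 128 KiB limit
                // ... fill and read the buffer ...
            } catch (OutOfMemoryException e) {
                // The allocator refused the request; back off, spill, or fail the query
                // instead of letting the failure propagate as an unrecoverable error.
            } finally {
                if (buf != null) {
                    buf.close();                 // return the memory to the allocator
                }
            }
        }                                        // try-with-resources closes child, then root
    }
}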

Example 17 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

Class HashTableTemplate, method resizeAndRehashIfNeeded:

// Resize the hash table if needed by creating a new one with double the number of buckets.
// For each entry in the old hash table, re-hash it to the new table and update the metadata
// in the new table; the metadata consists of the startIndices, links and hashValues.
// Note that the keys stored in the BatchHolders are not moved around.
private void resizeAndRehashIfNeeded() {
    if (numEntries < threshold) {
        return;
    }
    if (EXTRA_DEBUG) {
        logger.debug("Hash table numEntries = {}, threshold = {}; resizing the table...", numEntries, threshold);
    }
    // If the table is already at maximum capacity, raise the threshold so that
    // future attempts to resize will return immediately.
    if (tableSize == MAXIMUM_CAPACITY) {
        threshold = Integer.MAX_VALUE;
        return;
    }
    int newTableSize = 2 * tableSize;
    newTableSize = roundUpToPowerOf2(newTableSize);
    // If there is not enough free memory for the new start indices plus the new links and
    // the new hash-values (which replace the existing ones inside rehash()), then OOM.
    if (4 * /* sizeof(int) */ (newTableSize + 2 * HashTable.BATCH_SIZE) >= allocator.getLimit() - allocator.getAllocatedMemory()) {
        throw new OutOfMemoryException("Resize Hash Table");
    }
    tableSize = newTableSize;
    if (tableSize > MAXIMUM_CAPACITY) {
        tableSize = MAXIMUM_CAPACITY;
    }
    long t0 = System.currentTimeMillis();
    // set the new threshold based on the new table size and load factor
    threshold = (int) Math.ceil(tableSize * htConfig.getLoadFactor());
    IntVector newStartIndices = allocMetadataVector(tableSize, EMPTY_SLOT);
    for (int i = 0; i < batchHolders.size(); i++) {
        BatchHolder bh = batchHolders.get(i);
        int batchStartIdx = i * BATCH_SIZE;
        bh.rehash(tableSize, newStartIndices, batchStartIdx);
    }
    startIndices.clear();
    startIndices = newStartIndices;
    if (EXTRA_DEBUG) {
        logger.debug("After resizing and rehashing, dumping the hash table...");
        logger.debug("Number of buckets = {}.", startIndices.getAccessor().getValueCount());
        for (int i = 0; i < startIndices.getAccessor().getValueCount(); i++) {
            logger.debug("Bucket: {}, startIdx[ {} ] = {}.", i, i, startIndices.getAccessor().get(i));
            int startIdx = startIndices.getAccessor().get(i);
            BatchHolder bh = batchHolders.get((startIdx >>> 16) & BATCH_MASK);
            bh.dump(startIdx);
        }
    }
    resizingTime += System.currentTimeMillis() - t0;
    numResizing++;
}
Also used: BigIntVector(org.apache.drill.exec.vector.BigIntVector), IntVector(org.apache.drill.exec.vector.IntVector), OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException)
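
The guard in resizeAndRehashIfNeeded estimates the extra memory a resize needs as 4 bytes per int times (newTableSize + 2 * HashTable.BATCH_SIZE). A small, hypothetical worked example of that estimate follows; ResizeEstimateSketch is not a Drill class, and the 64K value assumed for HashTable.BATCH_SIZE is an assumption, not taken from the snippet above.

public class ResizeEstimateSketch {
    // Assumption: Drill's HashTable.BATCH_SIZE is 64K entries.
    static final int BATCH_SIZE = 65_536;

    /** Bytes the resize check reserves: 4 * (newTableSize + 2 * BATCH_SIZE). */
    static long resizeEstimate(int newTableSize) {
        return 4L * (newTableSize + 2L * BATCH_SIZE);
    }

    public static void main(String[] args) {
        // Doubling a 1M-bucket table to 2M buckets needs about
        // 4 * (2,097,152 + 131,072) = 8,912,896 bytes (~8.5 MiB) of headroom;
        // if the allocator's remaining budget is smaller, the resize throws
        // OutOfMemoryException("Resize Hash Table") as shown above.
        System.out.println(resizeEstimate(2 * 1_048_576));
    }
}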

Example 18 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

Class FragmentExecutor, method run:

@Override
public void run() {
    final Thread myThread = Thread.currentThread();
    if (!myThreadRef.compareAndSet(null, myThread)) {
        return;
    }
    final String originalThreadName = myThread.getName();
    final FragmentHandle fragmentHandle = fragmentContext.getHandle();
    final ClusterCoordinator clusterCoordinator = fragmentContext.getClusterCoordinator();
    final DrillbitStatusListener drillbitStatusListener = new FragmentDrillbitStatusListener();
    final String newThreadName = QueryIdHelper.getExecutorThreadName(fragmentHandle);
    try {
        myThread.setName(newThreadName);
        // if we didn't get the root operator when the executor was created, create it now.
        final FragmentRoot rootOperator = this.rootOperator != null ? this.rootOperator : fragmentContext.getPlanReader().readFragmentRoot(fragment.getFragmentJson());
        root = ImplCreator.getExec(fragmentContext, rootOperator);
        if (root == null) {
            return;
        }
        clusterCoordinator.addDrillbitStatusListener(drillbitStatusListener);
        updateState(FragmentState.RUNNING);
        eventProcessor.start();
        injector.injectPause(fragmentContext.getExecutionControls(), "fragment-running", logger);
        final DrillbitEndpoint endpoint = fragmentContext.getEndpoint();
        logger.debug("Starting fragment {}:{} on {}:{}", fragmentHandle.getMajorFragmentId(), fragmentHandle.getMinorFragmentId(), endpoint.getAddress(), endpoint.getUserPort());
        final UserGroupInformation queryUserUgi = fragmentContext.isImpersonationEnabled() ? ImpersonationUtil.createProxyUgi(fragmentContext.getQueryUserName()) : ImpersonationUtil.getProcessUserUGI();
        queryUserUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
            injector.injectChecked(fragmentContext.getExecutionControls(), "fragment-execution", IOException.class);
            while (shouldContinue()) {
                for (FragmentHandle fragmentHandle1; (fragmentHandle1 = receiverFinishedQueue.poll()) != null; ) {
                    // See if we have any finished requests. If so execute them.
                    root.receivingFragmentFinished(fragmentHandle1);
                }
                if (!root.next()) {
                    // Fragment has processed all of its data
                    break;
                }
            }
            return null;
        });
    } catch (QueryCancelledException e) {
    // Ignore: indicates query cancelled by this executor
    } catch (OutOfMemoryError | OutOfMemoryException e) {
        if (FailureUtils.isDirectMemoryOOM(e)) {
            root.dumpBatches(e);
            fail(UserException.memoryError(e).build(logger));
        } else {
            // we have a heap out of memory error. The JVM is unstable, exit.
            FailureUtils.unrecoverableFailure(e, "Unable to handle out of memory condition in FragmentExecutor.", EXIT_CODE_HEAP_OOM);
        }
    } catch (InterruptedException e) {
        // Swallow interrupted exceptions since we intentionally interrupt the root when cancelling a query
        logger.trace("Interrupted root: {}", root, e);
    } catch (Throwable t) {
        if (root != null) {
            root.dumpBatches(t);
        }
        fail(t);
    } finally {
        // Don't process any more termination requests, we are done.
        eventProcessor.terminate();
        // Clear the interrupt flag if it is set.
        Thread.interrupted();
        // At this point the fragment could be in the FAILED, RUNNING, or CANCELLATION_REQUESTED state:
        // FAILED because of an exception thrown in the execution loop (root.next()),
        // CANCELLATION_REQUESTED because of a CANCEL request received from the Foreman;
        // otherwise the fragment ends up in the FINISHED state.
        cleanup(FragmentState.FINISHED);
        clusterCoordinator.removeDrillbitStatusListener(drillbitStatusListener);
        myThread.setName(originalThreadName);
    }
}
Also used: FragmentRoot(org.apache.drill.exec.physical.base.FragmentRoot), FragmentHandle(org.apache.drill.exec.proto.ExecProtos.FragmentHandle), ClusterCoordinator(org.apache.drill.exec.coord.ClusterCoordinator), IOException(java.io.IOException), DrillbitStatusListener(org.apache.drill.exec.work.foreman.DrillbitStatusListener), DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint), QueryCancelledException(org.apache.drill.exec.ops.QueryCancelledException), OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException), UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
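
The catch block above distinguishes direct-memory exhaustion (recoverable: fail only the query) from a heap OutOfMemoryError (unrecoverable: exit the JVM). Below is a simplified, hypothetical sketch of that split; it uses a plain instanceof check in place of FailureUtils.isDirectMemoryOOM, which in the real code also recognizes direct-buffer OutOfMemoryErrors, and OomHandlingSketch/doWork are made-up names.

import org.apache.drill.exec.exception.OutOfMemoryException;

public class OomHandlingSketch {
    public static void main(String[] args) {
        try {
            doWork();
        } catch (OutOfMemoryError | OutOfMemoryException e) {
            if (e instanceof OutOfMemoryException) {
                // Drill's own exception means direct (off-heap) memory ran out:
                // the JVM heap is still healthy, so only the current query/fragment fails.
                System.err.println("Query failed: out of direct memory: " + e.getMessage());
            } else {
                // A heap OutOfMemoryError leaves the JVM in an unreliable state;
                // the example above exits via FailureUtils.unrecoverableFailure(...)
                // with EXIT_CODE_HEAP_OOM. halt() stands in for that here.
                Runtime.getRuntime().halt(1);
            }
        }
    }

    // Hypothetical stand-in for the fragment execution loop.
    private static void doWork() {
        throw new OutOfMemoryException("simulated direct-memory exhaustion");
    }
}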

Example 19 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

Class TestBaseAllocator, method testAllocator_overAllocateParent:

@Test
public void testAllocator_overAllocateParent() throws Exception {
    try (final RootAllocator rootAllocator = new RootAllocator(MAX_ALLOCATION)) {
        try (final BufferAllocator childAllocator = rootAllocator.newChildAllocator("overAllocateParent", 0, MAX_ALLOCATION)) {
            final DrillBuf drillBuf1 = rootAllocator.buffer(MAX_ALLOCATION / 2);
            assertNotNull("allocation failed", drillBuf1);
            final DrillBuf drillBuf2 = childAllocator.buffer(MAX_ALLOCATION / 2);
            assertNotNull("allocation failed", drillBuf2);
            try {
                childAllocator.buffer(MAX_ALLOCATION / 4);
                fail("allocated memory beyond max allowed");
            } catch (OutOfMemoryException e) {
            // expected
            }
            drillBuf1.release();
            drillBuf2.release();
        }
    }
}
Also used: OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException), DrillBuf(io.netty.buffer.DrillBuf), MemoryTest(org.apache.drill.categories.MemoryTest), Test(org.junit.Test), BaseTest(org.apache.drill.test.BaseTest)
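
The test above verifies that a child allocator cannot push the root past its limit. Where a caller would rather avoid the exception path, the allocator exposes getLimit() and getAllocatedMemory() (used in the hash-table example above) to check headroom first. A minimal, hypothetical sketch (HeadroomSketch is not a Drill class); note the check is only advisory, since other threads may allocate between the check and the buffer() call.

import io.netty.buffer.DrillBuf;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocator;

public class HeadroomSketch {
    public static void main(String[] args) {
        try (BufferAllocator root = new RootAllocator(1024 * 1024)) {   // 1 MiB cap (arbitrary)
            long wanted = 512 * 1024;
            long headroom = root.getLimit() - root.getAllocatedMemory();
            if (wanted <= headroom) {
                DrillBuf buf = root.buffer((int) wanted);
                // ... use the buffer ...
                buf.close();
            } else {
                // Not enough budget: spill, retry later, or fail gracefully
                // instead of letting buffer() throw OutOfMemoryException.
                System.err.println("Skipping allocation: only " + headroom + " bytes free");
            }
        }
    }
}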

Example 20 with OutOfMemoryException

Use of org.apache.drill.exec.exception.OutOfMemoryException in project drill by apache.

Class TestAllocators, method testAllocators:

@Test
public void testAllocators() throws Exception {
    // Setup a drillbit (initializes a root allocator)
    final DrillConfig config = DrillConfig.create(TEST_CONFIGURATIONS);
    try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
        final Drillbit bit = new Drillbit(config, serviceSet)) {
        bit.run();
        final DrillbitContext bitContext = bit.getContext();
        FunctionImplementationRegistry functionRegistry = bitContext.getFunctionImplementationRegistry();
        StoragePluginRegistry storageRegistry = new StoragePluginRegistryImpl(bitContext);
        // Create a few Fragment Contexts
        BitControl.PlanFragment.Builder pfBuilder1 = BitControl.PlanFragment.newBuilder();
        pfBuilder1.setMemInitial(1500000);
        BitControl.PlanFragment pf1 = pfBuilder1.build();
        BitControl.PlanFragment.Builder pfBuilder2 = BitControl.PlanFragment.newBuilder();
        pfBuilder2.setMemInitial(500000);
        BitControl.PlanFragment pf2 = pfBuilder2.build();
        FragmentContextImpl fragmentContext1 = new FragmentContextImpl(bitContext, pf1, null, functionRegistry);
        FragmentContextImpl fragmentContext2 = new FragmentContextImpl(bitContext, pf2, null, functionRegistry);
        // Get a few physical operators. Easiest way is to read a physical plan.
        PhysicalPlanReader planReader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(bitContext, storageRegistry);
        PhysicalPlan plan = planReader.readPhysicalPlan(Files.asCharSource(DrillFileUtils.getResourceAsFile(planFile), Charsets.UTF_8).read());
        List<PhysicalOperator> physicalOperators = plan.getSortedOperators();
        Iterator<PhysicalOperator> physicalOperatorIterator = physicalOperators.iterator();
        PhysicalOperator physicalOperator1 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator2 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator3 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator4 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator5 = physicalOperatorIterator.next();
        PhysicalOperator physicalOperator6 = physicalOperatorIterator.next();
        // Create some bogus Operator profile defs and stats to create operator contexts
        OpProfileDef def;
        OperatorStats stats;
        // Use some bogus operator type to create a new operator context.
        def = new OpProfileDef(physicalOperator1.getOperatorId(), MockSubScanPOP.OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator1));
        stats = fragmentContext1.getStats().newOperatorStats(def, fragmentContext1.getAllocator());
        // Add a couple of Operator Contexts
        // Initial allocation = 1000000 bytes for all operators
        OperatorContext oContext11 = fragmentContext1.newOperatorContext(physicalOperator1);
        DrillBuf b11 = oContext11.getAllocator().buffer(1000000);
        OperatorContext oContext12 = fragmentContext1.newOperatorContext(physicalOperator2, stats);
        DrillBuf b12 = oContext12.getAllocator().buffer(500000);
        OperatorContext oContext21 = fragmentContext1.newOperatorContext(physicalOperator3);
        def = new OpProfileDef(physicalOperator4.getOperatorId(), TextFormatPlugin.WRITER_OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator4));
        stats = fragmentContext2.getStats().newOperatorStats(def, fragmentContext2.getAllocator());
        OperatorContext oContext22 = fragmentContext2.newOperatorContext(physicalOperator4, stats);
        DrillBuf b22 = oContext22.getAllocator().buffer(2000000);
        // New Fragment begins
        BitControl.PlanFragment.Builder pfBuilder3 = BitControl.PlanFragment.newBuilder();
        pfBuilder3.setMemInitial(1000000);
        BitControl.PlanFragment pf3 = pfBuilder3.build();
        FragmentContextImpl fragmentContext3 = new FragmentContextImpl(bitContext, pf3, null, functionRegistry);
        // New fragment starts an operator that allocates an amount within the limit
        def = new OpProfileDef(physicalOperator5.getOperatorId(), UnionAll.OPERATOR_TYPE, OperatorUtilities.getChildCount(physicalOperator5));
        stats = fragmentContext3.getStats().newOperatorStats(def, fragmentContext3.getAllocator());
        OperatorContext oContext31 = fragmentContext3.newOperatorContext(physicalOperator5, stats);
        DrillBuf b31a = oContext31.getAllocator().buffer(200000);
        // Previously running operator completes
        b22.release();
        ((AutoCloseable) oContext22).close();
        // Fragment 3 asks for more and fails
        boolean outOfMem = false;
        try {
            oContext31.getAllocator().buffer(44000000);
            fail("Fragment 3 should fail to allocate buffer");
        } catch (OutOfMemoryException e) {
            // Expected.
            outOfMem = true;
        }
        assertTrue(outOfMem);
        // Operator is Exempt from Fragment limits. Fragment 3 asks for more and succeeds
        OperatorContext oContext32 = fragmentContext3.newOperatorContext(physicalOperator6);
        try {
            DrillBuf b32 = oContext32.getAllocator().buffer(4400000);
            b32.release();
        } catch (OutOfMemoryException e) {
            fail("Fragment 3 failed to allocate buffer");
        } finally {
            closeOp(oContext32);
        }
        b11.release();
        closeOp(oContext11);
        b12.release();
        closeOp(oContext12);
        closeOp(oContext21);
        b31a.release();
        closeOp(oContext31);
        fragmentContext1.close();
        fragmentContext2.close();
        fragmentContext3.close();
    }
}
Also used: DrillbitContext(org.apache.drill.exec.server.DrillbitContext), StoragePluginRegistry(org.apache.drill.exec.store.StoragePluginRegistry), PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan), OpProfileDef(org.apache.drill.exec.ops.OpProfileDef), PhysicalPlanReader(org.apache.drill.exec.planner.PhysicalPlanReader), BitControl(org.apache.drill.exec.proto.BitControl), FragmentContextImpl(org.apache.drill.exec.ops.FragmentContextImpl), OperatorStats(org.apache.drill.exec.ops.OperatorStats), DrillConfig(org.apache.drill.common.config.DrillConfig), Drillbit(org.apache.drill.exec.server.Drillbit), RemoteServiceSet(org.apache.drill.exec.server.RemoteServiceSet), PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator), StoragePluginRegistryImpl(org.apache.drill.exec.store.StoragePluginRegistryImpl), OperatorContext(org.apache.drill.exec.ops.OperatorContext), FunctionImplementationRegistry(org.apache.drill.exec.expr.fn.FunctionImplementationRegistry), OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException), DrillBuf(io.netty.buffer.DrillBuf), MemoryTest(org.apache.drill.categories.MemoryTest), DrillTest(org.apache.drill.test.DrillTest), Test(org.junit.Test)
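
testAllocators releases every DrillBuf before closing the operator context (and ultimately the fragment allocator) that owns it, because a Drill allocator that is closed while buffers are still outstanding is treated as a memory leak. A minimal, hypothetical sketch of that release-then-close ordering using only the allocator API from these examples (ReleaseOrderSketch is not a Drill class):

import io.netty.buffer.DrillBuf;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocator;

public class ReleaseOrderSketch {
    public static void main(String[] args) {
        try (BufferAllocator root = new RootAllocator(8 * 1024 * 1024);
             BufferAllocator opAllocator = root.newChildAllocator("op", 0, 1024 * 1024)) {
            DrillBuf buf = opAllocator.buffer(256 * 1024);
            // ... operator does its work with buf ...
            buf.release();   // must precede opAllocator.close(); otherwise the
                             // allocator flags the outstanding buffer as a leak
        }                    // try-with-resources closes the child, then the root
    }
}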

Aggregations

OutOfMemoryException (org.apache.drill.exec.exception.OutOfMemoryException): 44 usages
DrillBuf (io.netty.buffer.DrillBuf): 12 usages
SelectionVector2 (org.apache.drill.exec.record.selection.SelectionVector2): 10 usages
Test (org.junit.Test): 10 usages
IOException (java.io.IOException): 9 usages
SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException): 8 usages
ByteBuf (io.netty.buffer.ByteBuf): 6 usages
BufferAllocator (org.apache.drill.exec.memory.BufferAllocator): 6 usages
LogFixture (org.apache.drill.test.LogFixture): 6 usages
LogFixtureBuilder (org.apache.drill.test.LogFixture.LogFixtureBuilder): 6 usages
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 6 usages
MemoryTest (org.apache.drill.categories.MemoryTest): 4 usages
RetryAfterSpillException (org.apache.drill.common.exceptions.RetryAfterSpillException): 4 usages
Accountant (org.apache.drill.exec.memory.Accountant): 4 usages
RecordBatchData (org.apache.drill.exec.physical.impl.sort.RecordBatchData): 3 usages
DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint): 3 usages
ValueVector (org.apache.drill.exec.vector.ValueVector): 3 usages
Stopwatch (com.google.common.base.Stopwatch): 2 usages
CompositeByteBuf (io.netty.buffer.CompositeByteBuf): 2 usages
CorruptedFrameException (io.netty.handler.codec.CorruptedFrameException): 2 usages