Example 6 with SimpleRowIngestionMeters

Use of org.apache.druid.segment.incremental.SimpleRowIngestionMeters in project druid by druid-io.

The class FilteringCloseableInputRowIteratorTest, method setup.

@Before
public void setup() {
    rowIngestionMeters = new SimpleRowIngestionMeters();
    parseExceptionHandler = new ParseExceptionHandler(
        rowIngestionMeters,
        false,
        Integer.MAX_VALUE,
        // do not use Integer.MAX_VALUE since it will create an object array of this length
        1024
    );
}
Also used: ParseExceptionHandler (org.apache.druid.segment.incremental.ParseExceptionHandler), SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters), Before (org.junit.Before)
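
For context, a minimal sketch of what these meters expose (hedged: the increment and getter names come from the RowIngestionMeters interface this test already imports; the snippet is illustrative and not part of the test above):

SimpleRowIngestionMeters meters = new SimpleRowIngestionMeters();
// SimpleRowIngestionMeters keeps plain in-memory counters, so tests can
// assert on ingestion outcomes without wiring up a metrics emitter.
meters.incrementProcessed();
meters.incrementUnparseable();
Assert.assertEquals(1, meters.getProcessed());
Assert.assertEquals(1, meters.getUnparseable());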

Example 7 with SimpleRowIngestionMeters

Use of org.apache.druid.segment.incremental.SimpleRowIngestionMeters in project druid by druid-io.

The class StreamAppenderatorTest, method testMaxBytesInMemoryWithSkipBytesInMemoryOverheadCheckConfig.

@Test
public void testMaxBytesInMemoryWithSkipBytesInMemoryOverheadCheckConfig() throws Exception {
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(100, 1024, null, true, new SimpleRowIngestionMeters(), true)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = () -> {
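            // the commit metadata pairs the counter object with its value at the
            // time this supplier is invoked; the Committer's run() is a no-op here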
            final Object metadata = ImmutableMap.of(eventCount, eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                    // Do nothing
                }
            };
        };
        appenderator.startJob();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
        // expectedSizeInBytes = 44 (map overhead) + 28 (TimeAndDims overhead) + 56 (aggregator metrics) + 54 (dimsKeySize) = 182, plus 1 byte when SQL-compatible null handling is enabled
        int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        Assert.assertEquals(182 + nullHandlingOverhead, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        appenderator.add(IDENTIFIERS.get(1), ir("2000", "bar", 1), committerSupplier);
        Assert.assertEquals(182 + nullHandlingOverhead, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(1)));
        appenderator.close();
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
    }
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters), Committer (org.apache.druid.data.input.Committer), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)
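
The byte accounting asserted above can be checked by hand; a hedged arithmetic sketch (the constants simply mirror the test's own comment, not independent measurements of Druid's internals):

int mapOverhead = 44;          // per-row map overhead, per the comment in the test
int timeAndDims = 28;          // TimeAndDims overhead
int aggregatorMetrics = 56;    // aggregator metrics
int dimsKeySize = 54;          // dimension key size
int expectedPerRow = mapOverhead + timeAndDims + aggregatorMetrics + dimsKeySize; // = 182
int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0; // +1 byte per row in SQL-compatible mode
// each identifier holds one row, so each sink reports 182 + nullHandlingOverhead bytes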

Example 8 with SimpleRowIngestionMeters

Use of org.apache.druid.segment.incremental.SimpleRowIngestionMeters in project druid by druid-io.

The class ClosedSegmentsSinksBatchAppenderatorTest, method testMaxBytesInMemoryInMultipleSinksWithSkipBytesInMemoryOverheadCheckConfig.

@Test
public void testMaxBytesInMemoryInMultipleSinksWithSkipBytesInMemoryOverheadCheckConfig() throws Exception {
    try (final ClosedSegmensSinksBatchAppenderatorTester tester = new ClosedSegmensSinksBatchAppenderatorTester(100, 1024, null, true, new SimpleRowIngestionMeters(), true)) {
        final Appenderator appenderator = tester.getAppenderator();
        appenderator.startJob();
        appenderator.add(IDENTIFIERS.get(0), createInputRow("2000", "foo", 1), null);
        // expectedSizeInBytes = 44 (map overhead) + 28 (TimeAndDims overhead) + 56 (aggregator metrics) + 54 (dimsKeySize) = 182
        int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        Assert.assertEquals(182 + nullHandlingOverhead, ((BatchAppenderator) appenderator).getBytesCurrentlyInMemory());
        appenderator.add(IDENTIFIERS.get(1), createInputRow("2000", "bar", 1), null);
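        // a second sink now holds its own ~182-byte row, so the in-memory total doubles
        // (plus one null-handling byte per row in SQL-compatible mode)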
        Assert.assertEquals(364 + 2 * nullHandlingOverhead, ((BatchAppenderator) appenderator).getBytesCurrentlyInMemory());
        Assert.assertEquals(2, appenderator.getSegments().size());
        appenderator.close();
        Assert.assertEquals(0, ((BatchAppenderator) appenderator).getRowsInMemory());
    }
}
Also used: SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)

Example 9 with SimpleRowIngestionMeters

Use of org.apache.druid.segment.incremental.SimpleRowIngestionMeters in project druid by druid-io.

The class ClosedSegmentsSinksBatchAppenderatorTest, method testTaskDoesNotFailAsExceededMemoryWithSkipBytesInMemoryOverheadCheckConfig.

@Test
public void testTaskDoesNotFailAsExceededMemoryWithSkipBytesInMemoryOverheadCheckConfig() throws Exception {
    try (final ClosedSegmensSinksBatchAppenderatorTester tester = new ClosedSegmensSinksBatchAppenderatorTester(100, 10, null, true, new SimpleRowIngestionMeters(), true)) {
        final Appenderator appenderator = tester.getAppenderator();
        appenderator.startJob();
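        // the in-memory byte limit here is only 10, far below a single ~182-byte row,
        // so each add is expected to be followed by an immediate persist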
        appenderator.add(IDENTIFIERS.get(0), createInputRow("2000", "foo", 1), null);
        // Expected 0 since we persisted after the add
        Assert.assertEquals(0, ((BatchAppenderator) appenderator).getBytesCurrentlyInMemory());
        appenderator.add(IDENTIFIERS.get(0), createInputRow("2000", "foo", 1), null);
        Assert.assertEquals(0, ((BatchAppenderator) appenderator).getBytesCurrentlyInMemory());
    }
}
Also used: SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)

Example 10 with SimpleRowIngestionMeters

Use of org.apache.druid.segment.incremental.SimpleRowIngestionMeters in project druid by druid-io.

The class ClosedSegmentsSinksBatchAppenderatorTest, method testPushContract.

@Test(timeout = 10000L)
public void testPushContract() throws Exception {
    final RowIngestionMeters rowIngestionMeters = new SimpleRowIngestionMeters();
    try (final ClosedSegmensSinksBatchAppenderatorTester tester = new ClosedSegmensSinksBatchAppenderatorTester(1, 50000L, null, false, rowIngestionMeters)) {
        final Appenderator appenderator = tester.getAppenderator();
        appenderator.startJob();
        appenderator.add(IDENTIFIERS.get(0), createInputRow("2000", "bar", 1), null);
        appenderator.add(IDENTIFIERS.get(0), createInputRow("2000", "bar2", 1), null);
        appenderator.add(IDENTIFIERS.get(1), createInputRow("2000", "bar3", 1), null);
        // push only a single segment
        final SegmentsAndCommitMetadata segmentsAndCommitMetadata = appenderator.push(Collections.singletonList(IDENTIFIERS.get(0)), null, false).get();
        // only one segment must have been pushed:
        Assert.assertEquals(Collections.singletonList(IDENTIFIERS.get(0)), Lists.transform(segmentsAndCommitMetadata.getSegments(), SegmentIdWithShardSpec::fromDataSegment).stream().sorted().collect(Collectors.toList()));
        Assert.assertEquals(tester.getPushedSegments().stream().sorted().collect(Collectors.toList()), segmentsAndCommitMetadata.getSegments().stream().sorted().collect(Collectors.toList()));
        // the responsibility for dropping lies with the BatchAppenderatorDriver, so drop manually:
        appenderator.drop(IDENTIFIERS.get(0));
        // and the segment that was not pushed should still be active
        Assert.assertEquals(Collections.singletonList(IDENTIFIERS.get(1)), appenderator.getSegments());
    }
}
Also used: SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters), RowIngestionMeters (org.apache.druid.segment.incremental.RowIngestionMeters), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)
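
The sorted-stream comparison in this test makes the segment assertions order-insensitive. A hypothetical helper capturing the same idiom (SortedLists and its method are illustrative names, not part of the Druid test):

import java.util.List;
import java.util.stream.Collectors;

final class SortedLists {
    // Sort both sides of an assertion so it does not depend on the order
    // in which segments were pushed or reported.
    static <T extends Comparable<T>> List<T> sorted(List<T> list) {
        return list.stream().sorted().collect(Collectors.toList());
    }
}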

Aggregations

SimpleRowIngestionMeters (org.apache.druid.segment.incremental.SimpleRowIngestionMeters): 11 uses
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 10 uses
Test (org.junit.Test): 10 uses
RowIngestionMeters (org.apache.druid.segment.incremental.RowIngestionMeters): 4 uses
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3 uses
Committer (org.apache.druid.data.input.Committer): 3 uses
ParseExceptionHandler (org.apache.druid.segment.incremental.ParseExceptionHandler): 1 use
Before (org.junit.Before): 1 use