Example 6 with Committer

Use of org.apache.druid.data.input.Committer in project druid by druid-io.

Class StreamAppenderatorTest, method testRestoreFromDisk.

@Test
public void testRestoreFromDisk() throws Exception {
    final RealtimeTuningConfig tuningConfig;
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(2, true)) {
        final Appenderator appenderator = tester.getAppenderator();
        tuningConfig = tester.getTuningConfig();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return metadata;
                    }

                    @Override
                    public void run() {
                        // Do nothing
                    }
                };
            }
        };
        appenderator.startJob();
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "bar", 2), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "baz", 3), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "qux", 4), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "bob", 5), committerSupplier);
        appenderator.close();
        try (final StreamAppenderatorTester tester2 = new StreamAppenderatorTester(2, -1, tuningConfig.getBasePersistDirectory(), true)) {
            final Appenderator appenderator2 = tester2.getAppenderator();
            Assert.assertEquals(ImmutableMap.of("eventCount", 4), appenderator2.startJob());
            Assert.assertEquals(ImmutableList.of(IDENTIFIERS.get(0)), appenderator2.getSegments());
            Assert.assertEquals(4, appenderator2.getRowCount(IDENTIFIERS.get(0)));
        }
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Supplier(com.google.common.base.Supplier) Committer(org.apache.druid.data.input.Committer) RealtimeTuningConfig(org.apache.druid.segment.indexing.RealtimeTuningConfig) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
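
The anonymous Committer above reappears almost verbatim in the examples that follow. A small factory method could absorb that boilerplate; the sketch below is hypothetical (CommitterSuppliers and metadataCommitter are not part of the Druid API) and merely packages the pattern these tests repeat:

import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.data.input.Committer;

public class CommitterSuppliers {

    // Hypothetical helper, not part of the Druid API: builds a Committer
    // supplier that snapshots an event counter into the commit metadata,
    // exactly as the anonymous classes in these tests do.
    public static Supplier<Committer> metadataCommitter(final AtomicInteger eventCount) {
        return () -> {
            // Snapshot the count when the supplier is called; this map is what
            // a restarted appenderator's startJob() later returns as metadata.
            final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                    // Nothing to do in tests; a real committer would persist
                    // its source offsets here.
                }
            };
        };
    }
}

With such a helper, each test's committerSupplier shrinks to CommitterSuppliers.metadataCommitter(eventCount).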

Example 7 with Committer

Use of org.apache.druid.data.input.Committer in project druid by druid-io.

Class StreamAppenderatorTest, method testIgnoreMaxBytesInMemory.

@Test
public void testIgnoreMaxBytesInMemory() throws Exception {
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(100, -1, true)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = () -> {
            final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                    // Do nothing
                }
            };
        };
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
        appenderator.startJob();
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
        // We still calculate the size even when maxBytesInMemory is ignored, so the persist decision can be made.
        int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        Assert.assertEquals(182 + nullHandlingOverhead, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        Assert.assertEquals(1, ((StreamAppenderator) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(1), ir("2000", "bar", 1), committerSupplier);
        int sinkSizeOverhead = 2 * StreamAppenderator.ROUGH_OVERHEAD_PER_SINK;
        Assert.assertEquals((364 + 2 * nullHandlingOverhead) + sinkSizeOverhead, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        Assert.assertEquals(2, ((StreamAppenderator) appenderator).getRowsInMemory());
        appenderator.close();
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Committer(org.apache.druid.data.input.Committer) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
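
The 182-byte figure asserted above can be reproduced from the per-row breakdown quoted in the next example's comments. A minimal sketch of that arithmetic (class and method names here are illustrative; ROUGH_OVERHEAD_PER_SINK is referenced from the test's own package, so this sketch would live alongside it):

import org.apache.druid.common.config.NullHandling;
import org.apache.druid.segment.realtime.appenderator.StreamAppenderator;

public class ByteAccounting {

    // Sketch of the accounting these assertions exercise. The per-row figure
    // comes from the comment in testMaxBytesInMemory:
    // 44 (map) + 28 (TimeAndDims) + 56 (aggregators) + 54 (dims key) = 182.
    public static long expectedBytesCurrentlyInMemory(int rows, int sinks) {
        final int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        final int perRow = 44 + 28 + 56 + 54 + nullHandlingOverhead;
        // Each segment identifier gets its own sink, and each sink carries a
        // rough fixed overhead on top of the raw in-memory index bytes.
        return (long) rows * perRow + (long) sinks * StreamAppenderator.ROUGH_OVERHEAD_PER_SINK;
    }
}

expectedBytesCurrentlyInMemory(2, 2) reproduces the (364 + 2 * nullHandlingOverhead) + sinkSizeOverhead value asserted in the test.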

Example 8 with Committer

Use of org.apache.druid.data.input.Committer in project druid by druid-io.

Class StreamAppenderatorTest, method testMaxBytesInMemory.

@Test
public void testMaxBytesInMemory() throws Exception {
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(100, 15000, true)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = () -> {
            final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                    // Do nothing
                }
            };
        };
        appenderator.startJob();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
        // Still under maxSizeInBytes after the add, so we do not persist yet.
        // expectedSizeInBytes = 44 (map overhead) + 28 (TimeAndDims overhead) + 56 (aggregator metrics) + 54 (dimsKeySize) = 182, plus 1 byte when null handling is enabled
        int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        int currentInMemoryIndexSize = 182 + nullHandlingOverhead;
        int sinkSizeOverhead = 1 * StreamAppenderator.ROUGH_OVERHEAD_PER_SINK;
        // currHydrant in the sink still has > 0 bytesInMemory since we have not persisted yet
        Assert.assertEquals(currentInMemoryIndexSize, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        Assert.assertEquals(currentInMemoryIndexSize + sinkSizeOverhead, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        // We add several more rows to the same sink to trigger a persist.
        for (int i = 0; i < 53; i++) {
            appenderator.add(IDENTIFIERS.get(0), ir("2000", "bar_" + i, 1), committerSupplier);
        }
        sinkSizeOverhead = 1 * StreamAppenderator.ROUGH_OVERHEAD_PER_SINK;
        // currHydrant size is 0 since we just persisted all indexes to disk.
        currentInMemoryIndexSize = 0;
        // We are now over maxSizeInBytes after the add. Hence, we do a persist.
        // currHydrant in the sink has 0 bytesInMemory since we just did a persist
        Assert.assertEquals(currentInMemoryIndexSize, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        // Mapped index size is the memory still needed after we persisted the indexes. Note that the segment has
        // 1 dimension column, 2 metric columns, and 1 time column.
        int mappedIndexSize = 1012 + (2 * StreamAppenderator.ROUGH_OVERHEAD_PER_METRIC_COLUMN_HOLDER) + StreamAppenderator.ROUGH_OVERHEAD_PER_DIMENSION_COLUMN_HOLDER + StreamAppenderator.ROUGH_OVERHEAD_PER_TIME_COLUMN_HOLDER;
        Assert.assertEquals(currentInMemoryIndexSize + sinkSizeOverhead + mappedIndexSize, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        // Add a single row after persisted
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "bob", 1), committerSupplier);
        // currHydrant in the sink still has > 0 bytesInMemory since we have not persisted yet
        currentInMemoryIndexSize = 182 + nullHandlingOverhead;
        Assert.assertEquals(currentInMemoryIndexSize, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        Assert.assertEquals(currentInMemoryIndexSize + sinkSizeOverhead + mappedIndexSize, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        // We add several more rows to the same sink to trigger another persist.
        for (int i = 0; i < 31; i++) {
            appenderator.add(IDENTIFIERS.get(0), ir("2000", "bar_" + i, 1), committerSupplier);
        }
        // currHydrant size is 0 since we just persisted all indexes to disk.
        currentInMemoryIndexSize = 0;
        // We are now over maxSizeInBytes after the add. Hence, we do a persist.
        // currHydrant in the sink has 0 bytesInMemory since we just did a persist
        Assert.assertEquals(currentInMemoryIndexSize, ((StreamAppenderator) appenderator).getBytesInMemory(IDENTIFIERS.get(0)));
        // Mapped index size is the memory still needed after we persisted the indexes. Note that the segment has
        // 1 dimension column, 2 metric columns, and 1 time column. However, we now have two mapped indexes from the
        // two previous persists.
        mappedIndexSize = 2 * (1012 + (2 * StreamAppenderator.ROUGH_OVERHEAD_PER_METRIC_COLUMN_HOLDER) + StreamAppenderator.ROUGH_OVERHEAD_PER_DIMENSION_COLUMN_HOLDER + StreamAppenderator.ROUGH_OVERHEAD_PER_TIME_COLUMN_HOLDER);
        Assert.assertEquals(currentInMemoryIndexSize + sinkSizeOverhead + mappedIndexSize, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        appenderator.close();
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Committer(org.apache.druid.data.input.Committer) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
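
The pattern the assertions trace: every add grows the in-memory byte estimate; once it crosses maxBytesInMemory, everything is persisted, bytesInMemory for the sink drops back to 0, and only the mapped-index and sink overheads stay resident. A simplified paraphrase of that trigger (an illustrative sketch with assumed names, not the actual StreamAppenderator.add() logic):

import org.apache.druid.data.input.Committer;
import org.apache.druid.segment.realtime.appenderator.Appenderator;
import org.apache.druid.segment.realtime.appenderator.StreamAppenderator;

public class PersistTrigger {

    // Illustrative paraphrase of the behavior testMaxBytesInMemory traces;
    // the real decision is made inside StreamAppenderator.add() and is more
    // involved (it also accounts for maxRowsInMemory, among other things).
    public static void maybePersist(Appenderator appenderator, long maxBytesInMemory, Committer committer) {
        final StreamAppenderator streamAppenderator = (StreamAppenderator) appenderator;
        if (maxBytesInMemory > 0 && streamAppenderator.getBytesCurrentlyInMemory() >= maxBytesInMemory) {
            // Persisting flushes every in-memory index to disk: each sink's
            // bytesInMemory returns to 0, while the newly mapped index files
            // add their own, much smaller, resident footprint.
            appenderator.persistAll(committer);
        }
    }
}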

Example 9 with Committer

Use of org.apache.druid.data.input.Committer in project druid by druid-io.

Class StreamAppenderatorTest, method testMaxBytesInMemoryInMultipleSinksWithSkipBytesInMemoryOverheadCheckConfig.

@Test
public void testMaxBytesInMemoryInMultipleSinksWithSkipBytesInMemoryOverheadCheckConfig() throws Exception {
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(100, 1024, null, true, new SimpleRowIngestionMeters(), true)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = () -> {
            final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                // Do nothing
                }
            };
        };
        appenderator.startJob();
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
        // expectedSizeInBytes = 44 (map overhead) + 28 (TimeAndDims overhead) + 56 (aggregator metrics) + 54 (dimsKeySize) = 182
        int nullHandlingOverhead = NullHandling.sqlCompatible() ? 1 : 0;
        Assert.assertEquals(182 + nullHandlingOverhead, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        appenderator.add(IDENTIFIERS.get(1), ir("2000", "bar", 1), committerSupplier);
        Assert.assertEquals(364 + 2 * nullHandlingOverhead, ((StreamAppenderator) appenderator).getBytesCurrentlyInMemory());
        appenderator.close();
        Assert.assertEquals(0, ((StreamAppenderator) appenderator).getRowsInMemory());
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) SimpleRowIngestionMeters(org.apache.druid.segment.incremental.SimpleRowIngestionMeters) Committer(org.apache.druid.data.input.Committer) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
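
The only difference from Example 7 is the skipBytesInMemoryOverheadCheck flag passed to the tester: with it set, the rough per-sink overhead term is left out of the accounting, so the same two rows in two sinks assert to just the raw index bytes. Side by side, using the figures from the two tests (class and method names here are illustrative):

import org.apache.druid.segment.realtime.appenderator.StreamAppenderator;

public class OverheadCheckComparison {

    // Expected getBytesCurrentlyInMemory() for 2 rows in 2 sinks, per the two
    // tests (nullHandlingOverhead is 1 in SQL-compatible mode, otherwise 0).
    public static long expected(boolean skipOverheadCheck, int nullHandlingOverhead) {
        final long rawIndexBytes = 364 + 2L * nullHandlingOverhead;
        return skipOverheadCheck
               ? rawIndexBytes // this example
               : rawIndexBytes + 2L * StreamAppenderator.ROUGH_OVERHEAD_PER_SINK; // Example 7
    }
}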

Example 10 with Committer

Use of org.apache.druid.data.input.Committer in project druid by druid-io.

Class StreamAppenderatorTest, method testTaskFailAsPersistCannotFreeAnyMoreMemory.

@Test(expected = RuntimeException.class)
public void testTaskFailAsPersistCannotFreeAnyMoreMemory() throws Exception {
    try (final StreamAppenderatorTester tester = new StreamAppenderatorTester(100, 5180, true)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = () -> {
            final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
            return new Committer() {

                @Override
                public Object getMetadata() {
                    return metadata;
                }

                @Override
                public void run() {
                    // Do nothing
                }
            };
        };
        appenderator.startJob();
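        // maxSizeInBytes (5180) is too small to hold even a single row plus the
        // rough per-sink overhead, so persisting cannot free enough memory and
        // this add is expected to fail with the RuntimeException declared above.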
        appenderator.add(IDENTIFIERS.get(0), ir("2000", "foo", 1), committerSupplier);
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Committer(org.apache.druid.data.input.Committer) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)

Aggregations

Committer (org.apache.druid.data.input.Committer): 22 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 13 usages
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 11 usages
Test (org.junit.Test): 11 usages
Supplier (com.google.common.base.Supplier): 8 usages
IOException (java.io.IOException): 8 usages
List (java.util.List): 7 usages
ISE (org.apache.druid.java.util.common.ISE): 7 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 6 usages
Preconditions (com.google.common.base.Preconditions): 6 usages
Futures (com.google.common.util.concurrent.Futures): 6 usages
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 6 usages
File (java.io.File): 6 usages
ArrayList (java.util.ArrayList): 6 usages
HashMap (java.util.HashMap): 6 usages
Map (java.util.Map): 6 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 6 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 5 usages
Function (com.google.common.base.Function): 5 usages
Lists (com.google.common.collect.Lists): 5 usages