Example 16 with MemoryManager

Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.

The class LargeRecordHandlerITCase, method fileTest.

@Test
public void fileTest() {
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 4;
    final int NUM_RECORDS = 10;
    FileIOChannel.ID channel = null;
    try (final IOManager ioMan = new IOManagerAsync()) {
        final MemoryManager memMan = MemoryManagerBuilder.newBuilder().setMemorySize(NUM_PAGES * PAGE_SIZE).setPageSize(PAGE_SIZE).build();
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> memory = memMan.allocatePages(owner, NUM_PAGES);
        final TypeInformation<?>[] types = new TypeInformation<?>[] { BasicTypeInfo.LONG_TYPE_INFO, new ValueTypeInfo<SomeVeryLongValue>(SomeVeryLongValue.class), BasicTypeInfo.BYTE_TYPE_INFO };
        final TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>> typeInfo = new TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>>(types);
        final TypeSerializer<Tuple3<Long, SomeVeryLongValue, Byte>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        channel = ioMan.createChannel();
        FileChannelOutputView out = new FileChannelOutputView(ioMan.createBlockChannelWriter(channel), memMan, memory, PAGE_SIZE);
        // add the test data
        Random rnd = new Random();
        List<Long> offsets = new ArrayList<Long>();
        for (int i = 0; i < NUM_RECORDS; i++) {
            offsets.add(out.getWriteOffset());
            long val = rnd.nextLong();
            Tuple3<Long, SomeVeryLongValue, Byte> next = new Tuple3<Long, SomeVeryLongValue, Byte>(val, new SomeVeryLongValue((int) val), (byte) val);
            serializer.serialize(next, out);
        }
        out.close();
        // write offsets must be strictly increasing
        for (int i = 1; i < offsets.size(); i++) {
            assertTrue(offsets.get(i) > offsets.get(i - 1));
        }
        // allocate pages again for reading the data back
        memMan.allocatePages(owner, memory, NUM_PAGES);
        SeekableFileChannelInputView in = new SeekableFileChannelInputView(ioMan, channel, memMan, memory, out.getBytesInLatestSegment());
        for (int i = 0; i < NUM_RECORDS; i++) {
            in.seek(offsets.get(i));
            Tuple3<Long, SomeVeryLongValue, Byte> next = serializer.deserialize(in);
            // key and value must be equal
            assertTrue(next.f0.intValue() == next.f1.val());
            assertTrue(next.f0.byteValue() == next.f2);
        }
        in.closeAndDelete();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ArrayList(java.util.ArrayList) FileIOChannel(org.apache.flink.runtime.io.disk.iomanager.FileIOChannel) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) AbstractInvokable(org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) Random(java.util.Random) DummyInvokable(org.apache.flink.runtime.operators.testutils.DummyInvokable) ValueTypeInfo(org.apache.flink.api.java.typeutils.ValueTypeInfo) SeekableFileChannelInputView(org.apache.flink.runtime.io.disk.SeekableFileChannelInputView) FileChannelOutputView(org.apache.flink.runtime.io.disk.FileChannelOutputView) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) MemorySegment(org.apache.flink.core.memory.MemorySegment) TupleTypeInfo(org.apache.flink.api.java.typeutils.TupleTypeInfo) IOException(java.io.IOException) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Test(org.junit.Test)
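As a side note, the MemoryManager calls above follow a simple allocate/use/release lifecycle. A minimal sketch of that lifecycle, assuming the same test-scoped MemoryManagerBuilder used in the example (all names below are illustrative, not taken from this test):

    MemoryManager memMan = MemoryManagerBuilder.newBuilder()
            .setMemorySize(4 * 4096)  // budget: four 4 KB pages
            .setPageSize(4096)
            .build();
    Object owner = new Object();      // any object may own an allocation
    List<MemorySegment> segments = memMan.allocatePages(owner, 4);
    // ... write to / read from the segments ...
    memMan.release(segments);         // return the pages to the pool
    assert memMan.verifyEmpty();      // nothing leaked
    memMan.shutdown();

The tests above skip the explicit release because the output/input views (here, in.closeAndDelete()) hand their pages back to the memory manager themselves.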

Example 17 with MemoryManager

Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.

The class LargeRecordHandlerITCase, method testRecordHandlerCompositeKey.

@Test
public void testRecordHandlerCompositeKey() {
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 1000;
    final int NUM_RECORDS = 10;
    try (final IOManager ioMan = new IOManagerAsync()) {
        final MemoryManager memMan = MemoryManagerBuilder.newBuilder().setMemorySize(NUM_PAGES * PAGE_SIZE).setPageSize(PAGE_SIZE).build();
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> initialMemory = memMan.allocatePages(owner, 6);
        final List<MemorySegment> sortMemory = memMan.allocatePages(owner, NUM_PAGES - 6);
        final TypeInformation<?>[] types = new TypeInformation<?>[] { BasicTypeInfo.LONG_TYPE_INFO, new ValueTypeInfo<SomeVeryLongValue>(SomeVeryLongValue.class), BasicTypeInfo.BYTE_TYPE_INFO };
        final TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>> typeInfo = new TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>>(types);
        final TypeSerializer<Tuple3<Long, SomeVeryLongValue, Byte>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        // composite key: sort by field 2 (the byte) first, then field 0 (the long), both ascending
        final TypeComparator<Tuple3<Long, SomeVeryLongValue, Byte>> comparator = typeInfo.createComparator(new int[] { 2, 0 }, new boolean[] { true, true }, 0, new ExecutionConfig());
        LargeRecordHandler<Tuple3<Long, SomeVeryLongValue, Byte>> handler = new LargeRecordHandler<Tuple3<Long, SomeVeryLongValue, Byte>>(serializer, comparator, ioMan, memMan, initialMemory, owner, 128, owner.getExecutionConfig());
        assertFalse(handler.hasData());
        // add the test data
        Random rnd = new Random();
        for (int i = 0; i < NUM_RECORDS; i++) {
            long val = rnd.nextLong();
            handler.addRecord(new Tuple3<Long, SomeVeryLongValue, Byte>(val, new SomeVeryLongValue((int) val), (byte) val));
            assertTrue(handler.hasData());
        }
        MutableObjectIterator<Tuple3<Long, SomeVeryLongValue, Byte>> sorted = handler.finishWriteAndSortKeys(sortMemory);
        try {
            handler.addRecord(new Tuple3<Long, SomeVeryLongValue, Byte>(92L, null, (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
        // expected
        }
        Tuple3<Long, SomeVeryLongValue, Byte> previous = null;
        Tuple3<Long, SomeVeryLongValue, Byte> next;
        while ((next = sorted.next(null)) != null) {
            // key and value must be equal
            assertTrue(next.f0.intValue() == next.f1.val());
            assertTrue(next.f0.byteValue() == next.f2);
            // order must be correct
            if (previous != null) {
                assertTrue(previous.f2 <= next.f2);
                assertTrue(previous.f2.byteValue() != next.f2.byteValue() || previous.f0 <= next.f0);
            }
            previous = next;
        }
        handler.close();
        assertFalse(handler.hasData());
        // a second close() must be a safe no-op
        handler.close();
        try {
            handler.addRecord(new Tuple3<Long, SomeVeryLongValue, Byte>(92L, null, (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
        // expected
        }
        assertTrue(memMan.verifyEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) AbstractInvokable(org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) Random(java.util.Random) DummyInvokable(org.apache.flink.runtime.operators.testutils.DummyInvokable) ValueTypeInfo(org.apache.flink.api.java.typeutils.ValueTypeInfo) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) MemorySegment(org.apache.flink.core.memory.MemorySegment) TupleTypeInfo(org.apache.flink.api.java.typeutils.TupleTypeInfo) IOException(java.io.IOException) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Test(org.junit.Test)
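One detail worth noting: sorted.next(null) asks the MutableObjectIterator for a fresh record on every call. That matters here because the loop keeps previous across iterations; with object reuse, the order assertions would compare against overwritten data. For contrast, a hedged sketch of the reuse variant (process is a hypothetical consumer, not part of the test):

    // Object-reuse iteration: a single record instance is recycled across calls.
    Tuple3<Long, SomeVeryLongValue, Byte> reuse = serializer.createInstance();
    while ((reuse = sorted.next(reuse)) != null) {
        process(reuse); // must not keep a reference beyond this iteration
    }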

Example 18 with MemoryManager

Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.

The class StreamMultipleInputProcessorFactory, method create.

@SuppressWarnings({ "unchecked", "rawtypes" })
public static StreamMultipleInputProcessor create(TaskInvokable ownerTask, CheckpointedInputGate[] checkpointedInputGates, StreamConfig.InputConfig[] configuredInputs, IOManager ioManager, MemoryManager memoryManager, TaskIOMetricGroup ioMetricGroup, Counter mainOperatorRecordsIn, MultipleInputStreamOperator<?> mainOperator, WatermarkGauge[] inputWatermarkGauges, StreamConfig streamConfig, Configuration taskManagerConfig, Configuration jobConfig, ExecutionConfig executionConfig, ClassLoader userClassloader, OperatorChain<?, ?> operatorChain, InflightDataRescalingDescriptor inflightDataRescalingDescriptor, Function<Integer, StreamPartitioner<?>> gatePartitioners, TaskInfo taskInfo) {
    checkNotNull(operatorChain);
    List<Input> operatorInputs = mainOperator.getInputs();
    int inputsCount = operatorInputs.size();
    StreamOneInputProcessor<?>[] inputProcessors = new StreamOneInputProcessor[inputsCount];
    Counter networkRecordsIn = new SimpleCounter();
    ioMetricGroup.reuseRecordsInputCounter(networkRecordsIn);
    checkState(configuredInputs.length == inputsCount, "Number of configured inputs in StreamConfig [%s] doesn't match the main operator's number of inputs [%s]", configuredInputs.length, inputsCount);
    StreamTaskInput[] inputs = new StreamTaskInput[inputsCount];
    for (int i = 0; i < inputsCount; i++) {
        StreamConfig.InputConfig configuredInput = configuredInputs[i];
        if (configuredInput instanceof StreamConfig.NetworkInputConfig) {
            StreamConfig.NetworkInputConfig networkInput = (StreamConfig.NetworkInputConfig) configuredInput;
            inputs[i] = StreamTaskNetworkInputFactory.create(checkpointedInputGates[networkInput.getInputGateIndex()], networkInput.getTypeSerializer(), ioManager, new StatusWatermarkValve(checkpointedInputGates[networkInput.getInputGateIndex()].getNumberOfInputChannels()), i, inflightDataRescalingDescriptor, gatePartitioners, taskInfo);
        } else if (configuredInput instanceof StreamConfig.SourceInputConfig) {
            StreamConfig.SourceInputConfig sourceInput = (StreamConfig.SourceInputConfig) configuredInput;
            inputs[i] = operatorChain.getSourceTaskInput(sourceInput);
        } else {
            throw new UnsupportedOperationException("Unknown input type: " + configuredInput);
        }
    }
    InputSelectable inputSelectable = mainOperator instanceof InputSelectable ? (InputSelectable) mainOperator : null;
    StreamConfig.InputConfig[] inputConfigs = streamConfig.getInputs(userClassloader);
    boolean anyRequiresSorting = Arrays.stream(inputConfigs).anyMatch(StreamConfig::requiresSorting);
    if (anyRequiresSorting) {
        if (inputSelectable != null) {
            throw new IllegalStateException("The InputSelectable interface is not supported with sorting inputs");
        }
        StreamTaskInput[] sortingInputs = IntStream.range(0, inputsCount).filter(idx -> requiresSorting(inputConfigs[idx])).mapToObj(idx -> inputs[idx]).toArray(StreamTaskInput[]::new);
        KeySelector[] sortingInputKeySelectors = IntStream.range(0, inputsCount).filter(idx -> requiresSorting(inputConfigs[idx])).mapToObj(idx -> streamConfig.getStatePartitioner(idx, userClassloader)).toArray(KeySelector[]::new);
        TypeSerializer[] sortingInputKeySerializers = IntStream.range(0, inputsCount).filter(idx -> requiresSorting(inputConfigs[idx])).mapToObj(idx -> streamConfig.getTypeSerializerIn(idx, userClassloader)).toArray(TypeSerializer[]::new);
        StreamTaskInput[] passThroughInputs = IntStream.range(0, inputsCount).filter(idx -> !requiresSorting(inputConfigs[idx])).mapToObj(idx -> inputs[idx]).toArray(StreamTaskInput[]::new);
        SelectableSortingInputs selectableSortingInputs = MultiInputSortingDataInput.wrapInputs(ownerTask, sortingInputs, sortingInputKeySelectors, sortingInputKeySerializers, streamConfig.getStateKeySerializer(userClassloader), passThroughInputs, memoryManager, ioManager, executionConfig.isObjectReuseEnabled(), streamConfig.getManagedMemoryFractionOperatorUseCaseOfSlot(ManagedMemoryUseCase.OPERATOR, taskManagerConfig, userClassloader), jobConfig, executionConfig);
        StreamTaskInput<?>[] sortedInputs = selectableSortingInputs.getSortedInputs();
        StreamTaskInput<?>[] passedThroughInputs = selectableSortingInputs.getPassThroughInputs();
        int sortedIndex = 0;
        int passThroughIndex = 0;
        for (int i = 0; i < inputs.length; i++) {
            if (requiresSorting(inputConfigs[i])) {
                inputs[i] = sortedInputs[sortedIndex];
                sortedIndex++;
            } else {
                inputs[i] = passedThroughInputs[passThroughIndex];
                passThroughIndex++;
            }
        }
        inputSelectable = selectableSortingInputs.getInputSelectable();
    }
    for (int i = 0; i < inputsCount; i++) {
        StreamConfig.InputConfig configuredInput = configuredInputs[i];
        if (configuredInput instanceof StreamConfig.NetworkInputConfig) {
            StreamTaskNetworkOutput dataOutput = new StreamTaskNetworkOutput<>(operatorChain.getFinishedOnRestoreInputOrDefault(operatorInputs.get(i)), inputWatermarkGauges[i], mainOperatorRecordsIn, networkRecordsIn);
            inputProcessors[i] = new StreamOneInputProcessor(inputs[i], dataOutput, operatorChain);
        } else if (configuredInput instanceof StreamConfig.SourceInputConfig) {
            StreamConfig.SourceInputConfig sourceInput = (StreamConfig.SourceInputConfig) configuredInput;
            OperatorChain.ChainedSource chainedSource = operatorChain.getChainedSource(sourceInput);
            inputProcessors[i] = new StreamOneInputProcessor(inputs[i], new StreamTaskSourceOutput(chainedSource.getSourceOutput(), inputWatermarkGauges[i], chainedSource.getSourceTaskInput().getOperator().getSourceMetricGroup()), operatorChain);
        } else {
            throw new UnsupportedOperationException("Unknown input type: " + configuredInput);
        }
    }
    return new StreamMultipleInputProcessor(new MultipleInputSelectionHandler(inputSelectable, inputsCount), inputProcessors);
}
Also used : IntStream(java.util.stream.IntStream) TaskIOMetricGroup(org.apache.flink.runtime.metrics.groups.TaskIOMetricGroup) Arrays(java.util.Arrays) StreamConfig(org.apache.flink.streaming.api.graph.StreamConfig) InputSelectable(org.apache.flink.streaming.api.operators.InputSelectable) TaskInvokable(org.apache.flink.runtime.jobgraph.tasks.TaskInvokable) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) CheckpointedInputGate(org.apache.flink.streaming.runtime.io.checkpointing.CheckpointedInputGate) StreamConfig.requiresSorting(org.apache.flink.streaming.api.graph.StreamConfig.requiresSorting) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) SelectableSortingInputs(org.apache.flink.streaming.api.operators.sort.MultiInputSortingDataInput.SelectableSortingInputs) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Function(java.util.function.Function) InflightDataRescalingDescriptor(org.apache.flink.runtime.checkpoint.InflightDataRescalingDescriptor) StreamPartitioner(org.apache.flink.streaming.runtime.partitioner.StreamPartitioner) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) ManagedMemoryUseCase(org.apache.flink.core.memory.ManagedMemoryUseCase) SourceOperatorStreamTask(org.apache.flink.streaming.runtime.tasks.SourceOperatorStreamTask) SimpleCounter(org.apache.flink.metrics.SimpleCounter) Preconditions.checkNotNull(org.apache.flink.util.Preconditions.checkNotNull) WatermarkGaugeExposingOutput(org.apache.flink.streaming.runtime.tasks.WatermarkGaugeExposingOutput) WatermarkStatus(org.apache.flink.streaming.runtime.watermarkstatus.WatermarkStatus) Preconditions.checkState(org.apache.flink.util.Preconditions.checkState) StatusWatermarkValve(org.apache.flink.streaming.runtime.watermarkstatus.StatusWatermarkValve) TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) KeySelector(org.apache.flink.api.java.functions.KeySelector) Configuration(org.apache.flink.configuration.Configuration) TaskInfo(org.apache.flink.api.common.TaskInfo) MultipleInputStreamOperator(org.apache.flink.streaming.api.operators.MultipleInputStreamOperator) InternalSourceReaderMetricGroup(org.apache.flink.runtime.metrics.groups.InternalSourceReaderMetricGroup) List(java.util.List) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) OperatorChain(org.apache.flink.streaming.runtime.tasks.OperatorChain) Internal(org.apache.flink.annotation.Internal) MultiInputSortingDataInput(org.apache.flink.streaming.api.operators.sort.MultiInputSortingDataInput) LatencyMarker(org.apache.flink.streaming.runtime.streamrecord.LatencyMarker) Counter(org.apache.flink.metrics.Counter) WatermarkGauge(org.apache.flink.streaming.runtime.metrics.WatermarkGauge) Input(org.apache.flink.streaming.api.operators.Input)
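The re-merge loop after wrapInputs relies on the sorted and pass-through arrays preserving the relative order of their filtered indices. The same pattern in isolation, as a minimal standalone sketch (names are illustrative, not from the factory):

    // Scatter two filtered sub-arrays back into their original slots.
    // needsSort[i] plays the role of requiresSorting(inputConfigs[i]).
    static String[] remerge(String[] sorted, String[] passThrough, boolean[] needsSort) {
        String[] result = new String[needsSort.length];
        int s = 0, p = 0;
        for (int i = 0; i < needsSort.length; i++) {
            result[i] = needsSort[i] ? sorted[s++] : passThrough[p++];
        }
        return result;
    }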

Example 19 with MemoryManager

Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.

The class RocksDBOperationUtils, method allocateSharedCachesIfConfigured.

@Nullable
public static OpaqueMemoryResource<RocksDBSharedResources> allocateSharedCachesIfConfigured(RocksDBMemoryConfiguration memoryConfig, MemoryManager memoryManager, double memoryFraction, Logger logger) throws IOException {
    if (!memoryConfig.isUsingFixedMemoryPerSlot() && !memoryConfig.isUsingManagedMemory()) {
        return null;
    }
    final double highPriorityPoolRatio = memoryConfig.getHighPriorityPoolRatio();
    final double writeBufferRatio = memoryConfig.getWriteBufferRatio();
    final boolean usingPartitionedIndexFilters = memoryConfig.isUsingPartitionedIndexFilters();
    final LongFunctionWithException<RocksDBSharedResources, Exception> allocator = (size) -> RocksDBMemoryControllerUtils.allocateRocksDBSharedResources(size, writeBufferRatio, highPriorityPoolRatio, usingPartitionedIndexFilters);
    try {
        if (memoryConfig.isUsingFixedMemoryPerSlot()) {
            assert memoryConfig.getFixedMemoryPerSlot() != null;
            logger.info("Getting fixed-size shared cache for RocksDB.");
            return memoryManager.getExternalSharedMemoryResource(FIXED_SLOT_MEMORY_RESOURCE_ID, allocator, memoryConfig.getFixedMemoryPerSlot().getBytes());
        } else {
            logger.info("Getting managed memory shared cache for RocksDB.");
            return memoryManager.getSharedMemoryResourceForManagedMemory(MANAGED_MEMORY_RESOURCE_ID, allocator, memoryFraction);
        }
    } catch (Exception e) {
        throw new IOException("Failed to acquire shared cache resource for RocksDB", e);
    }
}
Also used : OpaqueMemoryResource(org.apache.flink.runtime.memory.OpaqueMemoryResource) Arrays(java.util.Arrays) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) LoggerFactory(org.slf4j.LoggerFactory) RocksDbTtlCompactFiltersManager(org.apache.flink.contrib.streaming.state.ttl.RocksDbTtlCompactFiltersManager) Function(java.util.function.Function) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ArrayList(java.util.ArrayList) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) RocksDB(org.rocksdb.RocksDB) Map(java.util.Map) ConfigConstants(org.apache.flink.configuration.ConfigConstants) RocksDBException(org.rocksdb.RocksDBException) MERGE_OPERATOR_NAME(org.apache.flink.contrib.streaming.state.RocksDBKeyedStateBackend.MERGE_OPERATOR_NAME) Nullable(javax.annotation.Nullable) ReadOptions(org.rocksdb.ReadOptions) IOUtils(org.apache.flink.util.IOUtils) Logger(org.slf4j.Logger) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) DBOptions(org.rocksdb.DBOptions) IOException(java.io.IOException) OperatingSystem(org.apache.flink.util.OperatingSystem) Preconditions(org.apache.flink.util.Preconditions) LongFunctionWithException(org.apache.flink.util.function.LongFunctionWithException) List(java.util.List) RegisteredStateMetaInfoBase(org.apache.flink.runtime.state.RegisteredStateMetaInfoBase) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle)
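A caller typically holds the returned OpaqueMemoryResource for the lifetime of the state backend and closes it on disposal. A hedged sketch of that usage (the surrounding configuration objects are assumed, not shown in the source):

    OpaqueMemoryResource<RocksDBSharedResources> shared =
            RocksDBOperationUtils.allocateSharedCachesIfConfigured(
                    memoryConfig, memoryManager, memoryFraction, logger);
    if (shared != null) {
        RocksDBSharedResources resources = shared.getResourceHandle();
        // ... wire the shared cache / write-buffer manager into the RocksDB options ...
        // later, when the backend is disposed:
        shared.close(); // releases this lease; the resource is freed with the last lease
    }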

Example 20 with MemoryManager

Use of org.apache.flink.runtime.memory.MemoryManager in project flink by apache.

The class BinaryExternalSorterTest, method testSortTwoBufferInMemory.

@Test
public void testSortTwoBufferInMemory() throws Exception {
    int size = 1_000_000;
    MockBinaryRowReader reader = new MockBinaryRowReader(size);
    LOG.debug("initializing sortmerger");
    // there are two sort buffers if sortMemory > 100 * 1024 * 1024.
    MemoryManager memoryManager = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024 * 101).build();
    long minMemorySize = memoryManager.computeNumberOfPages(1) * MemoryManager.DEFAULT_PAGE_SIZE;
    BinaryExternalSorter sorter = new BinaryExternalSorter(new Object(), memoryManager, minMemorySize, this.ioManager, (AbstractRowDataSerializer) serializer, serializer, IntNormalizedKeyComputer.INSTANCE, IntRecordComparator.INSTANCE, conf, 1f);
    sorter.startThreads();
    sorter.write(reader);
    MutableObjectIterator<BinaryRowData> iterator = sorter.getIterator();
    BinaryRowData next = serializer.createInstance();
    for (int i = 0; i < size; i++) {
        next = iterator.next(next);
        Assert.assertEquals(i, next.getInt(0));
        Assert.assertEquals(getString(i), next.getString(1).toString());
    }
    sorter.close();
    Assert.assertTrue(memoryManager.verifyEmpty());
    memoryManager.shutdown();
}
Also used : BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) Test(org.junit.Test)
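For reference, assuming the default page size of 32 KB (MemoryManager.DEFAULT_PAGE_SIZE = 32 * 1024), the minMemorySize computation above works out to the full budget, since computeNumberOfPages(1) converts a fraction of 1.0 into all available pages:

    // 101 MB total / 32 KB per page = 3232 pages at fraction 1.0
    long pages = (101L * 1024 * 1024) / (32 * 1024); // 3232
    long minMemorySize = pages * 32 * 1024;          // 105_906_176 bytes = the full 101 MB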

Aggregations

MemoryManager (org.apache.flink.runtime.memory.MemoryManager) 69
Test (org.junit.Test) 37
IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager) 22
BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData) 21
IOManagerAsync (org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) 18
IOException (java.io.IOException) 16
ArrayList (java.util.ArrayList) 14
DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable) 14
UniformBinaryRowGenerator (org.apache.flink.table.runtime.util.UniformBinaryRowGenerator) 14
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig) 13
MemorySegment (org.apache.flink.core.memory.MemorySegment) 12
Configuration (org.apache.flink.configuration.Configuration) 9
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 8
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint) 7
TupleTypeInfo (org.apache.flink.api.java.typeutils.TupleTypeInfo) 7
File (java.io.File) 6
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator) 6
Map (java.util.Map) 5
AbstractInvokable (org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable) 5
BufferedReader (java.io.BufferedReader) 4