Example 21 with IOManager

Use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.

From the class FileChannelStreamsTest, method testCloseAndDeleteOutputView.

@Test
public void testCloseAndDeleteOutputView() {
    final IOManager ioManager = new IOManagerAsync();
    try {
        MemoryManager memMan = new MemoryManager(4 * 16 * 1024, 1, 16 * 1024, MemoryType.HEAP, true);
        List<MemorySegment> memory = new ArrayList<MemorySegment>();
        memMan.allocatePages(new DummyInvokable(), memory, 4);
        FileIOChannel.ID channel = ioManager.createChannel();
        BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
        FileChannelOutputView out = new FileChannelOutputView(writer, memMan, memory, memMan.getPageSize());
        new StringValue("Some test text").write(out);
        // close for the first time, make sure all memory returns
        out.close();
        assertTrue(memMan.verifyEmpty());
        // close again, should not cause an exception
        out.close();
        // delete, make sure file is removed
        out.closeAndDelete();
        assertFalse(new File(channel.getPath()).exists());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        ioManager.shutdown();
    }
}
Also used : IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) ArrayList(java.util.ArrayList) FileIOChannel(org.apache.flink.runtime.io.disk.iomanager.FileIOChannel) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) MemorySegment(org.apache.flink.core.memory.MemorySegment) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) DummyInvokable(org.apache.flink.runtime.operators.testutils.DummyInvokable) StringValue(org.apache.flink.types.StringValue) File(java.io.File) Test(org.junit.Test)
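
The same lifecycle, stripped of the JUnit scaffolding, can be sketched as follows. This is a minimal sketch that reuses only the IOManagerAsync, MemoryManager, and FileChannelOutputView calls already shown above; error handling is omitted for brevity.

static void writeAndCleanUp() throws Exception {
    final IOManager ioManager = new IOManagerAsync();
    final MemoryManager memoryManager = new MemoryManager(4 * 16 * 1024, 1, 16 * 1024, MemoryType.HEAP, true);
    try {
        // allocate pages and open an output view backed by a block channel writer
        List<MemorySegment> memory = new ArrayList<MemorySegment>();
        memoryManager.allocatePages(new DummyInvokable(), memory, 4);
        FileIOChannel.ID channel = ioManager.createChannel();
        FileChannelOutputView out = new FileChannelOutputView(
                ioManager.createBlockChannelWriter(channel), memoryManager, memory, memoryManager.getPageSize());
        // write some data, then close and delete the spill file; closing returns the memory segments
        new StringValue("Some test text").write(out);
        out.closeAndDelete();
    } finally {
        ioManager.shutdown();
        memoryManager.shutdown();
    }
}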

Example 22 with IOManager

Use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.

From the class TaskExecutorITCase, method testSlotAllocation.

@Test
public void testSlotAllocation() throws Exception {
    TestingFatalErrorHandler testingFatalErrorHandler = new TestingFatalErrorHandler();
    TestingHighAvailabilityServices testingHAServices = new TestingHighAvailabilityServices();
    final Configuration configuration = new Configuration();
    final ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1);
    final ResourceID taskManagerResourceId = new ResourceID("foobar");
    final UUID rmLeaderId = UUID.randomUUID();
    final TestingLeaderElectionService rmLeaderElectionService = new TestingLeaderElectionService();
    final TestingLeaderRetrievalService rmLeaderRetrievalService = new TestingLeaderRetrievalService();
    final String rmAddress = "rm";
    final String jmAddress = "jm";
    final UUID jmLeaderId = UUID.randomUUID();
    final JobID jobId = new JobID();
    final ResourceProfile resourceProfile = new ResourceProfile(1.0, 1);
    testingHAServices.setResourceManagerLeaderElectionService(rmLeaderElectionService);
    testingHAServices.setResourceManagerLeaderRetriever(rmLeaderRetrievalService);
    testingHAServices.setJobMasterLeaderRetriever(jobId, new TestingLeaderRetrievalService(jmAddress, jmLeaderId));
    TestingSerialRpcService rpcService = new TestingSerialRpcService();
    ResourceManagerConfiguration resourceManagerConfiguration = new ResourceManagerConfiguration(Time.milliseconds(500L), Time.milliseconds(500L), Time.minutes(5L));
    SlotManagerFactory slotManagerFactory = new DefaultSlotManager.Factory();
    JobLeaderIdService jobLeaderIdService = new JobLeaderIdService(testingHAServices, rpcService.getScheduledExecutor(), resourceManagerConfiguration.getJobTimeout());
    MetricRegistry metricRegistry = mock(MetricRegistry.class);
    HeartbeatServices heartbeatServices = mock(HeartbeatServices.class, RETURNS_MOCKS);
    final TaskManagerConfiguration taskManagerConfiguration = TaskManagerConfiguration.fromConfiguration(configuration);
    final TaskManagerLocation taskManagerLocation = new TaskManagerLocation(taskManagerResourceId, InetAddress.getLocalHost(), 1234);
    final MemoryManager memoryManager = mock(MemoryManager.class);
    final IOManager ioManager = mock(IOManager.class);
    final NetworkEnvironment networkEnvironment = mock(NetworkEnvironment.class);
    final TaskManagerMetricGroup taskManagerMetricGroup = mock(TaskManagerMetricGroup.class);
    final BroadcastVariableManager broadcastVariableManager = mock(BroadcastVariableManager.class);
    final FileCache fileCache = mock(FileCache.class);
    final TaskSlotTable taskSlotTable = new TaskSlotTable(Arrays.asList(resourceProfile), new TimerService<AllocationID>(scheduledExecutorService, 100L));
    final JobManagerTable jobManagerTable = new JobManagerTable();
    final JobLeaderService jobLeaderService = new JobLeaderService(taskManagerLocation);
    ResourceManager<ResourceID> resourceManager = new StandaloneResourceManager(rpcService, resourceManagerConfiguration, testingHAServices, slotManagerFactory, metricRegistry, jobLeaderIdService, testingFatalErrorHandler);
    TaskExecutor taskExecutor = new TaskExecutor(taskManagerConfiguration, taskManagerLocation, rpcService, memoryManager, ioManager, networkEnvironment, testingHAServices, heartbeatServices, metricRegistry, taskManagerMetricGroup, broadcastVariableManager, fileCache, taskSlotTable, jobManagerTable, jobLeaderService, testingFatalErrorHandler);
    JobMasterGateway jmGateway = mock(JobMasterGateway.class);
    when(jmGateway.registerTaskManager(any(String.class), any(TaskManagerLocation.class), eq(jmLeaderId), any(Time.class))).thenReturn(FlinkCompletableFuture.<RegistrationResponse>completed(new JMTMRegistrationSuccess(taskManagerResourceId, 1234)));
    when(jmGateway.getHostname()).thenReturn(jmAddress);
    rpcService.registerGateway(rmAddress, resourceManager.getSelf());
    rpcService.registerGateway(jmAddress, jmGateway);
    final AllocationID allocationId = new AllocationID();
    final SlotRequest slotRequest = new SlotRequest(jobId, allocationId, resourceProfile);
    final SlotOffer slotOffer = new SlotOffer(allocationId, 0, resourceProfile);
    try {
        resourceManager.start();
        taskExecutor.start();
        // notify the RM that it is the leader
        rmLeaderElectionService.isLeader(rmLeaderId);
        // notify the TM about the new RM leader
        rmLeaderRetrievalService.notifyListener(rmAddress, rmLeaderId);
        Future<RegistrationResponse> registrationResponseFuture = resourceManager.registerJobManager(rmLeaderId, jmLeaderId, jmAddress, jobId);
        RegistrationResponse registrationResponse = registrationResponseFuture.get();
        assertTrue(registrationResponse instanceof JobMasterRegistrationSuccess);
        resourceManager.requestSlot(jmLeaderId, rmLeaderId, slotRequest);
        verify(jmGateway).offerSlots(eq(taskManagerResourceId), (Iterable<SlotOffer>) argThat(Matchers.contains(slotOffer)), eq(jmLeaderId), any(Time.class));
    } finally {
        if (testingFatalErrorHandler.hasExceptionOccurred()) {
            testingFatalErrorHandler.rethrowError();
        }
    }
}
Also used : ResourceManagerConfiguration(org.apache.flink.runtime.resourcemanager.ResourceManagerConfiguration) Configuration(org.apache.flink.configuration.Configuration) TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) JobLeaderIdService(org.apache.flink.runtime.resourcemanager.JobLeaderIdService) SlotManagerFactory(org.apache.flink.runtime.resourcemanager.slotmanager.SlotManagerFactory) Time(org.apache.flink.api.common.time.Time) StandaloneResourceManager(org.apache.flink.runtime.resourcemanager.StandaloneResourceManager) JobMasterGateway(org.apache.flink.runtime.jobmaster.JobMasterGateway) SlotRequest(org.apache.flink.runtime.resourcemanager.SlotRequest) TestingHighAvailabilityServices(org.apache.flink.runtime.highavailability.TestingHighAvailabilityServices) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) TestingSerialRpcService(org.apache.flink.runtime.rpc.TestingSerialRpcService) UUID(java.util.UUID) RegistrationResponse(org.apache.flink.runtime.registration.RegistrationResponse) TestingFatalErrorHandler(org.apache.flink.runtime.util.TestingFatalErrorHandler) HeartbeatServices(org.apache.flink.runtime.heartbeat.HeartbeatServices) ResourceProfile(org.apache.flink.runtime.clusterframework.types.ResourceProfile) JMTMRegistrationSuccess(org.apache.flink.runtime.jobmaster.JMTMRegistrationSuccess) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingLeaderElectionService(org.apache.flink.runtime.leaderelection.TestingLeaderElectionService) SlotOffer(org.apache.flink.runtime.taskexecutor.slot.SlotOffer) JobMasterRegistrationSuccess(org.apache.flink.runtime.jobmaster.JobMasterRegistrationSuccess) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) MetricRegistry(org.apache.flink.runtime.metrics.MetricRegistry) TaskManagerMetricGroup(org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) ResourceManagerConfiguration(org.apache.flink.runtime.resourcemanager.ResourceManagerConfiguration) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) FileCache(org.apache.flink.runtime.filecache.FileCache) SlotManagerFactory(org.apache.flink.runtime.resourcemanager.slotmanager.SlotManagerFactory) TaskSlotTable(org.apache.flink.runtime.taskexecutor.slot.TaskSlotTable) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
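
In this test the IOManager is only a Mockito mock, because TaskExecutor slot allocation never touches disk; tests that actually spill (such as Example 23) use a real IOManagerAsync with explicit temp directories instead. A sketch of the two variants, using only constructors that appear on this page (the temp directory choice below is illustrative only):

// variant 1: no disk I/O happens, so a mock keeps the test lightweight (as in this example)
final IOManager mockedIoManager = mock(IOManager.class);

// variant 2: the code under test spills to disk, so a real asynchronous IOManager is needed (as in Example 23);
// the JVM temp dir is just an example location
final IOManager realIoManager = new IOManagerAsync(new String[] { System.getProperty("java.io.tmpdir") });
try {
    // ... exercise the code under test ...
} finally {
    realIoManager.shutdown();
}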

Example 23 with IOManager

Use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.

From the class TaskManagerComponentsStartupShutdownTest, method testComponentsStartupShutdown.

/**
 * Makes sure that all components are shut down when the TaskManager
 * actor is shut down.
 */
@Test
public void testComponentsStartupShutdown() {
    final String[] TMP_DIR = new String[] { ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH };
    final Time timeout = Time.seconds(100);
    final int BUFFER_SIZE = 32 * 1024;
    Configuration config = new Configuration();
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "200 ms");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "1 s");
    config.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 1);
    ActorSystem actorSystem = null;
    try {
        actorSystem = AkkaUtils.createLocalActorSystem(config);
        final ActorRef jobManager = JobManager.startJobManagerActors(config, actorSystem, TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), JobManager.class, MemoryArchivist.class)._1();
        FlinkResourceManager.startResourceManagerActors(config, actorSystem, LeaderRetrievalUtils.createLeaderRetrievalService(config, jobManager), StandaloneResourceManager.class);
        final int numberOfSlots = 1;
        // create the components for the TaskManager manually
        final TaskManagerConfiguration tmConfig = new TaskManagerConfiguration(
            numberOfSlots, TMP_DIR, timeout, null,
            Time.milliseconds(500), Time.seconds(30), Time.seconds(10),
            1000000, // cleanup interval
            config,
            false); // exit-jvm-on-fatal-error
        final NetworkEnvironmentConfiguration netConf = new NetworkEnvironmentConfiguration(32, BUFFER_SIZE, MemoryType.HEAP, IOManager.IOMode.SYNC, 0, 0, 2, 8, null);
        ResourceID taskManagerId = ResourceID.generate();
        final TaskManagerLocation connectionInfo = new TaskManagerLocation(taskManagerId, InetAddress.getLocalHost(), 10000);
        final MemoryManager memManager = new MemoryManager(32 * BUFFER_SIZE, 1, BUFFER_SIZE, MemoryType.HEAP, false);
        final IOManager ioManager = new IOManagerAsync(TMP_DIR);
        final NetworkEnvironment network = new NetworkEnvironment(new NetworkBufferPool(netConf.numNetworkBuffers(), netConf.networkBufferSize(), netConf.memoryType()), new LocalConnectionManager(), new ResultPartitionManager(), new TaskEventDispatcher(), new KvStateRegistry(), null, netConf.ioMode(), netConf.partitionRequestInitialBackoff(), netConf.partitionRequestMaxBackoff(), netConf.networkBuffersPerChannel(), netConf.extraNetworkBuffersPerGate());
        network.start();
        LeaderRetrievalService leaderRetrievalService = new StandaloneLeaderRetrievalService(jobManager.path().toString());
        MetricRegistryConfiguration metricRegistryConfiguration = MetricRegistryConfiguration.fromConfiguration(config);
        // create the task manager
        final Props tmProps = Props.create(TaskManager.class, tmConfig, taskManagerId, connectionInfo, memManager, ioManager, network, numberOfSlots, leaderRetrievalService, new MetricRegistry(metricRegistryConfiguration));
        final ActorRef taskManager = actorSystem.actorOf(tmProps);
        new JavaTestKit(actorSystem) {

            {
                // wait for the TaskManager to be registered
                new Within(new FiniteDuration(5000, TimeUnit.SECONDS)) {

                    @Override
                    protected void run() {
                        taskManager.tell(TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), getTestActor());
                        expectMsgEquals(TaskManagerMessages.getRegisteredAtJobManagerMessage());
                    }
                };
            }
        };
        // kill the TaskManager and the JobManager actors
        taskManager.tell(Kill.getInstance(), ActorRef.noSender());
        jobManager.tell(Kill.getInstance(), ActorRef.noSender());
        // shut down the actors and the actor system
        actorSystem.shutdown();
        actorSystem.awaitTermination();
        actorSystem = null;
        // now that the TaskManager is shut down, the components should be shut down as well
        assertTrue(network.isShutdown());
        assertTrue(ioManager.isProperlyShutDown());
        assertTrue(memManager.isShutdown());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (actorSystem != null) {
            actorSystem.shutdown();
        }
    }
}
Also used : ActorSystem(akka.actor.ActorSystem) KvStateRegistry(org.apache.flink.runtime.query.KvStateRegistry) MemoryArchivist(org.apache.flink.runtime.jobmanager.MemoryArchivist) MetricRegistryConfiguration(org.apache.flink.runtime.metrics.MetricRegistryConfiguration) Configuration(org.apache.flink.configuration.Configuration) TaskManagerConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerConfiguration) ActorRef(akka.actor.ActorRef) Time(org.apache.flink.api.common.time.Time) JobManager(org.apache.flink.runtime.jobmanager.JobManager) MetricRegistryConfiguration(org.apache.flink.runtime.metrics.MetricRegistryConfiguration) Props(akka.actor.Props) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) TaskManagerConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerConfiguration) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) MetricRegistry(org.apache.flink.runtime.metrics.MetricRegistry) FiniteDuration(scala.concurrent.duration.FiniteDuration) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) ResultPartitionManager(org.apache.flink.runtime.io.network.partition.ResultPartitionManager) NetworkBufferPool(org.apache.flink.runtime.io.network.buffer.NetworkBufferPool) LocalConnectionManager(org.apache.flink.runtime.io.network.LocalConnectionManager) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) StandaloneLeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService) StandaloneLeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) TaskEventDispatcher(org.apache.flink.runtime.io.network.TaskEventDispatcher) JavaTestKit(akka.testkit.JavaTestKit) Test(org.junit.Test)
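
The assertions at the end are the point of the test: after shutdown, each component must report that it released its resources. The same checks in isolation, as a minimal sketch using only methods already called above:

final IOManager ioManager = new IOManagerAsync();
final MemoryManager memoryManager = new MemoryManager(32 * 32 * 1024, 1, 32 * 1024, MemoryType.HEAP, false);

// shutting down releases the asynchronous writer/reader threads and the memory pool
ioManager.shutdown();
memoryManager.shutdown();

// both components expose a check that shutdown completed cleanly
assertTrue(ioManager.isProperlyShutDown());
assertTrue(memoryManager.isShutdown());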

Example 24 with IOManager

Use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.

From the class HashTableRecordWidthCombinations, method main.

public static void main(String[] args) throws Exception {
    @SuppressWarnings("unchecked") final TypeSerializer<Tuple2<Long, byte[]>> buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>((Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class, new TypeSerializer<?>[] { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE });
    final TypeSerializer<Long> probeSerializer = LongSerializer.INSTANCE;
    final TypeComparator<Tuple2<Long, byte[]>> buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] { 0 }, new TypeComparator<?>[] { new LongComparator(true) }, new TypeSerializer<?>[] { LongSerializer.INSTANCE });
    final TypeComparator<Long> probeComparator = new LongComparator(true);
    final TypePairComparator<Long, Tuple2<Long, byte[]>> pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
    final IOManager ioMan = new IOManagerAsync();
    try {
        final int pageSize = 32 * 1024;
        final int numSegments = 34;
        for (int num = 3400; num < 3550; num++) {
            final int numRecords = num;
            for (int recordLen = 270; recordLen < 320; recordLen++) {
                final byte[] payload = new byte[recordLen - 8 - 4];
                System.out.println("testing " + numRecords + " / " + recordLen);
                List<MemorySegment> memory = getMemory(numSegments, pageSize);
                // we create a hash table that thinks the records are super large. that makes it choose initially
                // a lot of memory for the partition buffers, and start with a smaller hash table. that way
                // we trigger a hash table growth early.
                MutableHashTable<Tuple2<Long, byte[]>, Long> table = new MutableHashTable<>(buildSerializer, probeSerializer, buildComparator, probeComparator, pairComparator, memory, ioMan, 16, false);
                final MutableObjectIterator<Tuple2<Long, byte[]>> buildInput = new MutableObjectIterator<Tuple2<Long, byte[]>>() {

                    private int count = 0;

                    @Override
                    public Tuple2<Long, byte[]> next(Tuple2<Long, byte[]> reuse) {
                        return next();
                    }

                    @Override
                    public Tuple2<Long, byte[]> next() {
                        if (count++ < numRecords) {
                            return new Tuple2<>(42L, payload);
                        } else {
                            return null;
                        }
                    }
                };
                // probe side
                final MutableObjectIterator<Long> probeInput = new MutableObjectIterator<Long>() {

                    private final long numRecords = 10000;

                    private long value = 0;

                    @Override
                    public Long next(Long aLong) {
                        return next();
                    }

                    @Override
                    public Long next() {
                        if (value < numRecords) {
                            return value++;
                        } else {
                            return null;
                        }
                    }
                };
                table.open(buildInput, probeInput);
                try {
                    while (table.nextRecord()) {
                        MutableObjectIterator<Tuple2<Long, byte[]>> matches = table.getBuildSideIterator();
                        while (matches.next() != null) ;
                    }
                } catch (RuntimeException e) {
                    if (!e.getMessage().contains("exceeded maximum number of recursions")) {
                        throw e;
                    }
                } finally {
                    table.close();
                }
                // make sure no temp files are left
                checkNoTempFilesRemain(ioMan);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        ioMan.shutdown();
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) TupleComparator(org.apache.flink.api.java.typeutils.runtime.TupleComparator) TupleSerializer(org.apache.flink.api.java.typeutils.runtime.TupleSerializer) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TypePairComparator(org.apache.flink.api.common.typeutils.TypePairComparator) LongComparator(org.apache.flink.api.common.typeutils.base.LongComparator) MemorySegment(org.apache.flink.core.memory.MemorySegment) Tuple2(org.apache.flink.api.java.tuple.Tuple2) MutableHashTable(org.apache.flink.runtime.operators.hash.MutableHashTable)
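
The checkNoTempFilesRemain helper is not reproduced on this page. A plausible implementation, assuming the IOManager#getSpillingDirectories() accessor, could look like the sketch below; it is illustrative, not necessarily the verbatim helper.

private static void checkNoTempFilesRemain(IOManager ioManager) {
    // scan every spilling directory of the IOManager and fail if any channel file was left behind
    for (File dir : ioManager.getSpillingDirectories()) {
        for (String file : dir.list()) {
            if (file != null && !(file.equals(".") || file.equals(".."))) {
                fail("hash table did not clean up temp files. remaining file: " + file);
            }
        }
    }
}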

Example 25 with IOManager

Use of org.apache.flink.runtime.io.disk.iomanager.IOManager in project flink by apache.

From the class MassiveStringValueSorting, method testStringValueSorting.

public void testStringValueSorting() {
    File input = null;
    File sorted = null;
    try {
        // the source file
        input = generateFileWithStrings(300000, "http://some-uri.com/that/is/a/common/prefix/to/all");
        // the sorted file
        sorted = File.createTempFile("sorted_strings", "txt");
        String[] command = { "/bin/bash", "-c", "export LC_ALL=\"C\" && cat \"" + input.getAbsolutePath() + "\" | sort > \"" + sorted.getAbsolutePath() + "\"" };
        Process p = null;
        try {
            p = Runtime.getRuntime().exec(command);
            int retCode = p.waitFor();
            if (retCode != 0) {
                throw new Exception("Command failed with return code " + retCode);
            }
            p = null;
        } finally {
            if (p != null) {
                p.destroy();
            }
        }
        // sort the data
        UnilateralSortMerger<StringValue> sorter = null;
        BufferedReader reader = null;
        BufferedReader verifyReader = null;
        try {
            MemoryManager mm = new MemoryManager(1024 * 1024, 1);
            IOManager ioMan = new IOManagerAsync();
            TypeSerializer<StringValue> serializer = new CopyableValueSerializer<StringValue>(StringValue.class);
            TypeComparator<StringValue> comparator = new CopyableValueComparator<StringValue>(true, StringValue.class);
            reader = new BufferedReader(new FileReader(input));
            MutableObjectIterator<StringValue> inputIterator = new StringValueReaderMutableObjectIterator(reader);
            sorter = new UnilateralSortMerger<StringValue>(mm, ioMan, inputIterator, new DummyInvokable(),
                new RuntimeSerializerFactory<StringValue>(serializer, StringValue.class), comparator,
                1.0, 4, 0.8f, true /* use large record handler */, true);
            MutableObjectIterator<StringValue> sortedData = sorter.getIterator();
            reader.close();
            // verify
            verifyReader = new BufferedReader(new FileReader(sorted));
            String nextVerify;
            StringValue nextFromFlinkSort = new StringValue();
            while ((nextVerify = verifyReader.readLine()) != null) {
                nextFromFlinkSort = sortedData.next(nextFromFlinkSort);
                Assert.assertNotNull(nextFromFlinkSort);
                Assert.assertEquals(nextVerify, nextFromFlinkSort.getValue());
            }
        } finally {
            if (reader != null) {
                reader.close();
            }
            if (verifyReader != null) {
                verifyReader.close();
            }
            if (sorter != null) {
                sorter.close();
            }
        }
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        if (input != null) {
            //noinspection ResultOfMethodCallIgnored
            input.delete();
        }
        if (sorted != null) {
            //noinspection ResultOfMethodCallIgnored
            sorted.delete();
        }
    }
}
Also used : IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) RuntimeSerializerFactory(org.apache.flink.api.java.typeutils.runtime.RuntimeSerializerFactory) CopyableValueSerializer(org.apache.flink.api.java.typeutils.runtime.CopyableValueSerializer) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) IOException(java.io.IOException) CopyableValueComparator(org.apache.flink.api.java.typeutils.runtime.CopyableValueComparator) IOManagerAsync(org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) DummyInvokable(org.apache.flink.runtime.operators.testutils.DummyInvokable) StringValue(org.apache.flink.types.StringValue) File(java.io.File)
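
The generateFileWithStrings and StringValueReaderMutableObjectIterator helpers are not shown on this page. One plausible shape for the reader iterator, which adapts a BufferedReader to the MutableObjectIterator interface the sorter consumes, is sketched below; the field and method bodies are assumptions, not the verbatim Flink source.

private static final class StringValueReaderMutableObjectIterator implements MutableObjectIterator<StringValue> {

    private final BufferedReader reader;

    StringValueReaderMutableObjectIterator(BufferedReader reader) {
        this.reader = reader;
    }

    @Override
    public StringValue next(StringValue reuse) throws IOException {
        // one line per record; null signals the end of the input
        final String line = reader.readLine();
        if (line == null) {
            return null;
        }
        reuse.setValue(line);
        return reuse;
    }

    @Override
    public StringValue next() throws IOException {
        return next(new StringValue());
    }
}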

Aggregations

IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager): 30
IOManagerAsync (org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync): 22
MemoryManager (org.apache.flink.runtime.memory.MemoryManager): 19
Test (org.junit.Test): 19
MemorySegment (org.apache.flink.core.memory.MemorySegment): 15
DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable): 12
IOException (java.io.IOException): 10
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 9
TupleTypeInfo (org.apache.flink.api.java.typeutils.TupleTypeInfo): 7
File (java.io.File): 6
ArrayList (java.util.ArrayList): 6
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 6
AbstractInvokable (org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable): 5
BufferedReader (java.io.BufferedReader): 4
FileReader (java.io.FileReader): 4
Random (java.util.Random): 4
JobID (org.apache.flink.api.common.JobID): 3
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 3
RuntimeSerializerFactory (org.apache.flink.api.java.typeutils.runtime.RuntimeSerializerFactory): 3
Configuration (org.apache.flink.configuration.Configuration): 3
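
As the counts above suggest, the dominant combination is an IOManagerAsync paired with a MemoryManager inside a JUnit test. A typical fixture for that pairing, as a generic sketch distilled from the examples above (not copied from any single test, class name is illustrative):

public class IOManagerBackedTestBase {

    private IOManager ioManager;
    private MemoryManager memoryManager;

    @Before
    public void setUp() {
        ioManager = new IOManagerAsync();
        memoryManager = new MemoryManager(32 * 32 * 1024, 1, 32 * 1024, MemoryType.HEAP, true);
    }

    @After
    public void tearDown() {
        memoryManager.shutdown();
        ioManager.shutdown();
        // verify that the asynchronous I/O threads terminated and all memory was released
        assertTrue("IOManager was not properly shut down.", ioManager.isProperlyShutDown());
        assertTrue("MemoryManager was not properly shut down.", memoryManager.isShutdown());
    }
}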