Use of org.apache.flink.runtime.io.disk.iomanager.FileIOChannel in project flink by apache.
Class UnilateralSortMerger, method close().
/**
* Shuts down all the threads initiated by this sort/merger. Also releases all previously allocated
* memory, if it has not yet been released by the threads, and closes and deletes all channels (removing
* the temporary files).
* <p>
* The threads are signaled to exit directly, but depending on their current operation, it may take a while
* for that to actually happen. The sorting thread, for example, will not finish before the current batch is
* sorted. This method attempts to wait for the working threads to exit. If it is interrupted, however, the
* method exits immediately, and there is no guarantee how long the threads will continue to exist and occupy
* resources afterwards.
*
* @see java.io.Closeable#close()
*/
@Override
public void close() {
    // check if the sorter has been closed before
    synchronized (this) {
        if (this.closed) {
            return;
        }
        // mark as closed
        this.closed = true;
    }

    // we need to make sure that all the memory is released.
    try {
        // if the result iterator has not been obtained yet, set the exception
        synchronized (this.iteratorLock) {
            if (this.iteratorException == null) {
                this.iteratorException = new IOException("The sorter has been closed.");
                this.iteratorLock.notifyAll();
            }
        }

        // stop all the threads
        if (this.readThread != null) {
            try {
                this.readThread.shutdown();
            } catch (Throwable t) {
                LOG.error("Error shutting down reader thread: " + t.getMessage(), t);
            }
        }
        if (this.sortThread != null) {
            try {
                this.sortThread.shutdown();
            } catch (Throwable t) {
                LOG.error("Error shutting down sorter thread: " + t.getMessage(), t);
            }
        }
        if (this.spillThread != null) {
            try {
                this.spillThread.shutdown();
            } catch (Throwable t) {
                LOG.error("Error shutting down spilling thread: " + t.getMessage(), t);
            }
        }

        try {
            if (this.readThread != null) {
                this.readThread.join();
            }
            if (this.sortThread != null) {
                this.sortThread.join();
            }
            if (this.spillThread != null) {
                this.spillThread.join();
            }
        } catch (InterruptedException iex) {
            LOG.debug("Closing of sort/merger was interrupted. The reading/sorting/spilling threads may still be working.", iex);
        }
    } finally {
        // release all memory; if the threads and channels are still working, this should cause
        // exceptions, because their memory segments are freed
        try {
            if (!this.writeMemory.isEmpty()) {
                this.memoryManager.release(this.writeMemory);
            }
            this.writeMemory.clear();
        } catch (Throwable t) {
        }
        try {
            if (!this.sortReadMemory.isEmpty()) {
                this.memoryManager.release(this.sortReadMemory);
            }
            this.sortReadMemory.clear();
        } catch (Throwable t) {
        }

        // we have to loop this, because it may fail with a concurrent modification exception
        while (!this.openChannels.isEmpty()) {
            try {
                for (Iterator<FileIOChannel> channels = this.openChannels.iterator(); channels.hasNext(); ) {
                    final FileIOChannel channel = channels.next();
                    channels.remove();
                    channel.closeAndDelete();
                }
            } catch (Throwable t) {
            }
        }

        // we have to loop this, because it may fail with a concurrent modification exception
        while (!this.channelsToDeleteAtShutdown.isEmpty()) {
            try {
                for (Iterator<FileIOChannel.ID> channels = this.channelsToDeleteAtShutdown.iterator(); channels.hasNext(); ) {
                    final FileIOChannel.ID channel = channels.next();
                    channels.remove();
                    try {
                        final File f = new File(channel.getPath());
                        if (f.exists()) {
                            f.delete();
                        }
                    } catch (Throwable t) {
                    }
                }
            } catch (Throwable t) {
            }
        }

        try {
            if (this.largeRecordHandler != null) {
                this.largeRecordHandler.close();
            }
        } catch (Throwable t) {
        }
    }
}
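The two cleanup loops in the finally block retry until their collections are empty because worker threads may still be registering channels while close() runs, which can make the iterator fail with a ConcurrentModificationException. Below is a minimal standalone sketch of that retry pattern; TempFileCleanup and its members are illustrative names, not Flink API, and plain java.io.File stands in for the spill channels.

import java.io.File;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

// Illustrative sketch only: mirrors the "loop until empty" cleanup used in close() above.
public class TempFileCleanup {

    // Files registered by worker threads; may still be mutated concurrently while we clean up.
    private final Set<File> tempFiles = new HashSet<>();

    public synchronized void register(File f) {
        tempFiles.add(f);
    }

    public void deleteAll() {
        // Retry whole passes: a concurrent modification can invalidate the iterator,
        // so the failure is swallowed and the pass restarts until nothing is left.
        while (!tempFiles.isEmpty()) {
            try {
                for (Iterator<File> it = tempFiles.iterator(); it.hasNext(); ) {
                    final File f = it.next();
                    it.remove();
                    if (f.exists()) {
                        f.delete();
                    }
                }
            } catch (Throwable t) {
                // ignore and retry, exactly like the sorter's close() does
            }
        }
    }
}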
Use of org.apache.flink.runtime.io.disk.iomanager.FileIOChannel in project flink by apache.
Class SpillingThread, method mergeChannels().
/**
* Merges the sorted runs described by the given Channel IDs into a single sorted run. The
* merging process uses the given read and write buffers.
*
* @param channelIDs The IDs of the runs' channels.
* @param readBuffers The buffers for the readers that read the sorted runs.
* @param writeBuffers The buffers for the writer that writes the merged channel.
* @return The ID and number of blocks of the channel that describes the merged run.
*/
private ChannelWithBlockCount mergeChannels(List<ChannelWithBlockCount> channelIDs, List<List<MemorySegment>> readBuffers, List<MemorySegment> writeBuffers) throws IOException {
    // the readers over the sorted runs, to be closed at shutdown
    final List<FileIOChannel> channelAccesses = new ArrayList<>(channelIDs.size());

    // the merging iterator over the sorted runs
    final MergeIterator<E> mergeIterator = getMergingIterator(channelIDs, readBuffers, channelAccesses, null);

    // create a new channel writer for the merged run
    final FileIOChannel.ID mergedChannelID = this.ioManager.createChannel();
    spillChannelManager.registerChannelToBeRemovedAtShutdown(mergedChannelID);
    final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(mergedChannelID);
    spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(writer);
    final ChannelWriterOutputView output = new ChannelWriterOutputView(writer, writeBuffers, this.memManager.getPageSize());

    openSpillingBehaviour();
    spillingBehaviour.mergeRecords(mergeIterator, output);
    output.close();
    final int numBlocksWritten = output.getBlockCount();

    // the writer is closed, so it no longer needs to be tracked as an open channel
    spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(writer);

    // close and delete the readers of the merged runs and remove them from the clear-at-shutdown list
    for (FileIOChannel access : channelAccesses) {
        access.closeAndDelete();
        spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(access);
    }
    return new ChannelWithBlockCount(mergedChannelID, numBlocksWritten);
}
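Conceptually, getMergingIterator builds a k-way merge over the sorted runs, and mergeRecords streams its output into the new channel. The following self-contained sketch shows the same k-way merge idea over in-memory sorted lists of integers; KWayMergeSketch and Head are illustrative names and do not correspond to Flink classes.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Illustrative sketch only: the k-way merge idea behind the MergeIterator that
// mergeChannels streams into the writer. Real Flink merges serialized records
// from spilled channels; this sketch merges in-memory sorted lists of ints.
public final class KWayMergeSketch {

    private static final class Head {
        final int value;
        final Iterator<Integer> rest;
        Head(int value, Iterator<Integer> rest) { this.value = value; this.rest = rest; }
    }

    public static List<Integer> merge(List<List<Integer>> sortedRuns) {
        // Priority queue over the current head element of each run.
        PriorityQueue<Head> heads = new PriorityQueue<>(Comparator.comparingInt((Head h) -> h.value));
        for (List<Integer> run : sortedRuns) {
            Iterator<Integer> it = run.iterator();
            if (it.hasNext()) {
                heads.add(new Head(it.next(), it));
            }
        }
        List<Integer> merged = new ArrayList<>();
        while (!heads.isEmpty()) {
            // Emit the smallest head and advance the run it came from.
            Head smallest = heads.poll();
            merged.add(smallest.value);
            if (smallest.rest.hasNext()) {
                heads.add(new Head(smallest.rest.next(), smallest.rest));
            }
        }
        return merged;
    }
}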
Use of org.apache.flink.runtime.io.disk.iomanager.FileIOChannel in project flink by apache.
Class SpillChannelManager, method addOpenChannels().
/**
 * Adds open file channels to be tracked. Once a channel is tracked as an open channel,
 * its ID is no longer tracked separately for deletion.
 */
public synchronized void addOpenChannels(List<FileIOChannel> toOpen) {
    checkArgument(!closed);
    for (FileIOChannel channel : toOpen) {
        openChannels.add(channel);
        channels.remove(channel.getChannelID());
    }
}
Use of org.apache.flink.runtime.io.disk.iomanager.FileIOChannel in project flink by apache.
Class SpillChannelManager, method close().
@Override
public synchronized void close() {
    if (this.closed) {
        return;
    }
    this.closed = true;

    for (Iterator<FileIOChannel> channels = this.openChannels.iterator(); channels.hasNext(); ) {
        try {
            final FileIOChannel channel = channels.next();
            channels.remove();
            channel.closeAndDelete();
        } catch (Throwable ignored) {
        }
    }

    for (Iterator<FileIOChannel.ID> channels = this.channelsToDeleteAtShutdown.iterator(); channels.hasNext(); ) {
        try {
            final FileIOChannel.ID channel = channels.next();
            channels.remove();
            final File f = new File(channel.getPath());
            if (f.exists()) {
                f.delete();
            }
        } catch (Throwable ignored) {
        }
    }
}
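Taken together, addOpenChannels and close implement a simple bookkeeping protocol: a channel ID is tracked until a reader or writer is opened for it, after which only the open handle is tracked, and close() is idempotent and best-effort deletes whatever is left. Below is a minimal standalone sketch of that protocol, assuming a hypothetical SpillHandle interface in place of FileIOChannel; SpillTrackerSketch and all member names are illustrative, not Flink API.

import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch only: the bookkeeping pattern behind SpillChannelManager.
// Not-yet-opened spill files and handles of open channels are tracked separately;
// close() is idempotent and best-effort deletes everything that remains.
public class SpillTrackerSketch implements AutoCloseable {

    // Hypothetical stand-in for FileIOChannel: an open handle onto a spill file.
    public interface SpillHandle {
        File file();
        void closeAndDelete() throws IOException;
    }

    private final Set<File> filesToDelete = new HashSet<>();      // like channelsToDeleteAtShutdown
    private final Set<SpillHandle> openHandles = new HashSet<>(); // like openChannels
    private boolean closed;

    public synchronized void addFile(File f) {
        if (closed) throw new IllegalStateException("already closed");
        filesToDelete.add(f);
    }

    // Mirrors addOpenChannels: once a file is opened, track the handle and
    // drop the raw file entry so it is not deleted twice.
    public synchronized void addOpenHandle(SpillHandle handle) {
        if (closed) throw new IllegalStateException("already closed");
        openHandles.add(handle);
        filesToDelete.remove(handle.file());
    }

    @Override
    public synchronized void close() {
        if (closed) return;
        closed = true;
        for (SpillHandle handle : openHandles) {
            try { handle.closeAndDelete(); } catch (Throwable ignored) { }
        }
        openHandles.clear();
        for (File f : filesToDelete) {
            if (f.exists()) { f.delete(); }
        }
        filesToDelete.clear();
    }
}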
Use of org.apache.flink.runtime.io.disk.iomanager.FileIOChannel in project flink by apache.
Class AbstractBinaryExternalMerger, method mergeChannels().
/**
* Merges the sorted runs described by the given Channel IDs into a single sorted run.
*
* @param channelIDs The IDs of the runs' channels.
* @return The ID and number of blocks of the channel that describes the merged run.
*/
private ChannelWithMeta mergeChannels(List<ChannelWithMeta> channelIDs) throws IOException {
    // the readers over the sorted runs, to be closed and deleted after the merge
    List<FileIOChannel> openChannels = new ArrayList<>(channelIDs.size());
    final BinaryMergeIterator<Entry> mergeIterator = getMergingIterator(channelIDs, openChannels);

    // create a new channel writer for the merged run
    final FileIOChannel.ID mergedChannelID = ioManager.createChannel();
    channelManager.addChannel(mergedChannelID);

    AbstractChannelWriterOutputView output = null;
    int numBytesInLastBlock;
    int numBlocksWritten;
    try {
        output = FileChannelUtil.createOutputView(ioManager, mergedChannelID, compressionEnable, compressionCodecFactory, compressionBlockSize, pageSize);
        writeMergingOutput(mergeIterator, output);
        numBytesInLastBlock = output.close();
        numBlocksWritten = output.getBlockCount();
    } catch (IOException e) {
        if (output != null) {
            output.close();
            output.getChannel().deleteChannel();
        }
        throw e;
    }

    // remove, close, and delete the input channels
    for (FileIOChannel channel : openChannels) {
        channelManager.removeChannel(channel.getChannelID());
        try {
            channel.closeAndDelete();
        } catch (Throwable ignored) {
        }
    }
    return new ChannelWithMeta(mergedChannelID, numBlocksWritten, numBytesInLastBlock);
}
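The try/catch in this variant makes the failure behavior explicit: if writing the merged output fails, the partially written channel is closed and deleted and the inputs stay untouched; only after a successful merge are the input channels removed and deleted. Below is a standalone sketch of that shape using java.nio.file instead of Flink channels; MergeAndReplaceSketch and mergeFiles are illustrative names, and the "merge" is just a concatenation placeholder.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Illustrative sketch only: the delete-on-failure / delete-inputs-on-success shape
// of AbstractBinaryExternalMerger.mergeChannels, using plain files instead of channels.
public final class MergeAndReplaceSketch {

    public static Path mergeFiles(List<Path> sortedInputs, Path mergedOutput) throws IOException {
        try (OutputStream out = Files.newOutputStream(mergedOutput)) {
            // Placeholder for the real merge logic: simply concatenate the inputs.
            for (Path in : sortedInputs) {
                Files.copy(in, out);
            }
        } catch (IOException e) {
            // The partial result is useless: remove it and keep the inputs for a retry.
            Files.deleteIfExists(mergedOutput);
            throw e;
        }
        // Success: the inputs are fully contained in the merged output and can go.
        for (Path in : sortedInputs) {
            try {
                Files.deleteIfExists(in);
            } catch (IOException ignored) {
                // best effort, mirroring the "catch (Throwable ignored)" above
            }
        }
        return mergedOutput;
    }
}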