Example 26 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project j2objc by google.

The class FileChannelImpl, method transferToTrustedChannel:

private long transferToTrustedChannel(long position, long count, WritableByteChannel target) throws IOException {
    boolean isSelChImpl = (target instanceof SelChImpl);
    if (!((target instanceof FileChannelImpl) || isSelChImpl))
        return IOStatus.UNSUPPORTED;
    // Trusted target: Use a mapped buffer
    long remaining = count;
    while (remaining > 0L) {
        long size = Math.min(remaining, MAPPED_TRANSFER_SIZE);
        try {
            MappedByteBuffer dbb = map(MapMode.READ_ONLY, position, size);
            try {
                // ## Bug: Closing this channel will not terminate the write
                int n = target.write(dbb);
                assert n >= 0;
                remaining -= n;
                if (isSelChImpl) {
                    // one attempt to write to selectable channel
                    break;
                }
                assert n > 0;
                position += n;
            } finally {
                unmap(dbb);
            }
        } catch (ClosedByInterruptException e) {
            // target closed by interrupt as ClosedByInterruptException needs
            // to be thrown after closing this channel.
            assert !target.isOpen();
            try {
                close();
            } catch (Throwable suppressed) {
                e.addSuppressed(suppressed);
            }
            throw e;
        } catch (IOException ioe) {
            // Only throw exception if no bytes have been written
            if (remaining == count)
                throw ioe;
            break;
        }
    }
    return count - remaining;
}
Also used: ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), MappedByteBuffer (java.nio.MappedByteBuffer), IOException (java.io.IOException)
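
Note: the catch block above relies on the InterruptibleChannel contract: interrupting a thread that is blocked in (or about to enter) a blocking channel operation closes the channel and raises ClosedByInterruptException. A minimal, self-contained sketch of that contract (the file name big.dat is a placeholder):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class InterruptContractDemo {
    public static void main(String[] args) throws Exception {
        Thread reader = new Thread(() -> {
            try (FileChannel ch = FileChannel.open(Path.of("big.dat"), StandardOpenOption.READ)) {
                ByteBuffer buf = ByteBuffer.allocate(8192);
                // Keep reading until EOF or until the owning thread is interrupted.
                while (ch.read(buf) != -1) {
                    buf.clear();
                }
            } catch (ClosedByInterruptException e) {
                // The interrupt already closed the channel before this was thrown.
                System.out.println("read loop terminated by interrupt");
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
        reader.start();
        Thread.sleep(100);
        reader.interrupt(); // closes the channel; the reader sees ClosedByInterruptException
        reader.join();
    }
}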

Example 27 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project hudson-2.x by hudson.

The class UDPBroadcastThread, method run:

@Override
public void run() {
    try {
        mcs.joinGroup(MULTICAST);
        ready.signal();
        while (true) {
            byte[] buf = new byte[2048];
            DatagramPacket p = new DatagramPacket(buf, buf.length);
            mcs.receive(p);
            SocketAddress sender = p.getSocketAddress();
            // prepare a response
            TcpSlaveAgentListener tal = hudson.getTcpSlaveAgentListener();
            StringBuilder rsp = new StringBuilder("<hudson>");
            tag(rsp, "version", Hudson.VERSION);
            tag(rsp, "url", hudson.getRootUrl());
            tag(rsp, "slave-port", tal == null ? null : tal.getPort());
            for (UDPBroadcastFragment f : UDPBroadcastFragment.all()) f.buildFragment(rsp, sender);
            rsp.append("</hudson>");
            byte[] response = rsp.toString().getBytes("UTF-8");
            mcs.send(new DatagramPacket(response, response.length, sender));
        }
    } catch (ClosedByInterruptException e) {
    // shut down
    } catch (BindException e) {
        // if we failed to listen to UDP, just silently abandon it, as a stack trace
        // makes people unnecessarily concerned, for a feature that currently does no good.
        LOGGER.log(Level.WARNING, "Failed to listen to UDP port " + PORT, e);
    } catch (IOException e) {
        // forcibly closed
        if (shutdown)
            return;
        LOGGER.log(Level.WARNING, "UDP handling problem", e);
    }
}
Also used: ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), DatagramPacket (java.net.DatagramPacket), BindException (java.net.BindException), IOException (java.io.IOException), SocketAddress (java.net.SocketAddress)
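
Note: ClosedByInterruptException is only raised by channel-based I/O (implementations of InterruptibleChannel), so whether the catch above ever fires depends on how mcs was constructed; a receive on a plain MulticastSocket is normally unblocked by close() rather than by interrupt(). A minimal sketch of the interruptible variant using DatagramChannel directly (the port number is a placeholder):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.DatagramChannel;

public class InterruptibleUdpListener implements Runnable {
    @Override
    public void run() {
        try (DatagramChannel ch = DatagramChannel.open()) {
            ch.bind(new InetSocketAddress(33848)); // placeholder port
            ByteBuffer buf = ByteBuffer.allocate(2048);
            while (true) {
                buf.clear();
                // Blocks until a packet arrives; because DatagramChannel is an
                // InterruptibleChannel, Thread.interrupt() on this thread closes
                // the channel and this call throws ClosedByInterruptException.
                ch.receive(buf);
            }
        } catch (ClosedByInterruptException e) {
            // normal shutdown path: the owning thread was interrupted
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}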

Example 28 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project hive by apache.

The class HiveInputFormat, method getRecordReader:

@Override
public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
    HiveInputSplit hsplit = (HiveInputSplit) split;
    String inputFormatClassName = null;
    Class inputFormatClass = null;
    try {
        inputFormatClassName = hsplit.inputFormatClassName();
        inputFormatClass = job.getClassByName(inputFormatClassName);
    } catch (Exception e) {
        throw new IOException("cannot find class " + inputFormatClassName, e);
    }
    if (this.mrwork == null || pathToPartitionInfo == null) {
        init(job);
    }
    boolean nonNative = false;
    PartitionDesc part = HiveFileFormatUtils.getFromPathRecursively(pathToPartitionInfo, hsplit.getPath(), null);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Found spec for " + hsplit.getPath() + " " + part + " from " + pathToPartitionInfo);
    }
    try {
        if ((part != null) && (part.getTableDesc() != null)) {
            Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), job);
            nonNative = part.getTableDesc().isNonNative();
        }
    } catch (HiveException e) {
        throw new IOException(e);
    }
    Path splitPath = hsplit.getPath();
    pushProjectionsAndFiltersAndAsOf(job, splitPath);
    InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
    if (HiveConf.getBoolVar(job, ConfVars.LLAP_IO_ENABLED, LlapProxy.isDaemon())) {
        try {
            inputFormat = HiveInputFormat.wrapForLlap(inputFormat, job, part);
        } catch (HiveException e) {
            throw new IOException(e);
        }
    }
    RecordReader innerReader = null;
    try {
        // Handle the special header/footer skipping cases here.
        innerReader = RecordReaderWrapper.create(inputFormat, hsplit, part.getTableDesc(), job, reporter);
    } catch (Exception e) {
        Throwable rootCause = JavaUtils.findRootCause(e);
        if (checkLimitReached(job) && (rootCause instanceof InterruptedException || rootCause instanceof ClosedByInterruptException)) {
            LOG.info("Ignoring exception while getting record reader as limit is reached", rootCause);
            innerReader = new NullRowsRecordReader(job, split);
        } else {
            innerReader = HiveIOExceptionHandlerUtil.handleRecordReaderCreationException(e, job);
        }
    }
    HiveRecordReader<K, V> rr = new HiveRecordReader(innerReader, job);
    rr.initIOContext(hsplit, job, inputFormatClass, innerReader);
    return rr;
}
Also used: Path (org.apache.hadoop.fs.Path), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), NullRowsRecordReader (org.apache.hadoop.hive.ql.io.NullRowsInputFormat.NullRowsRecordReader), RecordReader (org.apache.hadoop.mapred.RecordReader), IOException (java.io.IOException), ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), ExecutionException (java.util.concurrent.ExecutionException), InputFormat (org.apache.hadoop.mapred.InputFormat), FileInputFormat (org.apache.hadoop.mapred.FileInputFormat), TextInputFormat (org.apache.hadoop.mapred.TextInputFormat), PartitionDesc (org.apache.hadoop.hive.ql.plan.PartitionDesc)
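
Note: the limit-reached branch works by unwrapping the cause chain, since RecordReaderWrapper.create may bury the interrupt under several layers of wrapping, and only the root cause shows whether an interrupt was responsible. A rough stand-in for the unwrapping helper (hypothetical; Hive's actual JavaUtils.findRootCause may differ in detail):

import java.nio.channels.ClosedByInterruptException;

final class Causes {
    private Causes() {
    }

    /** Walk the cause chain to its deepest element, guarding against cycles. */
    static Throwable rootCause(Throwable t) {
        Throwable root = t;
        for (int i = 0; i < 64 && root.getCause() != null && root.getCause() != root; i++) {
            root = root.getCause();
        }
        return root;
    }

    /** True when an exception ultimately stems from a thread interrupt, as checked above. */
    static boolean causedByInterrupt(Throwable t) {
        Throwable root = rootCause(t);
        return root instanceof InterruptedException || root instanceof ClosedByInterruptException;
    }
}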

Example 29 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project graylog2-server by Graylog2.

The class LocalKafkaJournal, method read:

/**
 * Read from the journal, starting at the given offset. If the underlying journal implementation returns an empty
 * list of entries, it will be returned even if we know there are more entries in the journal.
 *
 * @param readOffset Offset to start reading at
 * @param requestedMaximumCount Maximum number of entries to return.
 * @return A list of entries
 */
public List<JournalReadEntry> read(long readOffset, long requestedMaximumCount) {
    // Always read at least one!
    final long maximumCount = Math.max(1, requestedMaximumCount);
    long maxOffset = readOffset + maximumCount;
    if (shuttingDown) {
        return Collections.emptyList();
    }
    final List<JournalReadEntry> messages = new ArrayList<>(Ints.saturatedCast(maximumCount));
    try (Timer.Context ignored = readTime.time()) {
        final long logStartOffset = getLogStartOffset();
        if (readOffset < logStartOffset) {
            LOG.info("Read offset {} before start of log at {}, starting to read from the beginning of the journal.", readOffset, logStartOffset);
            readOffset = logStartOffset;
            maxOffset = readOffset + maximumCount;
        }
        LOG.debug("Requesting to read a maximum of {} messages (or 5MB) from the journal, offset interval [{}, {})", maximumCount, readOffset, maxOffset);
        // TODO benchmark and make read-ahead strategy configurable for performance tuning
        final MessageSet messageSet = kafkaLog.read(readOffset, 5 * 1024 * 1024, Option.<Object>apply(maxOffset)).messageSet();
        final Iterator<MessageAndOffset> iterator = messageSet.iterator();
        long firstOffset = Long.MIN_VALUE;
        long lastOffset = Long.MIN_VALUE;
        long totalBytes = 0;
        while (iterator.hasNext()) {
            final MessageAndOffset messageAndOffset = iterator.next();
            if (firstOffset == Long.MIN_VALUE) {
                firstOffset = messageAndOffset.offset();
            }
            // always remember the last seen offset for debug purposes below
            lastOffset = messageAndOffset.offset();
            final byte[] payloadBytes = ByteBufferUtils.readBytes(messageAndOffset.message().payload());
            if (LOG.isTraceEnabled()) {
                final byte[] keyBytes = ByteBufferUtils.readBytes(messageAndOffset.message().key());
                LOG.trace("Read message {} contains {}", bytesToHex(keyBytes), bytesToHex(payloadBytes));
            }
            totalBytes += payloadBytes.length;
            messages.add(new JournalReadEntry(payloadBytes, messageAndOffset.offset()));
            // remember where to read from
            nextReadOffset = messageAndOffset.nextOffset();
        }
        if (messages.isEmpty()) {
            LOG.debug("No messages available to read for offset interval [{}, {}).", readOffset, maxOffset);
        } else {
            LOG.debug("Read {} messages, total payload size {}, from journal, offset interval [{}, {}], requested read at {}", messages.size(), totalBytes, firstOffset, lastOffset, readOffset);
        }
    } catch (OffsetOutOfRangeException e) {
        // This is fine, the reader tries to read faster than the writer committed data. Next read will get the data.
        LOG.debug("Offset out of range, no messages available starting at offset {}", readOffset);
    } catch (Exception e) {
        // sigh.
        if (shuttingDown) {
            LOG.debug("Caught exception during shutdown, ignoring it because we might have been blocked on a read.");
            return Collections.emptyList();
        }
        // noinspection ConstantConditions
        if (e instanceof ClosedByInterruptException) {
            LOG.debug("Interrupted while reading from journal, during shutdown this is harmless and ignored.", e);
        } else {
            throw e;
        }
    }
    readMessages.mark(messages.size());
    return messages;
}
Also used: ArrayList (java.util.ArrayList), MessageAndOffset (org.graylog.shaded.kafka09.message.MessageAndOffset), UncheckedIOException (java.io.UncheckedIOException), ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), SyncFailedException (java.io.SyncFailedException), OffsetOutOfRangeException (org.graylog.shaded.kafka09.common.OffsetOutOfRangeException), AccessDeniedException (java.nio.file.AccessDeniedException), IOException (java.io.IOException), KafkaException (org.graylog.shaded.kafka09.common.KafkaException), ByteBufferMessageSet (org.graylog.shaded.kafka09.message.ByteBufferMessageSet), MessageSet (org.graylog.shaded.kafka09.message.MessageSet), HdrTimer (org.graylog2.shared.metrics.HdrTimer), Timer (com.codahale.metrics.Timer)
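
Note: the shape of this error handling, swallowing interrupt-induced failures while shutting down and rethrowing everything else, generalizes beyond Kafka. A minimal sketch of the pattern, assuming a blocking read that may surface ClosedByInterruptException as a cause (doRead is a hypothetical stand-in):

import java.nio.channels.ClosedByInterruptException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

class ShutdownAwareReader {
    private final AtomicBoolean shuttingDown = new AtomicBoolean();

    void triggerShutdown() {
        shuttingDown.set(true);
    }

    List<byte[]> readBatch() {
        try {
            return doRead();
        } catch (RuntimeException e) {
            if (shuttingDown.get()) {
                // We might have been blocked on a read when shutdown began; ignore.
                return Collections.emptyList();
            }
            if (e.getCause() instanceof ClosedByInterruptException) {
                // Harmless when caused by an interrupt-driven shutdown of the channel.
                return Collections.emptyList();
            }
            throw e;
        }
    }

    private List<byte[]> doRead() {
        // Hypothetical stand-in for the blocking journal read.
        return Collections.emptyList();
    }
}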

Example 30 with ClosedByInterruptException

Use of java.nio.channels.ClosedByInterruptException in project canal by alibaba.

The class BioSocketChannel, method read:

@Override
public void read(byte[] data, int off, int len, int timeout) throws IOException {
    InputStream input = this.input;
    int accTimeout = 0;
    if (input == null) {
        throw new SocketException("Socket already closed.");
    }
    int n = 0;
    while (n < len && accTimeout < timeout) {
        try {
            int read = input.read(data, off + n, len - n);
            if (read > -1) {
                n += read;
            } else {
                throw new IOException("EOF encountered.");
            }
        } catch (SocketTimeoutException te) {
            if (Thread.interrupted()) {
                throw new ClosedByInterruptException();
            }
            accTimeout += SO_TIMEOUT;
        }
    }
    if (n < len && accTimeout >= timeout) {
        throw new SocketTimeoutException("Timeout occurred, failed to read total " + len + " bytes in " + timeout + " milliseconds, actual read only " + n + " bytes");
    }
}
Also used: ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), SocketException (java.net.SocketException), SocketTimeoutException (java.net.SocketTimeoutException), BufferedInputStream (java.io.BufferedInputStream), InputStream (java.io.InputStream), IOException (java.io.IOException)
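
Note: because BioSocketChannel reads from a plain blocking InputStream rather than an NIO channel, the interrupt check must be done by hand: SO_TIMEOUT makes read() return periodically via SocketTimeoutException, and each timeout is an opportunity to poll Thread.interrupted() and report the cancellation with the same exception NIO would use. A condensed sketch of that convention (SO_TIMEOUT_MS is an assumed constant matching the socket's configured timeout):

import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;
import java.nio.channels.ClosedByInterruptException;

final class TimedReads {
    private static final int SO_TIMEOUT_MS = 1000; // assumed socket SO_TIMEOUT

    /** Read up to len bytes, giving up after roughly timeoutMs; interruptible between timeouts. */
    static int readFully(InputStream in, byte[] buf, int off, int len, int timeoutMs) throws IOException {
        int n = 0;
        int waited = 0;
        while (n < len && waited < timeoutMs) {
            try {
                int read = in.read(buf, off + n, len - n);
                if (read < 0) {
                    throw new IOException("EOF encountered.");
                }
                n += read;
            } catch (SocketTimeoutException te) {
                if (Thread.interrupted()) {
                    // Mirror NIO semantics for plain blocking sockets.
                    throw new ClosedByInterruptException();
                }
                waited += SO_TIMEOUT_MS;
            }
        }
        return n;
    }
}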

Aggregations

ClosedByInterruptException (java.nio.channels.ClosedByInterruptException): 79
IOException (java.io.IOException): 48
ByteBuffer (java.nio.ByteBuffer): 15
ClosedChannelException (java.nio.channels.ClosedChannelException): 11
SocketTimeoutException (java.net.SocketTimeoutException): 9
InetSocketAddress (java.net.InetSocketAddress): 7
MappedByteBuffer (java.nio.MappedByteBuffer): 7
SocketChannel (java.nio.channels.SocketChannel): 7
File (java.io.File): 6
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 6
ServerSocket (java.net.ServerSocket): 5
FileChannel (java.nio.channels.FileChannel): 5
FileLockInterruptionException (java.nio.channels.FileLockInterruptionException): 5
InterruptedIOException (java.io.InterruptedIOException): 4
Path (java.nio.file.Path): 4
Test (org.junit.Test): 4
BuildId (com.facebook.buck.model.BuildId): 3
FileNotFoundException (java.io.FileNotFoundException): 3
InputStream (java.io.InputStream): 3
SocketException (java.net.SocketException): 3