use of java.io.EOFException in project hadoop by apache.
the class DecompressorStream method decompress.
protected int decompress(byte[] b, int off, int len) throws IOException {
  int n;
  while ((n = decompressor.decompress(b, off, len)) == 0) {
    if (decompressor.needsDictionary()) {
      eof = true;
      return -1;
    }
    if (decompressor.finished()) {
      // First see if there was any leftover buffered input from previous
      // stream; if not, attempt to refill buffer. If refill -> EOF, we're
      // all done; else reset, fix up input buffer, and get ready for next
      // concatenated substream/"member".
      int nRemaining = decompressor.getRemaining();
      if (nRemaining == 0) {
        int m = getCompressedData();
        if (m == -1) {
          // apparently the previous end-of-stream was also end-of-file:
          // return success, as if we had never called getCompressedData()
          eof = true;
          return -1;
        }
        decompressor.reset();
        decompressor.setInput(buffer, 0, m);
        lastBytesSent = m;
      } else {
        // looks like it's a concatenated stream: reset low-level zlib (or
        // other engine) and buffers, then "resend" remaining input data
        decompressor.reset();
        int leftoverOffset = lastBytesSent - nRemaining;
        assert (leftoverOffset >= 0);
        // this recopies userBuf -> direct buffer if using native libraries:
        decompressor.setInput(buffer, leftoverOffset, nRemaining);
        // NOTE: this is the one place we do NOT want to save the number
        // of bytes sent (nRemaining here) into lastBytesSent: since we
        // are resending what we've already sent before, offset is nonzero
        // in general (only way it could be zero is if it already equals
        // nRemaining), which would then screw up the offset calculation
        // _next_ time around. IOW, getRemaining() is in terms of the
        // original, zero-offset bufferload, so lastBytesSent must be as
        // well. Cheesy ASCII art:
        //
        //            <------------ m, lastBytesSent ----------->
        //          +===============================================+
        // buffer:  |1111111111|22222222222222222|333333333333|    |
        //          +===============================================+
        // #1:      <-- off -->|<-------- nRemaining --------->
        // #2:      <----------- off ----------->|<-- nRem. -->
        // #3:      (final substream: nRemaining == 0; eof = true)
        //
        // If lastBytesSent is anything other than m, as shown, then "off"
        // will be calculated incorrectly.
      }
    } else if (decompressor.needsInput()) {
      int m = getCompressedData();
      if (m == -1) {
        throw new EOFException("Unexpected end of input stream");
      }
      decompressor.setInput(buffer, 0, m);
      lastBytesSent = m;
    }
  }
  return n;
}
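The contract here matters to callers: a return of -1 (after eof is set) signals a clean end of stream, while the EOFException above signals compressed input that ended in the middle of a member. A minimal caller sketch under that contract; GzipCodec, ReflectionUtils.newInstance, and createInputStream are real Hadoop API, but the class name, file path, and buffer size below are illustrative:

// Hypothetical caller (not part of the Hadoop source above): drains a possibly
// concatenated gzip stream; "parts.gz" is a placeholder path.
import java.io.EOFException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class ConcatenatedGzipRead {
  public static void main(String[] args) throws Exception {
    GzipCodec codec = ReflectionUtils.newInstance(GzipCodec.class, new Configuration());
    try (InputStream in = codec.createInputStream(Files.newInputStream(Paths.get("parts.gz")))) {
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf)) != -1) {
        // consume n decompressed bytes; member boundaries are handled
        // transparently by the reset/setInput logic shown above
      }
      // normal termination: decompress() signalled a clean end of stream
    } catch (EOFException e) {
      // the compressed input ended in the middle of a gzip member
    }
  }
}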
use of java.io.EOFException in project flink by apache.
the class BlobClient method deleteInternal.
/**
* Delete one or multiple BLOBs from the BLOB server.
*
* @param jobId The job ID to identify the BLOB(s) to be deleted.
* @param key The key to identify the specific BLOB to delete or <code>null</code> to delete
* all BLOBs associated with the job id.
 * @param bKey The blob key to identify a specific content-addressable BLOB. This parameter
 *             is mutually exclusive with jobId and key.
* @throws IOException Thrown if an I/O error occurs while transferring the request to the BLOB server.
*/
private void deleteInternal(JobID jobId, String key, BlobKey bKey) throws IOException {
  if ((jobId != null && bKey != null) || (jobId == null && bKey == null)) {
    throw new IllegalArgumentException();
  }
  try {
    final OutputStream outputStream = this.socket.getOutputStream();
    final InputStream inputStream = this.socket.getInputStream();
    // Signal type of operation
    outputStream.write(DELETE_OPERATION);
    // Check if DELETE should be done in content-addressable manner
    if (jobId == null) {
      // delete blob key
      outputStream.write(CONTENT_ADDRESSABLE);
      bKey.writeToOutputStream(outputStream);
    } else if (key != null) {
      // delete BLOB for jobID and name key
      outputStream.write(NAME_ADDRESSABLE);
      // Send job ID and the key
      byte[] idBytes = jobId.getBytes();
      byte[] keyBytes = key.getBytes(BlobUtils.DEFAULT_CHARSET);
      outputStream.write(idBytes);
      writeLength(keyBytes.length, outputStream);
      outputStream.write(keyBytes);
    } else {
      // delete all blobs for JobID
      outputStream.write(JOB_ID_SCOPE);
      byte[] idBytes = jobId.getBytes();
      outputStream.write(idBytes);
    }
    int response = inputStream.read();
    if (response < 0) {
      throw new EOFException("Premature end of response");
    }
    if (response == RETURN_ERROR) {
      Throwable cause = readExceptionFromStream(inputStream);
      throw new IOException("Server side error: " + cause.getMessage(), cause);
    } else if (response != RETURN_OKAY) {
      throw new IOException("Unrecognized response");
    }
  } catch (Throwable t) {
    BlobUtils.closeSilently(socket, LOG);
    throw new IOException("DELETE operation failed: " + t.getMessage(), t);
  }
}
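The response handling at the end of deleteInternal is the pattern that brings EOFException into play: InputStream.read() returns -1 if the server closes the connection before replying, and the client converts that into a descriptive EOFException instead of a bare -1. A self-contained sketch of just that pattern; the constant value and method name are illustrative, not Flink's actual protocol codes:

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class ResponseCheck {
  // illustrative status code; Flink's actual RETURN_OKAY value may differ
  static final int RETURN_OKAY = 0;

  /** Reads the single status byte that follows a request, as deleteInternal() does. */
  static void expectOkay(InputStream in) throws IOException {
    int response = in.read(); // blocks for one byte; -1 means the peer closed the socket
    if (response < 0) {
      throw new EOFException("Premature end of response");
    }
    if (response != RETURN_OKAY) {
      throw new IOException("Unexpected response code: " + response);
    }
  }
}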
use of java.io.EOFException in project flink by apache.
the class BlobServerConnection method get.
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
/**
* Handles an incoming GET request from a BLOB client.
*
* @param inputStream
* the input stream to read incoming data from
* @param outputStream
* the output stream to send data back to the client
* @param buf
* an auxiliary buffer for data serialization/deserialization
* @throws IOException
* thrown if an I/O error occurs while reading/writing data from/to the respective streams
*/
private void get(InputStream inputStream, OutputStream outputStream, byte[] buf) throws IOException {
  /*
   * Retrieve the file from the (distributed?) BLOB store and store it
   * locally, then send it to the service which requested it.
   *
   * Instead, we could send it from the distributed store directly, but
   * chances are high that if there is one request, there will be more,
   * so a local cache makes more sense.
   */
  File blobFile;
  try {
    final int contentAddressable = inputStream.read();
    if (contentAddressable < 0) {
      throw new EOFException("Premature end of GET request");
    }
    if (contentAddressable == NAME_ADDRESSABLE) {
      // Receive the job ID and key
      byte[] jidBytes = new byte[JobID.SIZE];
      readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
      JobID jobID = JobID.fromByteArray(jidBytes);
      String key = readKey(buf, inputStream);
      blobFile = this.blobServer.getStorageLocation(jobID, key);
      if (!blobFile.exists()) {
        blobStore.get(jobID, key, blobFile);
      }
    } else if (contentAddressable == CONTENT_ADDRESSABLE) {
      final BlobKey key = BlobKey.readFromInputStream(inputStream);
      blobFile = blobServer.getStorageLocation(key);
      if (!blobFile.exists()) {
        blobStore.get(key, blobFile);
      }
    } else {
      throw new IOException("Unknown type of BLOB addressing.");
    }
    // Check if BLOB exists
    if (!blobFile.exists()) {
      throw new IOException("Cannot find required BLOB at " + blobFile.getAbsolutePath());
    }
    if (blobFile.length() > Integer.MAX_VALUE) {
      throw new IOException("BLOB size exceeds the maximum size (2 GB).");
    }
    outputStream.write(RETURN_OKAY);
    // up to here, an error can give a good message
  } catch (Throwable t) {
    LOG.error("GET operation failed", t);
    try {
      writeErrorToStream(outputStream, t);
    } catch (IOException e) {
      // since we are already in an exception case, failing to send the
      // error message back matters little; ignore it
    }
    clientSocket.close();
    return;
  }
  // from here on, we have started sending data, so all we can do is close the connection if something goes wrong
  try {
    int blobLen = (int) blobFile.length();
    writeLength(blobLen, outputStream);
    try (FileInputStream fis = new FileInputStream(blobFile)) {
      int bytesRemaining = blobLen;
      while (bytesRemaining > 0) {
        int read = fis.read(buf);
        if (read < 0) {
          throw new IOException("Premature end of BLOB file stream for " + blobFile.getAbsolutePath());
        }
        outputStream.write(buf, 0, read);
        bytesRemaining -= read;
      }
    }
  } catch (SocketException e) {
    // happens when the other side disconnects
    LOG.debug("Socket connection closed", e);
  } catch (Throwable t) {
    LOG.error("GET operation failed", t);
    clientSocket.close();
  }
}
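After RETURN_OKAY the server writes the BLOB length and then streams exactly that many bytes, so a client must loop until the full payload has arrived and treat an early -1 as a broken transfer. A hedged sketch of the matching receive side; readLength here is an assumed 4-byte big-endian decoder, which may differ from the framing Flink's writeLength actually uses:

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class GetResponseReader {
  /**
   * Receives the length-prefixed payload that get() sends after RETURN_OKAY.
   * The loop mirrors the server's send loop: read until exactly `length`
   * bytes have arrived, treating an early -1 as a broken transfer.
   */
  static byte[] receiveBlob(InputStream in) throws IOException {
    int length = readLength(in);
    byte[] data = new byte[length];
    int pos = 0;
    while (pos < length) {
      int read = in.read(data, pos, length - pos);
      if (read < 0) {
        // the server closed the socket mid-transfer
        throw new EOFException("Premature end of BLOB data stream");
      }
      pos += read;
    }
    return data;
  }

  /** Assumed 4-byte big-endian length decoder; Flink's real framing may differ. */
  static int readLength(InputStream in) throws IOException {
    int value = 0;
    for (int i = 0; i < 4; i++) {
      int b = in.read();
      if (b < 0) {
        throw new EOFException("Premature end of length field");
      }
      value = (value << 8) | b;
    }
    return value;
  }
}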
use of java.io.EOFException in project hadoop by apache.
the class CryptoStreamsTestBase method testSeek.
/** Test seek to different position. */
@Test(timeout = 120000)
public void testSeek() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in = getInputStream(defaultBufferSize);
  // Pos: 1/3 dataLen
  seekCheck(in, dataLen / 3);
  // Pos: 0
  seekCheck(in, 0);
  // Pos: 1/2 dataLen
  seekCheck(in, dataLen / 2);
  final long pos = ((Seekable) in).getPos();
  // Pos: -3
  try {
    seekCheck(in, -3);
    Assert.fail("Seek to negative offset should fail.");
  } catch (EOFException e) {
    GenericTestUtils.assertExceptionContains(FSExceptionMessages.NEGATIVE_SEEK, e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());
  // Pos: dataLen + 3
  try {
    seekCheck(in, dataLen + 3);
    Assert.fail("Seek after EOF should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());
  in.close();
}
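The test pins down the seek contract: a negative target must fail with an EOFException containing FSExceptionMessages.NEGATIVE_SEEK, a target past the end must fail with "Cannot seek after EOF", and in both cases the position must be left unchanged. A minimal sketch of a guard satisfying that contract; the class below is illustrative, not the Hadoop crypto stream implementation:

import java.io.EOFException;
import java.io.IOException;

// Illustrative bounds-checked seeker, not the actual Hadoop crypto stream.
final class BoundedSeeker {
  private final long length; // total stream length in bytes
  private long pos;

  BoundedSeeker(long length) {
    this.length = length;
  }

  void seek(long target) throws IOException {
    if (target < 0) {
      // same semantics the test asserts via FSExceptionMessages.NEGATIVE_SEEK
      throw new EOFException("Cannot seek to a negative offset");
    }
    if (target > length) {
      throw new IOException("Cannot seek after EOF");
    }
    // update the position only after validation, so a failed seek
    // leaves getPos() unchanged, exactly as the test requires
    pos = target;
  }

  long getPos() {
    return pos;
  }
}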
use of java.io.EOFException in project flink by apache.
the class ChannelReaderInputView method nextSegment.
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
 * Gets the next segment from the asynchronous block reader. If more requests are to be issued, the method
 * first sends a new request with the current memory segment. If no more requests are pending, the method
 * adds the segment to the reader's return queue, which effectively collects all memory segments.
 * Secondly, the method fetches the next non-consumed segment
 * returned by the reader. If no further segments are available, this method throws an {@link EOFException}.
*
* @param current The memory segment used for the next request.
* @return The memory segment to read from next.
*
* @throws EOFException Thrown, if no further segments are available.
* @throws IOException Thrown, if an I/O error occurred while reading
* @see AbstractPagedInputView#nextSegment(org.apache.flink.core.memory.MemorySegment)
*/
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
  // check if we are at our end
  if (this.inLastBlock) {
    throw new EOFException();
  }
  // send a read request with the current segment first; if we have only a single
  // segment, this same segment will be the one obtained in the next lines
  if (current != null) {
    sendReadRequest(current);
  }
  // get the next segment
  final MemorySegment seg = this.reader.getNextReturnedBlock();
  // check the header
  if (seg.getShort(0) != ChannelWriterOutputView.HEADER_MAGIC_NUMBER) {
    throw new IOException("The current block does not belong to a ChannelWriterOutputView / "
        + "ChannelReaderInputView: Wrong magic number.");
  }
  if ((seg.getShort(ChannelWriterOutputView.HEADER_FLAGS_OFFSET) & ChannelWriterOutputView.FLAG_LAST_BLOCK) != 0) {
    // last block
    this.numRequestsRemaining = 0;
    this.inLastBlock = true;
  }
  return seg;
}
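Since nextSegment() reports exhaustion by throwing an EOFException rather than returning null, consumers of such a paged view typically read in an unbounded loop and treat the exception as the normal end-of-data signal. An illustrative consumer sketch; RecordSource and readOneRecord are placeholders, not Flink API:

import java.io.EOFException;
import java.io.IOException;

final class ViewDrainer {
  /**
   * Drains records from a paged source whose page supplier throws
   * EOFException at the end; the exception is the normal termination signal.
   * readOneRecord() stands in for the caller's deserialization logic.
   */
  static <T> void drain(RecordSource<T> source) throws IOException {
    try {
      while (true) {
        T record = source.readOneRecord(); // advances pages internally
        // ... process record ...
      }
    } catch (EOFException e) {
      // expected: the block flagged FLAG_LAST_BLOCK has been fully consumed
    }
  }

  /** Placeholder for any reader backed by an AbstractPagedInputView. */
  interface RecordSource<T> {
    T readOneRecord() throws IOException;
  }
}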