Use of io.pravega.segmentstore.storage.chunklayer.InvalidOffsetException in project pravega by pravega.
The class FileSystemChunkStorage, method doWrite.
@Override
protected int doWrite(ChunkHandle handle, long offset, int length, InputStream data) throws ChunkStorageException {
    Path path = getFilePath(handle.getChunkName());
    long totalBytesWritten = 0;
    // Open the channel in a try-with-resources block so it is always closed, even on failure.
    try (FileChannel channel = fileSystem.getFileChannel(path, StandardOpenOption.WRITE)) {
        long fileSize = channel.size();
        if (fileSize != offset) {
            throw new InvalidOffsetException(handle.getChunkName(), fileSize, offset, "doWrite");
        }
        // Wrap the input data into a ReadableByteChannel, but do not close it. Doing so will result in closing
        // the underlying InputStream, which is not desirable if it is to be reused.
        ReadableByteChannel sourceChannel = Channels.newChannel(data);
        while (length > 0) {
            long bytesWritten = channel.transferFrom(sourceChannel, offset, length);
            assert bytesWritten > 0 : "Unable to make any progress transferring data.";
            offset += bytesWritten;
            totalBytesWritten += bytesWritten;
            length -= bytesWritten;
        }
        channel.force(true);
    } catch (IOException e) {
        throw convertException(handle.getChunkName(), "doWrite", e);
    }
    return (int) totalBytesWritten;
}
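The same validate-then-append pattern can be reproduced with plain NIO, independent of Pravega. The sketch below is a minimal illustration, not Pravega code: OffsetMismatchException is a hypothetical stand-in for InvalidOffsetException, and FileChannel.open replaces the fileSystem wrapper used above.

import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class AppendOnlyFileWriter {

    // Hypothetical stand-in for InvalidOffsetException: signals that the caller's
    // expected append offset does not match the current file length.
    static final class OffsetMismatchException extends IOException {
        OffsetMismatchException(long fileSize, long givenOffset) {
            super("file length is " + fileSize + " but append offset is " + givenOffset);
        }
    }

    static int appendAt(Path path, long offset, int length, InputStream data) throws IOException {
        long totalBytesWritten = 0;
        try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE)) {
            long fileSize = channel.size();
            if (fileSize != offset) {
                // Reject the write instead of silently overwriting data or leaving a gap.
                throw new OffsetMismatchException(fileSize, offset);
            }
            // Do not close this channel: closing it would close the caller's InputStream.
            ReadableByteChannel source = Channels.newChannel(data);
            while (length > 0) {
                long bytesWritten = channel.transferFrom(source, offset, length);
                if (bytesWritten <= 0) {
                    throw new IOException("Unable to make any progress transferring data.");
                }
                offset += bytesWritten;
                totalBytesWritten += bytesWritten;
                length -= bytesWritten;
            }
            channel.force(true);
        }
        return (int) totalBytesWritten;
    }
}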
Use of io.pravega.segmentstore.storage.chunklayer.InvalidOffsetException in project pravega by pravega.
The class ExtendedS3ChunkStorage, method doWrite.
@Override
protected int doWrite(ChunkHandle handle, long offset, int length, InputStream data) throws ChunkStorageException {
    Preconditions.checkState(supportsAppend, "supportsAppend is false.");
    try {
        val objectPath = getObjectPath(handle.getChunkName());
        // Check object exists.
        val metadata = client.getObjectMetadata(config.getBucket(), objectPath);
        if (metadata.getContentLength() != offset) {
            throw new InvalidOffsetException(handle.getChunkName(), metadata.getContentLength(), offset, "doWrite");
        }
        // Put data.
        client.putObject(this.config.getBucket(), objectPath, Range.fromOffsetLength(offset, length), data);
        return length;
    } catch (Exception e) {
        throw convertException(handle.getChunkName(), "doWrite", e);
    }
}
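In the ExtendedS3 variant the guard is expressed against object metadata: the current content length must equal the offset the caller intends to append at, otherwise the ranged putObject is never issued. The sketch below restates that contract against a hypothetical minimal object-store abstraction; ObjectStore, getObjectLength, and appendRange are illustrative names, not the ExtendedS3 client API.

import java.io.InputStream;

// Hypothetical abstraction used only to illustrate the offset guard.
interface ObjectStore {
    long getObjectLength(String bucket, String key);
    void appendRange(String bucket, String key, long offset, int length, InputStream data);
}

final class GuardedObjectAppender {
    private final ObjectStore store;
    private final String bucket;

    GuardedObjectAppender(ObjectStore store, String bucket) {
        this.store = store;
        this.bucket = bucket;
    }

    int append(String key, long offset, int length, InputStream data) {
        long currentLength = store.getObjectLength(bucket, key);
        if (currentLength != offset) {
            // Same contract as InvalidOffsetException: the caller's view of the object
            // length is stale, so the append must not be applied.
            throw new IllegalStateException(
                    "object length is " + currentLength + " but append offset is " + offset + " for " + key);
        }
        store.appendRange(bucket, key, offset, length, data);
        return length;
    }
}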
Use of io.pravega.segmentstore.storage.chunklayer.InvalidOffsetException in project pravega by pravega.
The class HDFSChunkStorage, method doWrite.
@Override
protected int doWrite(ChunkHandle handle, long offset, int length, InputStream data) throws ChunkStorageException {
    ensureInitializedAndNotClosed();
    try (FSDataOutputStream stream = this.fileSystem.append(getFilePath(handle.getChunkName()))) {
        if (stream.getPos() != offset) {
            // Looks like the filesystem changed from underneath us. This could be our bug, but it could be something else.
            throw new InvalidOffsetException(handle.getChunkName(), stream.getPos(), offset, "doWrite");
        }
        if (length == 0) {
            // Note: IOUtils.copyBytes with length == 0 will enter an infinite loop, hence the need for this check.
            return 0;
        }
        // We need to be very careful with IOUtils.copyBytes. There are many overloads with very similar signatures.
        // There is a difference between (InputStream, OutputStream, int, boolean) and (InputStream, OutputStream, long, boolean),
        // in that the one with "int" uses the third arg as a buffer size, and the one with "long" uses it as the number
        // of bytes to copy.
        IOUtils.copyBytes(data, stream, (long) length, false);
        stream.flush();
    } catch (IOException e) {
        throw convertException(handle.getChunkName(), "doWrite", e);
    }
    return length;
}
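The comment above about IOUtils.copyBytes can be checked in isolation. The following sketch, assuming hadoop-common on the classpath, contrasts the (InputStream, OutputStream, long, boolean) overload, which copies exactly that many bytes, with the (InputStream, OutputStream, int, boolean) overload, which treats the third argument as a buffer size and copies the whole stream.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

final class CopyBytesOverloadDemo {
    public static void main(String[] args) throws IOException {
        byte[] payload = new byte[256];

        // (InputStream, OutputStream, long, boolean): the third argument is the number
        // of bytes to copy, so only the first 100 bytes of the payload are written.
        ByteArrayOutputStream exactCount = new ByteArrayOutputStream();
        IOUtils.copyBytes(new ByteArrayInputStream(payload), exactCount, 100L, false);
        System.out.println("long overload copied " + exactCount.size() + " bytes");

        // (InputStream, OutputStream, int, boolean): the third argument is only a buffer
        // size; the entire stream is copied regardless of its value.
        ByteArrayOutputStream wholeStream = new ByteArrayOutputStream();
        IOUtils.copyBytes(new ByteArrayInputStream(payload), wholeStream, 100, false);
        System.out.println("int overload copied " + wholeStream.size() + " bytes");
    }
}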
Use of io.pravega.segmentstore.storage.chunklayer.InvalidOffsetException in project pravega by pravega.
The class InMemoryChunkStorage, method doWriteInternal.
private int doWriteInternal(ChunkHandle handle, long offset, int length, InputStream data) throws ChunkStorageException {
    InMemoryChunk chunk = getInMemoryChunk(handle);
    long oldLength = chunk.getLength();
    if (chunk.isReadOnly) {
        throw new ChunkStorageException(handle.getChunkName(), "chunk is readonly");
    }
    if (offset != chunk.getLength()) {
        throw new InvalidOffsetException(handle.getChunkName(), chunk.getLength(), offset, "doWrite");
    }
    if (length == 0) {
        return 0;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream(length);
    byte[] bytes = new byte[length];
    int totalBytesRead = 0;
    int bytesRead = 0;
    try {
        while ((bytesRead = data.read(bytes)) != -1) {
            out.write(bytes, 0, bytesRead);
            totalBytesRead += bytesRead;
        }
    } catch (IOException e) {
        throw new ChunkStorageException(handle.getChunkName(), "Error while reading", e);
    }
    Preconditions.checkState(length == totalBytesRead);
    byte[] writtenBytes = out.toByteArray();
    Preconditions.checkState(writtenBytes.length == totalBytesRead);
    chunk.append(writtenBytes);
    Preconditions.checkState(oldLength + totalBytesRead == chunk.getLength());
    return totalBytesRead;
}
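The in-memory implementation drains the entire InputStream and only appends once it has confirmed that exactly length bytes arrived. A standalone version of that drain-and-verify step, using only JDK classes, might look like the following; readExactly is a hypothetical helper, not part of Pravega.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

final class StreamDrain {
    // Reads the stream to EOF and verifies that exactly 'length' bytes were received,
    // mirroring the Preconditions.checkState calls in doWriteInternal above.
    static byte[] readExactly(InputStream data, int length) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream(length);
        byte[] buffer = new byte[length];
        int totalBytesRead = 0;
        int bytesRead;
        while ((bytesRead = data.read(buffer)) != -1) {
            out.write(buffer, 0, bytesRead);
            totalBytesRead += bytesRead;
        }
        if (totalBytesRead != length) {
            // The caller promised exactly 'length' bytes; anything else means the
            // stream was truncated or longer than declared.
            throw new IOException("expected " + length + " bytes but read " + totalBytesRead);
        }
        return out.toByteArray();
    }
}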