Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class CompressionMetadata, method readChunkOffsets:
/**
* Read offsets of the individual chunks from the given input.
*
* @param input Source of the data.
*
* @return collection of the chunk offsets.
*/
private Memory readChunkOffsets(DataInput input) {
    final int chunkCount;
    try {
        chunkCount = input.readInt();
        if (chunkCount <= 0)
            throw new IOException("Compressed file with 0 chunks encountered: " + input);
    } catch (IOException e) {
        throw new FSReadError(e, indexFilePath);
    }

    // One 8-byte offset per chunk.
    @SuppressWarnings("resource")
    Memory offsets = Memory.allocate(chunkCount * 8L);
    int i = 0;
    try {
        for (i = 0; i < chunkCount; i++) {
            offsets.setLong(i * 8L, input.readLong());
        }
        return offsets;
    } catch (IOException e) {
        // Release the off-heap allocation before propagating the failure.
        if (offsets != null)
            offsets.close();

        // Hitting EOF here means the file holds fewer offsets than its header claims.
        if (e instanceof EOFException) {
            String msg = String.format("Corrupted Index File %s: read %d but expected %d chunks.", indexFilePath, i, chunkCount);
            throw new CorruptSSTableException(new IOException(msg, e), indexFilePath);
        }
        throw new FSReadError(e, indexFilePath);
    }
}
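For context, the serialized layout this method consumes is simply a 4-byte chunk count followed by one 8-byte offset per chunk. Below is a minimal, self-contained sketch of that round trip; the class name ChunkOffsetsDemo is made up for illustration, and a heap long[] stands in for Cassandra's off-heap Memory:

import java.io.*;

public class ChunkOffsetsDemo {
    public static void main(String[] args) throws IOException {
        // Serialize the layout readChunkOffsets expects: an int chunk count,
        // followed by one 8-byte offset per chunk.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            long[] written = { 0L, 65536L, 131072L };
            out.writeInt(written.length);
            for (long offset : written)
                out.writeLong(offset);
        }

        // Read it back the same way the method does, using a plain long[]
        // in place of the off-heap Memory region.
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int chunkCount = in.readInt();
        long[] offsets = new long[chunkCount];
        for (int i = 0; i < chunkCount; i++)
            offsets[i] = in.readLong();

        System.out.println(java.util.Arrays.toString(offsets));
    }
}

A short read from a truncated copy of those bytes would throw EOFException partway through the loop, which is exactly the case the method reports as a corrupted index file.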
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class CompressedSequentialWriter, method resetAndTruncate:
@Override
public synchronized void resetAndTruncate(DataPosition mark) {
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    long truncateTarget = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) {
        // simply drop bytes to the right of our mark
        buffer.position(realMark.validBufferBytes);
        return;
    }

    // synchronize current buffer with disk - we don't want any data loss
    syncInternal();

    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.capacity() < chunkSize)
        compressed = compressor.preferredBufferType().allocate(chunkSize);

    try {
        // Re-read the marked chunk from disk.
        compressed.clear();
        compressed.limit(chunkSize);
        fchannel.position(chunkOffset);
        fchannel.read(compressed);

        try {
            // Repopulate buffer from compressed data
            buffer.clear();
            compressed.flip();
            if (chunkSize <= maxCompressedLength)
                compressor.uncompress(compressed, buffer);
            else
                buffer.put(compressed);
        } catch (IOException e) {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize, e);
        }

        // Verify the chunk against the 4-byte checksum stored immediately after it.
        CRC32 checksum = new CRC32();
        compressed.rewind();
        checksum.update(compressed);

        crcCheckBuffer.clear();
        fchannel.read(crcCheckBuffer);
        crcCheckBuffer.flip();
        if (crcCheckBuffer.getInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    } catch (CorruptBlockException e) {
        throw new CorruptSSTableException(e, getPath());
    } catch (EOFException e) {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    } catch (IOException e) {
        throw new FSReadError(e, getPath());
    }

    // Mark as dirty so we can guarantee the newly buffered bytes won't be lost on a rebuffer
    buffer.position(realMark.validBufferBytes);
    bufferOffset = truncateTarget - buffer.position();
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
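The checksum comparison in the middle of the method is plain java.util.zip.CRC32 computed over the compressed buffer and compared against the 4 bytes stored after the chunk. Here is a standalone sketch of that verification pattern; ChunkChecksumDemo is a hypothetical name, and an in-memory byte[] stands in for the file channel:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class ChunkChecksumDemo {
    public static void main(String[] args) {
        // A chunk of "compressed" bytes plus its stored 4-byte CRC, mirroring
        // the on-disk chunk layout resetAndTruncate re-reads and validates.
        byte[] chunk = "compressed-chunk-bytes".getBytes();
        CRC32 writerCrc = new CRC32();
        writerCrc.update(chunk);
        int storedCrc = (int) writerCrc.getValue();

        // Read side: checksum the chunk buffer, then compare against the
        // stored value before trusting the data.
        CRC32 readerCrc = new CRC32();
        readerCrc.update(ByteBuffer.wrap(chunk));
        boolean corrupt = storedCrc != (int) readerCrc.getValue();
        System.out.println(corrupt ? "corrupt block" : "checksum ok");
    }
}

As in the method above, only the low 32 bits of CRC32.getValue() are meaningful, so the cast to int matches the 4 bytes reserved per chunk.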
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class SequentialWriter, method resetAndTruncate:
/**
 * Drops all buffered data that's past the limits of our new file mark + buffer capacity, or syncs and truncates
 * the underlying file to the marked position.
 */
public void resetAndTruncate(DataPosition mark) {
    assert mark instanceof BufferedFileWriterMark;

    long previous = current();
    long truncateTarget = ((BufferedFileWriterMark) mark).pointer;

    // If we're resetting to a point that still lies within the buffered data, just move the
    // buffer position back to drop the bytes to the right of the desired mark.
    if (previous - truncateTarget <= buffer.position()) {
        buffer.position(buffer.position() - ((int) (previous - truncateTarget)));
        return;
    }

    // synchronize current buffer with disk - we don't want any data loss
    syncInternal();

    // truncate file to given position
    truncate(truncateTarget);

    try {
        fchannel.position(truncateTarget);
    } catch (IOException e) {
        throw new FSReadError(e, getPath());
    }

    bufferOffset = truncateTarget;
    resetBuffer();
}
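The truncate-then-reposition step at the end maps onto two standard FileChannel calls: truncate(long) discards everything past the mark, and position(long) keeps the write position in step with the new end of file. A runnable sketch under assumed names (TruncateDemo, with a temp file in place of the writer's data file):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.*;

public class TruncateDemo {
    public static void main(String[] args) throws IOException {
        Path path = Files.createTempFile("truncate-demo", ".bin");
        try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            channel.write(ByteBuffer.wrap(new byte[100]));   // file is now 100 bytes

            long truncateTarget = 40;
            channel.truncate(truncateTarget);   // drop everything past the mark
            channel.position(truncateTarget);   // keep the write position in step

            System.out.println("size=" + channel.size() + " position=" + channel.position());
        } finally {
            Files.deleteIfExists(path);
        }
    }
}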