Use of org.apache.cassandra.io.sstable.CorruptSSTableException in the Apache Cassandra project: class CompressionMetadata, method readChunkOffsets.
/**
 * Read offsets of the individual chunks from the given input.
 *
 * @param input Source of the data.
 *
 * @return collection of the chunk offsets.
 *
 * @throws FSReadError if the chunk count cannot be read, or a non-EOF I/O error
 *                     occurs while reading the offsets.
 * @throws CorruptSSTableException if the input ends before all {@code chunkCount}
 *                                 offsets could be read.
 */
private Memory readChunkOffsets(DataInput input) {
    final int chunkCount;
    try {
        chunkCount = input.readInt();
        // Zero chunks is invalid, and a negative count indicates a corrupted header;
        // report the actual value rather than hard-coding "0".
        if (chunkCount <= 0)
            throw new IOException("Compressed file with " + chunkCount + " chunks encountered: " + input);
    } catch (IOException e) {
        throw new FSReadError(e, indexFilePath);
    }
    @SuppressWarnings("resource") Memory offsets = Memory.allocate(chunkCount * 8L);
    int i = 0;
    try {
        for (i = 0; i < chunkCount; i++) {
            offsets.setLong(i * 8L, input.readLong());
        }
        return offsets;
    } catch (IOException e) {
        // Release the off-heap allocation before propagating. A successful
        // Memory.allocate cannot yield null, so no null check is needed.
        offsets.close();
        if (e instanceof EOFException) {
            String msg = String.format("Corrupted Index File %s: read %d but expected %d chunks.", indexFilePath, i, chunkCount);
            throw new CorruptSSTableException(new IOException(msg, e), indexFilePath);
        }
        throw new FSReadError(e, indexFilePath);
    }
}
Use of org.apache.cassandra.io.sstable.CorruptSSTableException in the Apache Cassandra project: class StandaloneVerifier, method main.
/**
 * Entry point for the standalone sstable verifier tool.
 *
 * Loads the schema from disk, opens the requested table's sstables without
 * validation, runs a {@link Verifier} over each, and exits with status 1 if
 * any sstable fails verification (0 otherwise).
 *
 * @param args command-line arguments, parsed by {@code Options.parseArgs}.
 */
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    Util.initDatabaseDescriptor();
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        boolean hasFailed = false;
        if (Schema.instance.getTableMetadataRef(options.keyspaceName, options.cfName) == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s", options.keyspaceName, options.cfName));
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspaceName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cfName);
        OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
        Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
        boolean extended = options.extended;
        List<SSTableReader> sstables = new ArrayList<>();
        // Open each candidate sstable without validation; skip any that are missing
        // the data or primary-index component, and report (but tolerate) open failures.
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA) || !components.contains(Component.PRIMARY_INDEX))
                continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs);
                sstables.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error Loading %s: %s", entry.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        // Verify sstables. ANY verification failure — corruption or otherwise —
        // must mark the run as failed so the process exits non-zero.
        for (SSTableReader sstable : sstables) {
            try {
                try (Verifier verifier = new Verifier(cfs, sstable, handler, true)) {
                    verifier.verify(extended);
                } catch (CorruptSSTableException cs) {
                    System.err.println(String.format("Error verifying %s: %s", sstable, cs.getMessage()));
                    hasFailed = true;
                }
            } catch (Exception e) {
                System.err.println(String.format("Error verifying %s: %s", sstable, e.getMessage()));
                // Gate the stack trace on --debug, consistent with every other site.
                if (options.debug)
                    e.printStackTrace(System.err);
                // Previously this path did not set hasFailed, letting the tool
                // exit 0 even though verification failed.
                hasFailed = true;
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        // We need that to stop non daemonized threads
        System.exit(hasFailed ? 1 : 0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
Use of org.apache.cassandra.io.sstable.CorruptSSTableException in the Apache Cassandra project: class BigTableScanner, method seekToCurrentRangeStart.
/**
 * Positions the index and data files at the first entry belonging to
 * {@code currentRange}. Scans the index forward from the sampled start
 * position; any I/O failure marks the sstable suspect and surfaces as a
 * {@link CorruptSSTableException}.
 */
private void seekToCurrentRangeStart() {
    long entryPosition = sstable.getIndexScanPosition(currentRange.left);
    ifile.seek(entryPosition);
    try {
        while (!ifile.isEOF()) {
            entryPosition = ifile.getFilePointer();
            DecoratedKey key = sstable.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
            boolean inOrPastRange = key.compareTo(currentRange.left) > 0 || currentRange.contains(key);
            if (!inOrPastRange) {
                // Not there yet: skip this entry's index payload and keep scanning.
                RowIndexEntry.Serializer.skip(ifile, sstable.descriptor.version);
                continue;
            }
            // Found the range start: read the data offset, then rewind the index
            // file to the matching entry and seek the data file into place.
            long dataPosition = RowIndexEntry.Serializer.readPosition(ifile);
            ifile.seek(entryPosition);
            dfile.seek(dataPosition);
            break;
        }
    } catch (IOException e) {
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}
Use of org.apache.cassandra.io.sstable.CorruptSSTableException in the Apache Cassandra project: class CompressedSequentialWriter, method resetAndTruncate.
/**
 * Rewinds this writer to a previously captured mark, discarding everything
 * written after it, and repopulates the in-memory buffer from the on-disk
 * compressed chunk that contains the mark.
 *
 * If the mark lies within the chunk currently buffered, only the buffer
 * position is moved. Otherwise the chunk containing the mark is re-read from
 * disk, decompressed (or copied verbatim when it was stored uncompressed),
 * checksum-verified, and both the data file and the chunk-offset metadata are
 * truncated back to the mark's chunk boundary.
 *
 * @param mark a {@code CompressedFileWriterMark} previously returned by this writer.
 * @throws CorruptSSTableException if the stored chunk fails to decompress,
 *         fails its CRC check, or the file ends prematurely.
 * @throws FSReadError on any other I/O failure while re-reading the chunk.
 */
@Override
public synchronized void resetAndTruncate(DataPosition mark) {
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;
    // reset position
    long truncateTarget = realMark.uncDataOffset;
    if (realMark.chunkOffset == chunkOffset) {
        // simply drop bytes to the right of our mark
        buffer.position(realMark.validBufferBytes);
        return;
    }
    // synchronize current buffer with disk - we don't want any data loss
    syncInternal();
    chunkOffset = realMark.chunkOffset;
    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.capacity() < chunkSize)
        compressed = compressor.preferredBufferType().allocate(chunkSize);
    try {
        // Re-read the chunk's compressed bytes from the file.
        compressed.clear();
        compressed.limit(chunkSize);
        fchannel.position(chunkOffset);
        fchannel.read(compressed);
        try {
            // Repopulate buffer from compressed data
            buffer.clear();
            compressed.flip();
            if (chunkSize <= maxCompressedLength)
                compressor.uncompress(compressed, buffer);
            else
                buffer.put(compressed);
        } catch (IOException e) {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize, e);
        }
        // Verify the chunk against the 4-byte CRC stored immediately after it.
        CRC32 checksum = new CRC32();
        compressed.rewind();
        checksum.update(compressed);
        crcCheckBuffer.clear();
        fchannel.read(crcCheckBuffer);
        crcCheckBuffer.flip();
        if (crcCheckBuffer.getInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    } catch (CorruptBlockException e) {
        throw new CorruptSSTableException(e, getPath());
    } catch (EOFException e) {
        // Premature end of file while re-reading the chunk or its CRC.
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    } catch (IOException e) {
        throw new FSReadError(e, getPath());
    }
    // Mark as dirty so we can guarantee the newly buffered bytes won't be lost on a rebuffer
    buffer.position(realMark.validBufferBytes);
    bufferOffset = truncateTarget - buffer.position();
    chunkCount = realMark.nextChunkIndex - 1;
    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
Use of org.apache.cassandra.io.sstable.CorruptSSTableException in the Apache Cassandra project: class CompressionMetadata, method chunkFor.
/**
 * Get a chunk of compressed data (offset, length) corresponding to given position
 *
 * @param position Position in the file.
 * @return pair of chunk offset and length.
 * @throws CorruptSSTableException if the position maps past the last recorded chunk offset.
 */
public Chunk chunkFor(long position) {
    // Byte offset of this chunk's entry within the chunkOffsets region.
    // Computed entirely in long arithmetic: the previous form cast the chunk
    // index to int BEFORE multiplying by 8, which overflows for very large
    // files (chunk index >= 2^31, and the product overflows even earlier).
    long idx = 8 * (position / parameters.chunkLength());
    if (idx >= chunkOffsetsSize)
        throw new CorruptSSTableException(new EOFException(), indexFilePath);
    long chunkOffset = chunkOffsets.getLong(idx);
    // The last chunk runs to the end of the compressed file; others run to the next offset.
    long nextChunkOffset = (idx + 8 == chunkOffsetsSize) ? compressedFileLength : chunkOffsets.getLong(idx + 8);
    // "4" bytes reserved for checksum
    return new Chunk(chunkOffset, (int) (nextChunkOffset - chunkOffset - 4));
}
Aggregations