Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class CompressedChecksummedDataInput, method readBuffer().
@Override
protected void readBuffer() {
    sourcePosition = filePosition;
    if (isEOF())
        return;

    // Read the fixed-size chunk header holding the two length fields.
    metadataBuffer.clear();
    channel.read(metadataBuffer, filePosition);
    filePosition += CompressedHintsWriter.METADATA_SIZE;
    metadataBuffer.rewind();

    int uncompressedSize = metadataBuffer.getInt();
    int compressedSize = metadataBuffer.getInt();

    if (compressedBuffer == null || compressedSize > compressedBuffer.capacity()) {
        // allocate +5% to cover variability in compressed size
        int bufferSize = compressedSize + (compressedSize / 20);
        if (compressedBuffer != null) {
            BufferPool.put(compressedBuffer);
        }
        compressedBuffer = BufferPool.get(bufferSize, compressor.preferredBufferType());
    }

    compressedBuffer.clear();
    compressedBuffer.limit(compressedSize);
    channel.read(compressedBuffer, filePosition);
    compressedBuffer.rewind();
    filePosition += compressedSize;

    if (buffer.capacity() < uncompressedSize) {
        int bufferSize = uncompressedSize + (uncompressedSize / 20);
        BufferPool.put(buffer);
        buffer = BufferPool.get(bufferSize, compressor.preferredBufferType());
    }

    buffer.clear();
    buffer.limit(uncompressedSize);
    try {
        compressor.uncompress(compressedBuffer, buffer);
        buffer.flip();
    } catch (IOException e) {
        throw new FSReadError(e, getPath());
    }
}
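The two getInt() calls imply the hint-chunk layout: each compressed chunk is preceded by a fixed-size header (CompressedHintsWriter.METADATA_SIZE) carrying the uncompressed and compressed lengths as two ints. Below is a minimal, self-contained sketch of parsing such a header with plain java.nio; ChunkHeader is a hypothetical helper, and the assumption that METADATA_SIZE equals 2 * Integer.BYTES is inferred from the read path above, not quoted from the Cassandra source.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Hypothetical standalone helper; not a Cassandra class.
final class ChunkHeader {
    static final int METADATA_SIZE = 2 * Integer.BYTES; // assumed header width

    final int uncompressedSize;
    final int compressedSize;

    private ChunkHeader(int uncompressedSize, int compressedSize) {
        this.uncompressedSize = uncompressedSize;
        this.compressedSize = compressedSize;
    }

    // Reads the header at an absolute file position, looping because
    // FileChannel.read() may return fewer bytes than requested.
    static ChunkHeader readFrom(FileChannel channel, long position) throws IOException {
        ByteBuffer header = ByteBuffer.allocate(METADATA_SIZE);
        while (header.hasRemaining()) {
            if (channel.read(header, position + header.position()) < 0)
                throw new IOException("unexpected EOF while reading chunk header");
        }
        header.flip();
        return new ChunkHeader(header.getInt(), header.getInt());
    }
}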
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class EncryptedChecksummedDataInput, method readBuffer().
@Override
protected void readBuffer() {
    this.sourcePosition = readChannel.getCurrentPosition();
    if (isEOF())
        return;

    try {
        ByteBuffer byteBuffer = reusableBuffers.get();
        ByteBuffer decrypted = EncryptionUtils.decrypt(readChannel, byteBuffer, true, cipher);
        buffer = EncryptionUtils.uncompress(decrypted, buffer, true, compressor);

        // Cache the (possibly larger) decrypted buffer so later reads can reuse it.
        if (decrypted.capacity() > byteBuffer.capacity())
            reusableBuffers.set(decrypted);
    } catch (IOException ioe) {
        throw new FSReadError(ioe, getPath());
    }
}
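The notable detail here is the buffer-recycling idiom: decryption may return a buffer larger than the one handed in, and when it does the larger buffer replaces the cached one so subsequent reads avoid reallocation. A minimal sketch of that idiom in isolation follows; the names REUSABLE and withReusableBuffer are illustrative, not Cassandra API, and the 4 KB starting size is an arbitrary assumption.

import java.nio.ByteBuffer;
import java.util.function.UnaryOperator;

final class BufferRecycling {
    // One cached buffer per thread.
    private static final ThreadLocal<ByteBuffer> REUSABLE =
            ThreadLocal.withInitial(() -> ByteBuffer.allocate(4096));

    // Runs an operation that may return either the cached buffer or a larger
    // replacement; if it grew, keep the bigger one for next time.
    static ByteBuffer withReusableBuffer(UnaryOperator<ByteBuffer> op) {
        ByteBuffer cached = REUSABLE.get();
        ByteBuffer result = op.apply(cached);
        if (result.capacity() > cached.capacity())
            REUSABLE.set(result);
        return result;
    }
}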
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class SASIIndexBuilder, method build().
public void build() {
    AbstractType<?> keyValidator = cfs.metadata().partitionKeyType;
    for (Map.Entry<SSTableReader, Map<ColumnMetadata, ColumnIndex>> e : sstables.entrySet()) {
        SSTableReader sstable = e.getKey();
        Map<ColumnMetadata, ColumnIndex> indexes = e.getValue();
        try (RandomAccessReader dataFile = sstable.openDataReader()) {
            PerSSTableIndexWriter indexWriter = SASIIndex.newWriter(keyValidator, sstable.descriptor, indexes, OperationType.COMPACTION);
            long previousKeyPosition = 0;
            try (KeyIterator keys = new KeyIterator(sstable.descriptor, cfs.metadata())) {
                while (keys.hasNext()) {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());

                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();
                    indexWriter.startPartition(key, keyPosition);
                    try {
                        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
                        dataFile.seek(indexEntry.position);
                        // skip over the serialized partition key
                        ByteBufferUtil.readWithShortLength(dataFile);
                        try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key)) {
                            // if the row has statics attached, it has to be indexed separately
                            if (cfs.metadata().hasStaticColumns())
                                indexWriter.nextUnfilteredCluster(partition.staticRow());
                            while (partition.hasNext())
                                indexWriter.nextUnfilteredCluster(partition.next());
                        }
                    } catch (IOException ex) {
                        throw new FSReadError(ex, sstable.getFilename());
                    }

                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }
                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
    }
}
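Note how build() tracks progress by key position rather than by row count: KeyIterator walks the primary index in order, so the delta between successive key positions approximates the bytes consumed, which is what feeds bytesProcessed. A hypothetical helper (not part of SASIIndexBuilder) that turns that counter into a percentage, assuming the caller knows the total data size, might look like:

// Hypothetical helper, not in the Cassandra source: convert the
// bytesProcessed counter maintained above into a bounded percentage.
static double progressPercent(long bytesProcessed, long totalBytes) {
    if (totalBytes <= 0)
        return 0.0;
    return Math.min(100.0, 100.0 * bytesProcessed / totalBytes);
}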
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class CompressionMetadata, method readChunkOffsets().
/**
 * Read offsets of the individual chunks from the given input.
 *
 * @param input Source of the data.
 *
 * @return the chunk offsets, stored in off-heap Memory.
 */
private Memory readChunkOffsets(DataInput input) {
    final int chunkCount;
    try {
        chunkCount = input.readInt();
        if (chunkCount <= 0)
            throw new IOException("Compressed file with 0 chunks encountered: " + input);
    } catch (IOException e) {
        throw new FSReadError(e, indexFilePath);
    }

    @SuppressWarnings("resource")
    Memory offsets = Memory.allocate(chunkCount * 8L);
    int i = 0;
    try {
        // Each offset is a long, stored contiguously in off-heap memory.
        for (i = 0; i < chunkCount; i++) {
            offsets.setLong(i * 8L, input.readLong());
        }
        return offsets;
    } catch (IOException e) {
        if (offsets != null)
            offsets.close();

        // An EOF mid-read means the index file is truncated, i.e. corrupted.
        if (e instanceof EOFException) {
            String msg = String.format("Corrupted Index File %s: read %d but expected %d chunks.", indexFilePath, i, chunkCount);
            throw new CorruptSSTableException(new IOException(msg, e), indexFilePath);
        }
        throw new FSReadError(e, indexFilePath);
    }
}
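The read path above implies the serialized layout: a 4-byte chunk count followed by one 8-byte offset per chunk. For reference, here is a hedged sketch of the inverse operation; writeChunkOffsets is hypothetical and inferred from readChunkOffsets, not a copy of Cassandra's actual writer.

import java.io.DataOutput;
import java.io.IOException;

// Hypothetical inverse of readChunkOffsets: an int count, then count longs.
static void writeChunkOffsets(DataOutput out, long[] chunkOffsets) throws IOException {
    out.writeInt(chunkOffsets.length);   // matches input.readInt() above
    for (long offset : chunkOffsets)
        out.writeLong(offset);           // matches input.readLong() per chunk
}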
Use of org.apache.cassandra.io.FSReadError in project cassandra by apache.
The class SSTableFlushObserverTest, method testFlushObserver().
@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();

    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();

    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // Use File.separator (the path-name separator) to build the path,
    // not File.pathSeparator (the PATH-list separator, ':' on Unix).
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();

    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());

    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(
            new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
            10L, 0L, null,
            TableMetadataRef.forOfflineTools(cfm),
            new MetadataCollector(cfm.comparator).sstableLevel(0),
            new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
            Collections.singletonList(observer),
            transaction);

    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();

        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));

        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));

        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));

        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }

    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());

    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;

        // Verify each observed index position really points at the same key
        // in the on-disk index file.
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }

        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
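The shape of the test's FlushObserver can be reconstructed from the assertions: it keys observed cells by (partition key, index position) and flips isComplete when the flush finishes. Below is an approximate sketch, assuming the SSTableFlushObserver callbacks used in this Cassandra version (begin, startPartition, nextUnfilteredCluster, complete) and the usual package locations for the imported types; treat the signatures and imports as reconstructions rather than the literal test source.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.nio.ByteBuffer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.rows.Cell;
import org.apache.cassandra.db.rows.Row;
import org.apache.cassandra.db.rows.Unfiltered;
import org.apache.cassandra.io.sstable.format.SSTableFlushObserver;
import org.apache.cassandra.utils.Pair;

// Approximate reconstruction, inferred from the assertions above.
private static class FlushObserver implements SSTableFlushObserver {
    final Multimap<Pair<ByteBuffer, Long>, Cell> rows = ArrayListMultimap.create();
    private Pair<ByteBuffer, Long> currentKey;
    boolean isComplete;

    public void begin() {}

    public void startPartition(DecoratedKey key, long indexPosition) {
        // Remember which partition (and index offset) subsequent cells belong to.
        currentKey = Pair.create(key.getKey(), indexPosition);
    }

    public void nextUnfilteredCluster(Unfiltered unfiltered) {
        // Collect the cells of each row under the current (key, position) pair.
        if (unfiltered.isRow())
            ((Row) unfiltered).forEach(cd -> rows.put(currentKey, (Cell) cd));
    }

    public void complete() {
        isComplete = true;
    }
}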