Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
Class FileDirectSegment, method writeLogHeader().
@Override
void writeLogHeader() {
    super.writeLogHeader();
    try {
        // Write a flipped duplicate so the original buffer's position still marks the header size
        channel.write((ByteBuffer) buffer.duplicate().flip());
        manager.addSize(lastWrittenPos = buffer.position());
    } catch (IOException e) {
        throw new FSWriteError(e, getPath());
    }
}
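A minimal standalone sketch (not Cassandra code; the class and file names are made up) of why the header is written through buffer.duplicate().flip(): flipping a duplicate leaves the original buffer's position untouched, so it can still report how many bytes the header occupies, which is what manager.addSize(lastWrittenPos = buffer.position()) relies on above.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class DuplicateFlipDemo {
    public static void main(String[] args) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(32);
        buffer.putLong(42L); // position is now 8
        try (FileChannel channel = FileChannel.open(Path.of("header.bin"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            // Flip a duplicate: the duplicate is written from 0 to 8,
            // while the original buffer's position stays at 8.
            channel.write((ByteBuffer) buffer.duplicate().flip());
        }
        // The original position still tells us how many bytes the header occupies.
        System.out.println("header bytes written: " + buffer.position()); // prints 8
    }
}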
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
Class MemoryMappedSegment, method createBuffer().
ByteBuffer createBuffer(CommitLog commitLog) {
    try {
        MappedByteBuffer mappedFile = channel.map(FileChannel.MapMode.READ_WRITE, 0, DatabaseDescriptor.getCommitLogSegmentSize());
        // Record that a full segment's worth of space is now claimed on disk
        manager.addSize(DatabaseDescriptor.getCommitLogSegmentSize());
        return mappedFile;
    } catch (IOException e) {
        throw new FSWriteError(e, logFile);
    }
}
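For context, a self-contained sketch of the plain NIO call that createBuffer() builds on; the segment size and file name here are arbitrary placeholders. Mapping the file READ_WRITE grows it to the requested length and returns a MappedByteBuffer whose writes go to the page cache until forced to disk.

import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapSegmentDemo {
    static final int SEGMENT_SIZE = 1 << 20; // placeholder 1 MiB segment

    public static void main(String[] args) throws IOException {
        try (FileChannel channel = FileChannel.open(Path.of("segment.log"),
                StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            // READ_WRITE mapping extends the file to SEGMENT_SIZE if it is shorter
            MappedByteBuffer segment = channel.map(FileChannel.MapMode.READ_WRITE, 0, SEGMENT_SIZE);
            segment.putLong(0L); // writes land directly in the mapped region
            segment.force();     // flush dirty pages to the underlying file
        }
    }
}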
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
Class BigTableWriter, method append().
/**
 * Appends partition data to this writer.
 *
 * @param iterator the partition to write
 * @return the created index entry if something was written, that is if {@code iterator}
 * wasn't empty, {@code null} otherwise.
 *
 * @throws FSWriteError if a write to the dataFile fails
 */
public RowIndexEntry append(UnfilteredRowIterator iterator) {
    DecoratedKey key = iterator.partitionKey();
    if (key.getKey().remaining() > FBUtilities.MAX_UNSIGNED_SHORT) {
        logger.error("Key size {} exceeds maximum of {}, skipping row", key.getKey().remaining(), FBUtilities.MAX_UNSIGNED_SHORT);
        return null;
    }
    if (iterator.isEmpty())
        return null;
    long startPosition = beforeAppend(key);
    observers.forEach((o) -> o.startPartition(key, iwriter.indexFile.position()));
    // Reuse the writer for each row
    columnIndexWriter.reset();
    try (UnfilteredRowIterator collecting = Transformation.apply(iterator, new StatsCollector(metadataCollector))) {
        columnIndexWriter.buildRowIndex(collecting);
        // afterAppend() writes the partition key before the first RowIndexEntry - so we have to add its
        // serialized size to the index-writer position
        long indexFilePosition = ByteBufferUtil.serializedSizeWithShortLength(key.getKey()) + iwriter.indexFile.position();
        RowIndexEntry entry = RowIndexEntry.create(startPosition, indexFilePosition,
                                                   collecting.partitionLevelDeletion(),
                                                   columnIndexWriter.headerLength,
                                                   columnIndexWriter.columnIndexCount,
                                                   columnIndexWriter.indexInfoSerializedSize(),
                                                   columnIndexWriter.indexSamples(),
                                                   columnIndexWriter.offsets(),
                                                   getRowIndexEntrySerializer().indexInfoSerializer());
        long endPosition = dataFile.position();
        long rowSize = endPosition - startPosition;
        maybeLogLargePartitionWarning(key, rowSize);
        metadataCollector.addPartitionSizeInBytes(rowSize);
        afterAppend(key, endPosition, entry, columnIndexWriter.buffer());
        return entry;
    } catch (IOException e) {
        throw new FSWriteError(e, dataFile.getPath());
    }
}
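The core of the size accounting above is recording the data file position before the write and again after it. A stripped-down sketch of that pattern follows, with hypothetical names and UncheckedIOException standing in for FSWriteError; it is not Cassandra code.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionDeltaDemo {
    // Writes one record and returns its on-disk size, mirroring the
    // startPosition/endPosition bookkeeping in append() above.
    static long appendRecord(FileChannel dataFile, ByteBuffer record) {
        try {
            long startPosition = dataFile.position();  // like beforeAppend(key)
            dataFile.write(record);
            long endPosition = dataFile.position();
            return endPosition - startPosition;        // the rowSize fed to the metadata collector
        } catch (IOException e) {
            // Cassandra wraps this in FSWriteError(e, path); a plain unchecked wrapper here
            throw new UncheckedIOException(e);
        }
    }

    public static void main(String[] args) throws IOException {
        try (FileChannel ch = FileChannel.open(Path.of("data.db"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            long size = appendRecord(ch, ByteBuffer.wrap("partition".getBytes()));
            System.out.println("bytes appended: " + size);
        }
    }
}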
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
Class CompressedSequentialWriter, method flushData().
@Override
protected void flushData() {
    // why is this necessary? seems like it should always be at chunk start in normal operation
    seekToChunkStart();
    try {
        // compressing data with buffer re-use
        buffer.flip();
        compressed.clear();
        compressor.compress(buffer, compressed);
    } catch (IOException e) {
        // shouldn't happen
        throw new RuntimeException("Compression exception", e);
    }
    int compressedLength = compressed.position();
    uncompressedSize += buffer.position();
    ByteBuffer toWrite = compressed;
    if (compressedLength > maxCompressedLength) {
        toWrite = buffer;
        compressedLength = buffer.position();
    }
    compressedSize += compressedLength;
    try {
        // write an offset of the newly written chunk to the index file
        metadataWriter.addOffset(chunkOffset);
        chunkCount++;
        // write out the compressed data
        toWrite.flip();
        channel.write(toWrite);
        // write corresponding checksum
        toWrite.rewind();
        crcMetadata.appendDirect(toWrite, true);
        lastFlushOffset += compressedLength + 4;
    } catch (IOException e) {
        throw new FSWriteError(e, getPath());
    }
    if (toWrite == buffer)
        buffer.position(compressedLength);
    // next chunk should be written right after current + length of the checksum (int)
    chunkOffset += compressedLength + 4;
    if (runPostFlush != null)
        runPostFlush.run();
}
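The compress-or-store decision (compressedLength > maxCompressedLength) is worth isolating: when a chunk does not shrink enough, the raw bytes are written instead. A self-contained sketch of the same fallback, using java.util.zip.Deflater rather than Cassandra's compressor; the helper name and buffer sizing are assumptions.

import java.util.Arrays;
import java.util.zip.Deflater;

public class CompressOrStoreDemo {
    // Returns the bytes to write: the compressed form if it fits under the
    // threshold, otherwise the original chunk unchanged.
    static byte[] compressOrStore(byte[] chunk, int maxCompressedLength) {
        Deflater deflater = new Deflater();
        deflater.setInput(chunk);
        deflater.finish();
        byte[] compressed = new byte[chunk.length * 2 + 64]; // generously sized output buffer
        int compressedLength = deflater.deflate(compressed);
        deflater.end();
        if (compressedLength > maxCompressedLength)
            return chunk;                                    // store uncompressed
        return Arrays.copyOf(compressed, compressedLength);  // store compressed
    }

    public static void main(String[] args) {
        byte[] incompressible = new byte[256];
        new java.util.Random(0).nextBytes(incompressible);   // random data barely compresses
        byte[] out = compressOrStore(incompressible, incompressible.length);
        System.out.println("stored " + out.length + " bytes (raw = " + incompressible.length + ")");
    }
}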
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
Class SequentialWriter, method flushData().
/**
 * Override this method instead of overriding flush()
 * @throws FSWriteError on any I/O error.
 */
protected void flushData() {
    try {
        buffer.flip();
        channel.write(buffer);
        lastFlushOffset += buffer.position();
    } catch (IOException e) {
        throw new FSWriteError(e, getPath());
    }
    if (runPostFlush != null)
        runPostFlush.run();
}
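The flushData() implementations above all share the same shape: flip the buffer, write it to the channel, advance a flush offset, and convert the checked IOException into an unchecked error carrying the file path. A minimal sketch of that shape outside Cassandra, with invented names and UncheckedIOException in place of FSWriteError.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class FlushDemo {
    private final ByteBuffer buffer = ByteBuffer.allocate(4096);
    private long lastFlushOffset;

    void put(byte[] bytes) { buffer.put(bytes); }

    // Same shape as SequentialWriter.flushData(): flip, write, advance the flush
    // offset, and rethrow I/O failures as an unchecked error.
    void flushData(FileChannel channel) {
        try {
            buffer.flip();
            channel.write(buffer);
            lastFlushOffset += buffer.position(); // position == bytes written after the write
        } catch (IOException e) {
            throw new UncheckedIOException(e);    // FSWriteError(e, getPath()) in Cassandra
        }
        buffer.clear(); // ready for the next batch of writes
    }

    public static void main(String[] args) throws IOException {
        FlushDemo writer = new FlushDemo();
        writer.put("hello".getBytes());
        try (FileChannel ch = FileChannel.open(Path.of("out.bin"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            writer.flushData(ch);
        }
    }
}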