Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
From class CommitLogTest, method testUnwriteableFlushRecovery:
/**
 * Regression test: when a memtable flush fails with a write error (data directories
 * made unwriteable mid-run), the unflushed mutations must still be recoverable by
 * commit log replay.
 */
public void testUnwriteableFlushRecovery() throws ExecutionException, InterruptedException, IOException {
// Start from a clean commit log so the replay count asserted below is deterministic.
CommitLog.instance.resetUnsafe(true);
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
// Remember the configured policy so it can be restored in the finally block.
DiskFailurePolicy oldPolicy = DatabaseDescriptor.getDiskFailurePolicy();
try {
// 'ignore' keeps the node operating after the injected write failure instead of stopping it.
DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.ignore);
for (int i = 0; i < 5; i++) {
new RowUpdateBuilder(cfs.metadata(), 0, "k").clustering("c" + i).add("val", ByteBuffer.allocate(100)).build().apply();
if (i == 2) {
// Make the directories unwriteable for this one flush only; the Closeable restores them.
try (Closeable c = Util.markDirectoriesUnwriteable(cfs)) {
cfs.forceBlockingFlush();
} catch (Throwable t) {
// expected. Cause (after some wrappings) should be a write error
// NOTE(review): walks the cause chain and would NPE if no FSWriteError is present;
// acceptable here since that itself fails the test loudly.
while (!(t instanceof FSWriteError)) t = t.getCause();
}
} else
cfs.forceBlockingFlush();
}
} finally {
DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
}
CommitLog.instance.sync();
// Restrict replay to the test table so unrelated mutations cannot skew the count.
System.setProperty("cassandra.replayList", KEYSPACE1 + "." + STANDARD1);
// Currently we don't attempt to re-flush a memtable that failed, thus make sure data is replayed by commitlog.
// If retries work subsequent flushes should clear up error and this should change to expect 0.
Assert.assertEquals(1, CommitLog.instance.resetUnsafe(false));
}
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
From class VerifyTest, method writeChecksum:
/**
 * Writes the given checksum, in its decimal string form, to the file at
 * {@code filePath}, replacing any existing content.
 *
 * @param checksum the checksum value to persist
 * @param filePath path of the checksum file to (over)write
 * @throws FSWriteError if the file cannot be created or written
 */
protected void writeChecksum(long checksum, String filePath) {
    File outFile = new File(filePath);
    // try-with-resources closes the writer exactly once; the original closed it in the
    // try body AND again via FileUtils.closeQuietly in finally. close() also flushes,
    // so the explicit flush() is unnecessary.
    try (BufferedWriter out = Files.newBufferedWriter(outFile.toPath(), Charsets.UTF_8)) {
        out.write(String.valueOf(checksum));
    } catch (IOException e) {
        throw new FSWriteError(e, outFile);
    }
}
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
From class CommitLog, method add:
/**
* Add a Mutation to the commit log. If CDC is enabled, this can fail.
*
* @param mutation the Mutation to add to the log
* @throws WriteTimeoutException
*/
/**
 * Add a Mutation to the commit log. If CDC is enabled, this can fail.
 *
 * Serializes the mutation, rejects it if it exceeds the configured maximum size,
 * then writes [length][CRC(length)][payload][CRC(payload)] into an allocated
 * segment slice and hands the allocation to the executor for syncing.
 *
 * @param mutation the Mutation to add to the log
 * @return the position in the commit log at which the mutation was written
 * @throws WriteTimeoutException
 */
public CommitLogPosition add(Mutation mutation) throws WriteTimeoutException {
assert mutation != null;
// Serialize into a reusable scratch buffer first so the total entry size is known
// before a segment slice is allocated.
try (DataOutputBuffer dob = DataOutputBuffer.scratchBuffer.get()) {
Mutation.serializer.serialize(mutation, dob, MessagingService.current_version);
int size = dob.getLength();
int totalSize = size + ENTRY_OVERHEAD_SIZE;
if (totalSize > MAX_MUTATION_SIZE) {
throw new IllegalArgumentException(String.format("Mutation of %s is too large for the maximum size of %s", FBUtilities.prettyPrintMemory(totalSize), FBUtilities.prettyPrintMemory(MAX_MUTATION_SIZE)));
}
Allocation alloc = segmentManager.allocate(mutation, totalSize);
CRC32 checksum = new CRC32();
final ByteBuffer buffer = alloc.getBuffer();
// Both dos and buffer write into the same slice; dos advances buffer's position,
// so the order of writes below is significant.
try (BufferedDataOutputStreamPlus dos = new DataOutputBufferFixed(buffer)) {
// checksummed length
dos.writeInt(size);
updateChecksumInt(checksum, size);
// CRC of the length field, written directly via the buffer.
buffer.putInt((int) checksum.getValue());
// checksummed mutation
dos.write(dob.getData(), 0, size);
// Checksum the payload bytes just written (position - size .. position),
// continuing the same CRC32 that already covers the length.
updateChecksum(checksum, buffer, buffer.position() - size, size);
buffer.putInt((int) checksum.getValue());
} catch (IOException e) {
throw new FSWriteError(e, alloc.getSegment().getPath());
} finally {
// Always release the allocation, even on failure, so the segment can sync.
alloc.markWritten();
}
executor.finishWriteFor(alloc);
return alloc.getCommitLogPosition();
} catch (IOException e) {
// Serialization/scratch-buffer failure: attribute it to the segment currently allocating.
throw new FSWriteError(e, segmentManager.allocatingFrom().getPath());
}
}
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
From class CommitLogSegment, method internalClose:
/**
* Close the segment file. Do not call from outside this class, use syncAndClose() instead.
*/
/**
 * Close the segment file. Do not call from outside this class, use syncAndClose() instead.
 *
 * @throws FSWriteError if closing the underlying channel fails
 */
protected void internalClose() {
    try {
        channel.close();
    } catch (IOException ioe) {
        // Surface the failure as a file-system write error tagged with this segment's path.
        throw new FSWriteError(ioe, getPath());
    }
    // Drop the buffer reference only once the channel has closed cleanly
    // (same as the original: an exception above leaves buffer untouched).
    buffer = null;
}
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
From class CompressedSegment, method write:
/**
 * Compresses the segment content between the two markers and writes it to the
 * channel as [sync marker][compressed length][compressed payload], then forces
 * the data to disk.
 *
 * NOTE(review): marker layout offsets (SYNC_MARKER_SIZE, COMPRESSED_MARKER_SIZE)
 * are defined elsewhere — the position arithmetic below depends on them exactly.
 */
@Override
void write(int startMarker, int nextMarker) {
int contentStart = startMarker + SYNC_MARKER_SIZE;
int length = nextMarker - contentStart;
// The length may be 0 when the segment is being closed.
assert length > 0 || length == 0 && !isStillAllocating();
try {
// Reuse a thread-local buffer sized for the worst-case compressed output plus marker.
int neededBufferSize = compressor.initialCompressedBufferLength(length) + COMPRESSED_MARKER_SIZE;
ByteBuffer compressedBuffer = manager.getBufferPool().getThreadLocalReusableBuffer(neededBufferSize);
// Duplicate so position/limit changes don't disturb the shared segment buffer.
ByteBuffer inputBuffer = buffer.duplicate();
inputBuffer.limit(contentStart + length).position(contentStart);
// Leave room at the front of the output for the sync marker + length header.
compressedBuffer.limit(compressedBuffer.capacity()).position(COMPRESSED_MARKER_SIZE);
compressor.compress(inputBuffer, compressedBuffer);
compressedBuffer.flip();
// Record the UNCOMPRESSED length just after the sync marker so replay can size its buffer.
compressedBuffer.putInt(SYNC_MARKER_SIZE, length);
// Only one thread can be here at a given time.
// Protected by synchronization on CommitLogSegment.sync().
writeSyncMarker(compressedBuffer, 0, (int) channel.position(), (int) channel.position() + compressedBuffer.remaining());
manager.addSize(compressedBuffer.limit());
channel.write(compressedBuffer);
// Sanity check: the channel advanced by exactly one compressed frame.
assert channel.position() - lastWrittenPos == compressedBuffer.limit();
lastWrittenPos = channel.position();
// fsync so the frame survives a crash before declaring the sync complete.
SyncUtil.force(channel, true);
} catch (Exception e) {
throw new FSWriteError(e, getPath());
}
}
Aggregations