Use of org.apache.cassandra.io.util.FileOutputStreamPlus in project cassandra by apache.
The class SnapshotManifestTest, method testDeserializeManifest.
@Test
public void testDeserializeManifest() throws IOException {
    Map<String, Object> map = new HashMap<>();
    String createdAt = "2021-07-03T10:37:30Z";
    String expiresAt = "2021-08-03T10:37:30Z";
    map.put("created_at", createdAt);
    map.put("expires_at", expiresAt);
    map.put("files", Arrays.asList("db1", "db2", "db3"));
    ObjectMapper mapper = new ObjectMapper();
    File manifestFile = new File(tempFolder.newFile("manifest.json"));
    mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map);
    SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
    assertThat(manifest.getExpiresAt()).isEqualTo(Instant.parse(expiresAt));
    assertThat(manifest.getCreatedAt()).isEqualTo(Instant.parse(createdAt));
    assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3);
}
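For context, the reading side can be approximated with plain Jackson. The minimal sketch below shows the round trip the assertions depend on; it assumes, rather than quotes, what SnapshotManifest.deserializeFromJsonFile does internally, and uses java.io.File rather than Cassandra's File wrapper.

import java.io.File;
import java.io.IOException;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

static void readManifest(File manifestFile) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    // Read the JSON object back into a generic map, mirroring the map written above.
    Map<String, Object> map = mapper.readValue(manifestFile, new TypeReference<Map<String, Object>>() {});
    // The timestamps round-trip as ISO-8601 strings parseable by Instant.parse.
    Instant createdAt = Instant.parse((String) map.get("created_at"));
    Instant expiresAt = Instant.parse((String) map.get("expires_at"));
    @SuppressWarnings("unchecked")
    List<String> files = (List<String>) map.get("files");
}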
Use of org.apache.cassandra.io.util.FileOutputStreamPlus in project cassandra by apache.
The class CommitLogTest, method testRecoveryWithIdMismatch.
@Test
public void testRecoveryWithIdMismatch() throws Exception {
    CommitLogDescriptor desc = new CommitLogDescriptor(4, null, EncryptionContextGenerator.createDisabledContext());
    File logFile = tmpFile(desc.version);
    ByteBuffer buf = ByteBuffer.allocate(1024);
    CommitLogDescriptor.writeHeader(buf, desc);
    try (OutputStream lout = new FileOutputStreamPlus(logFile)) {
        lout.write(buf.array(), 0, buf.position());
        runExpecting(() -> {
            // CASSANDRA-1119 / CASSANDRA-1179: throw on failure
            CommitLog.instance.recover(logFile.path());
            return null;
        }, CommitLogReplayException.class);
    }
}
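Note that the write call passes buf.position() as the length: writeHeader fills the buffer with relative puts and leaves the position at the end of the header, so only the header bytes, not the full 1024-byte allocation, reach the file. A minimal sketch of that partial-write pattern with standard java.io only, nothing Cassandra-specific assumed:

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

static void writeFilledPrefix(java.io.File file, ByteBuffer buf) throws IOException {
    // position marks the end of the data written by relative puts,
    // so only the filled prefix of the backing array reaches disk.
    try (OutputStream out = new FileOutputStream(file)) {
        out.write(buf.array(), 0, buf.position());
    }
}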
Use of org.apache.cassandra.io.util.FileOutputStreamPlus in project cassandra by apache.
The class CommitLogTest, method tmpFile.
/**
 * Create a temporary commit log file with an appropriate descriptor at the head.
 *
 * @return the commit log file reference and the first position after the descriptor in the file
 * (so that subsequent writes happen at the correct file location).
 */
protected Pair<File, Integer> tmpFile() throws IOException {
    EncryptionContext encryptionContext = DatabaseDescriptor.getEncryptionContext();
    CommitLogDescriptor desc = new CommitLogDescriptor(CommitLogDescriptor.current_version, CommitLogSegment.getNextId(), DatabaseDescriptor.getCommitLogCompression(), encryptionContext);
    ByteBuffer buf = ByteBuffer.allocate(1024);
    CommitLogDescriptor.writeHeader(buf, desc, getAdditionalHeaders(encryptionContext));
    buf.flip();
    int positionAfterHeader = buf.limit() + 1;
    File logFile = new File(DatabaseDescriptor.getCommitLogLocation(), desc.fileName());
    try (OutputStream lout = new FileOutputStreamPlus(logFile)) {
        lout.write(buf.array(), 0, buf.limit());
    }
    return Pair.create(logFile, positionAfterHeader);
}
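The javadoc's "first position after the descriptor" is what a caller would use to append records behind the header. A sketch of such a hypothetical caller with standard java.nio; appendAfterHeader is illustrative, not part of the test class:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

static void appendAfterHeader(String logFilePath, int positionAfterHeader, ByteBuffer record) throws IOException {
    try (FileChannel channel = FileChannel.open(Paths.get(logFilePath), StandardOpenOption.WRITE)) {
        // Positional write: the record lands immediately after the descriptor header.
        channel.write(record, positionAfterHeader);
    }
}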
Use of org.apache.cassandra.io.util.FileOutputStreamPlus in project cassandra by apache.
The class CommitLogTest, method zeroFirstSyncMarkerCRC.
private void zeroFirstSyncMarkerCRC(File file) throws IOException {
    // Get the position of the first sync marker...
    int firstSyncMarkerPosition = -1;
    try (RandomAccessReader reader = RandomAccessReader.open(file)) {
        CommitLogDescriptor.readHeader(reader, DatabaseDescriptor.getEncryptionContext());
        firstSyncMarkerPosition = (int) reader.getFilePointer();
    }
    // ...buffer the file into memory...
    ByteBuffer buffer = ByteBuffer.allocate((int) file.length());
    try (RandomAccessReader reader = RandomAccessReader.open(file)) {
        reader.readFully(buffer);
    }
    // ...overwrite the sync marker CRC with zero...
    buffer.position(firstSyncMarkerPosition + 4);
    buffer.putInt(0);
    // ...and write the file back out.
    try (OutputStream out = new FileOutputStreamPlus(file)) {
        out.write(buffer.array());
    }
}
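The + 4 offset reflects the sync marker layout the test relies on: an int pointer to the next marker comes first, with the CRC in the following four bytes. Under that same layout assumption, the four CRC bytes could also be zeroed in place with a positional FileChannel write instead of round-tripping the whole file through memory; a minimal sketch, standard java.nio only:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

static void zeroIntAt(Path file, long offset) throws IOException {
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE)) {
        // ByteBuffer.allocate produces a zero-filled buffer; writing it at
        // offset overwrites exactly four bytes (here, the CRC) with zeros.
        channel.write(ByteBuffer.allocate(4), offset);
    }
}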
Use of org.apache.cassandra.io.util.FileOutputStreamPlus in project cassandra by apache.
The class CommitLogTest, method testHeaderOnlyFileFiltering.
/**
 * Since commit log segments can be allocated before they're needed, the commit log file with the highest
 * id isn't necessarily the last log that we wrote to. We should remove header-only logs on recover so we
 * can tolerate truncated logs.
 */
@Test
public void testHeaderOnlyFileFiltering() throws Exception {
    Assume.assumeTrue(!DatabaseDescriptor.getEncryptionContext().isEnabled());
    File directory = new File(Files.createTempDir());
    CommitLogDescriptor desc1 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 1, null, DatabaseDescriptor.getEncryptionContext());
    CommitLogDescriptor desc2 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 2, null, DatabaseDescriptor.getEncryptionContext());
    ByteBuffer buffer;
    // this file has a header followed by malformed data
    File file1 = new File(directory, desc1.fileName());
    buffer = ByteBuffer.allocate(1024);
    CommitLogDescriptor.writeHeader(buffer, desc1);
    int pos = buffer.position();
    CommitLogSegment.writeSyncMarker(desc1.id, buffer, buffer.position(), buffer.position(), buffer.position() + 128);
    buffer.position(pos + 8);
    buffer.putInt(5);
    buffer.putInt(6);
    try (OutputStream lout = new FileOutputStreamPlus(file1)) {
        lout.write(buffer.array());
    }
    // this file has only a header
    File file2 = new File(directory, desc2.fileName());
    buffer = ByteBuffer.allocate(1024);
    CommitLogDescriptor.writeHeader(buffer, desc2);
    try (OutputStream lout = new FileOutputStreamPlus(file2)) {
        lout.write(buffer.array());
    }
    // one corrupt file and one header-only file should be ok
    runExpecting(() -> {
        CommitLog.instance.recoverFiles(file1, file2);
        return null;
    }, null);
    // two corrupt files and one header-only file should fail
    runExpecting(() -> {
        CommitLog.instance.recoverFiles(file1, file1, file2);
        return null;
    }, CommitLogReplayException.class);
}
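The filtering behaviour the test expects can be pictured as a simple length check against the header size. The sketch below is hypothetical; dropHeaderOnly and its headerSize parameter are illustrative rather than Cassandra API, and it uses java.io.File rather than Cassandra's File wrapper:

import java.io.File;
import java.util.List;
import java.util.stream.Collectors;

static List<File> dropHeaderOnly(List<File> segments, long headerSize) {
    // Keep only segments that contain data beyond the descriptor header;
    // header-only segments were allocated but never written to.
    return segments.stream()
                   .filter(f -> f.length() > headerSize)
                   .collect(Collectors.toList());
}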