Use of org.apache.cassandra.io.FSWriteError in project cassandra by Apache.
The class SequentialWriter, method truncate:
public void truncate(long toSize) {
    try {
        fchannel.truncate(toSize);
        lastFlushOffset = toSize;
    } catch (IOException e) {
        throw new FSWriteError(e, getPath());
    }
}
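The pattern above — wrap any IOException from a channel operation in an FSWriteError that carries the file path — is shown below as a minimal, self-contained sketch using plain JDK I/O. The helper name truncateOrThrow is hypothetical and not part of SequentialWriter.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

import org.apache.cassandra.io.FSWriteError;

public final class TruncateExample {
    // Truncate a file to the requested size; any IOException is rethrown as an
    // FSWriteError naming the path, so the disk failure policy can react to it.
    public static void truncateOrThrow(Path path, long toSize) {
        try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE)) {
            channel.truncate(toSize);
        } catch (IOException e) {
            // FSWriteError is unchecked, so callers are not forced to handle it.
            throw new FSWriteError(e, path.toString());
        }
    }
}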
Use of org.apache.cassandra.io.FSWriteError in project cassandra by Apache.
The class BigTableWriter, method writeMetadata:
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components) {
    File file = new File(desc.filenameFor(Component.STATS));
    try (SequentialWriter out = new SequentialWriter(file, writerOption)) {
        desc.getMetadataSerializer().serialize(components, out, desc.version);
        out.finish();
    } catch (IOException e) {
        throw new FSWriteError(e, file.getPath());
    }
}
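A minimal sketch of the same writeMetadata shape with plain JDK streams instead of SequentialWriter: the output is opened in try-with-resources, and any IOException, including one thrown on close, is rethrown as an FSWriteError naming the component file. The method writeComponent and the byte[] payload are hypothetical stand-ins for the metadata serializer.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.cassandra.io.FSWriteError;

public final class MetadataWriteExample {
    // Write the serialized component to disk, converting any write failure
    // into an FSWriteError that records which file could not be written.
    public static void writeComponent(File file, byte[] payload) {
        try (OutputStream out = new FileOutputStream(file)) {
            out.write(payload);
            out.flush();
        } catch (IOException e) {
            throw new FSWriteError(e, file.getPath());
        }
    }
}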
Use of org.apache.cassandra.io.FSWriteError in project cassandra by Apache.
The class OutOfSpaceTest, method flushAndExpectError:
public void flushAndExpectError() throws InterruptedException, ExecutionException {
    try {
        Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable()).forceFlush().get();
        fail("FSWriteError expected.");
    } catch (ExecutionException e) {
        // Correct path.
        Assert.assertTrue(e.getCause() instanceof FSWriteError);
    }
    // Make sure commit log wasn't discarded.
    TableId tableId = currentTableMetadata().id;
    for (CommitLogSegment segment : CommitLog.instance.segmentManager.getActiveSegments())
        if (segment.getDirtyTableIds().contains(tableId))
            return;
    fail("Expected commit log to remain dirty for the affected table.");
}
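The assertion pattern in flushAndExpectError can be isolated as below: a write failure on the flush path is delivered as the cause of an ExecutionException, so the test unwraps it and checks the cause type. The CompletableFuture parameter here is a hypothetical stand-in for the future returned by forceFlush().

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

import org.apache.cassandra.io.FSWriteError;
import org.junit.Assert;

public final class ExpectFSWriteErrorExample {
    // Wait for the flush to complete and require that it failed with an
    // FSWriteError as the root cause of the ExecutionException.
    public static void expectWriteError(CompletableFuture<Void> flush) throws InterruptedException {
        try {
            flush.get();
            Assert.fail("FSWriteError expected.");
        } catch (ExecutionException e) {
            Assert.assertTrue(e.getCause() instanceof FSWriteError);
        }
    }
}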
Use of org.apache.cassandra.io.FSWriteError in project cassandra by Apache.
The class DirectoriesTest, method testDiskFailurePolicy_best_effort:
@Test
public void testDiskFailurePolicy_best_effort() {
    DiskFailurePolicy origPolicy = DatabaseDescriptor.getDiskFailurePolicy();
    try {
        DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.best_effort);
        // Fake a directory creation failure
        if (Directories.dataDirectories.length > 0) {
            String[] path = new String[] { KS, "bad" };
            File dir = new File(Directories.dataDirectories[0].location, StringUtils.join(path, File.separator));
            FileUtils.handleFSError(new FSWriteError(new IOException("Unable to create directory " + dir), dir));
        }
        for (DataDirectory dd : Directories.dataDirectories) {
            File file = new File(dd.location, new File(KS, "bad").getPath());
            assertTrue(BlacklistedDirectories.isUnwritable(file));
        }
    } finally {
        DatabaseDescriptor.setDiskFailurePolicy(origPolicy);
    }
}
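Conceptually, the best_effort policy exercised above records the directory named in the FSWriteError as unwritable instead of stopping the node. The sketch below models that bookkeeping with a hypothetical stand-in for BlacklistedDirectories; it is not the real implementation.

import java.io.File;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.cassandra.io.FSWriteError;

public final class BestEffortPolicyExample {
    private static final Set<String> unwritable = ConcurrentHashMap.newKeySet();

    // In Cassandra the failing path travels inside the FSError itself; here it
    // is passed explicitly to keep the sketch independent of internal fields.
    public static void markUnwritable(File dir) {
        unwritable.add(dir.getAbsolutePath());
    }

    public static boolean isUnwritable(File dir) {
        return unwritable.contains(dir.getAbsolutePath());
    }

    public static void main(String[] args) {
        File dir = new File("/tmp/ks/bad");
        // Same construction as in the test above: the cause plus the directory.
        FSWriteError error = new FSWriteError(new IOException("Unable to create directory " + dir), dir);
        markUnwritable(dir);
        System.out.println(error.getMessage() + " -> unwritable=" + isUnwritable(dir));
    }
}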
Use of org.apache.cassandra.io.FSWriteError in project cassandra by Apache.
The class SSTableFlushObserverTest, method testFlushObserver:
@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
                                               10L,
                                               0L,
                                               null,
                                               TableMetadataRef.forOfflineTools(cfm),
                                               new MetadataCollector(cfm.comparator).sstableLevel(0),
                                               new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
                                               Collections.singletonList(observer),
                                               transaction);
    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
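The directory-creation guard at the top of testFlushObserver is a reusable pattern in its own right: if mkdirs() fails, the failure is surfaced immediately as an FSWriteError naming the path. A minimal sketch, with the hypothetical helper name ensureDirectory:

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.FSWriteError;

public final class EnsureDirectoryExample {
    // Create the directory if it does not already exist; a failed mkdirs()
    // is reported as an FSWriteError carrying the absolute path.
    public static File ensureDirectory(String path) {
        File directory = new File(path);
        if (!directory.exists() && !directory.mkdirs())
            throw new FSWriteError(new IOException("failed to create directory"), directory.getAbsolutePath());
        return directory;
    }
}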