Example 6 with FSWriteError

Usage of org.apache.cassandra.io.FSWriteError in the Apache Cassandra project.

From class SequentialWriter, method truncate.

public void truncate(long toSize) {
    try {
        fchannel.truncate(toSize);
        lastFlushOffset = toSize;
    } catch (IOException e) {
        // Wrap the checked IOException in an unchecked FSWriteError carrying the file's path.
        throw new FSWriteError(e, getPath());
    }
}
Also used: FSWriteError (org.apache.cassandra.io.FSWriteError), IOException (java.io.IOException)
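
The pattern here recurs throughout Cassandra's I/O layer: catch the checked IOException and rethrow it as an unchecked FSWriteError that carries the path of the file that failed. Below is a minimal, self-contained sketch of the same idiom; SimpleFSWriteError and TruncateSketch are hypothetical names standing in for Cassandra's classes, not its actual API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

// Hypothetical stand-in for org.apache.cassandra.io.FSWriteError: an
// unchecked exception pairing the underlying IOException with the path.
class SimpleFSWriteError extends RuntimeException {
    final String path;

    SimpleFSWriteError(Throwable cause, String path) {
        super("Write error on " + path, cause);
        this.path = path;
    }
}

public class TruncateSketch {
    public static void main(String[] args) {
        Path file = Paths.get("/tmp/truncate-sketch.dat");
        try (FileChannel channel = FileChannel.open(file,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            channel.write(ByteBuffer.wrap(new byte[1024]));
            channel.truncate(512); // shrink the file, as SequentialWriter.truncate does
        } catch (IOException e) {
            // Callers see a single unchecked type with the path attached.
            throw new SimpleFSWriteError(e, file.toString());
        }
    }
}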

Example 7 with FSWriteError

Usage of org.apache.cassandra.io.FSWriteError in the Apache Cassandra project.

From class BigTableWriter, method writeMetadata.

private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components) {
    File file = new File(desc.filenameFor(Component.STATS));
    try (SequentialWriter out = new SequentialWriter(file, writerOption)) {
        desc.getMetadataSerializer().serialize(components, out, desc.version);
        out.finish(); // finish() flushes and syncs the metadata to disk before the writer closes
    } catch (IOException e) {
        throw new FSWriteError(e, file.getPath());
    }
}
Also used: FSWriteError (org.apache.cassandra.io.FSWriteError), CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter)
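
One detail worth noting above: out.finish() is called explicitly inside the try-with-resources block, because in Cassandra's writer contract finishing is what commits the file, while closing an unfinished writer abandons it. The sketch below illustrates that commit-or-abort shape under that assumption; TransactionalWriterSketch is a hypothetical class, not Cassandra's SequentialWriter.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

// Hypothetical commit-or-abort writer: finish() makes the file permanent,
// close() without finish() deletes the partial output.
class TransactionalWriterSketch implements AutoCloseable {
    private final File file;
    private final FileOutputStream out;
    private boolean finished;

    TransactionalWriterSketch(File file) throws IOException {
        this.file = file;
        this.out = new FileOutputStream(file);
    }

    void write(byte[] bytes) throws IOException {
        out.write(bytes);
    }

    void finish() throws IOException {
        out.getFD().sync(); // force the bytes to disk before declaring success
        finished = true;
    }

    @Override
    public void close() throws IOException {
        out.close();
        if (!finished)
            file.delete(); // abort: never leave a half-written component behind
    }

    public static void main(String[] args) throws IOException {
        File file = new File("/tmp/stats-sketch.db");
        try (TransactionalWriterSketch writer = new TransactionalWriterSketch(file)) {
            writer.write(new byte[] { 1, 2, 3 });
            writer.finish(); // omit this call and close() removes the file
        }
        System.out.println("exists after finish: " + file.exists());
    }
}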

Example 8 with FSWriteError

Usage of org.apache.cassandra.io.FSWriteError in the Apache Cassandra project.

From class OutOfSpaceTest, method flushAndExpectError.

public void flushAndExpectError() throws InterruptedException, ExecutionException {
    try {
        Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable()).forceFlush().get();
        fail("FSWriteError expected.");
    } catch (ExecutionException e) {
        // Correct path.
        Assert.assertTrue(e.getCause() instanceof FSWriteError);
    }
    // Make sure commit log wasn't discarded.
    TableId tableId = currentTableMetadata().id;
    for (CommitLogSegment segment : CommitLog.instance.segmentManager.getActiveSegments()) {
        if (segment.getDirtyTableIds().contains(tableId))
            return; // still dirty: the failed flush did not discard the data
    }
    fail("Expected commit log to remain dirty for the affected table.");
}
Also used: TableId (org.apache.cassandra.schema.TableId), CommitLogSegment (org.apache.cassandra.db.commitlog.CommitLogSegment), FSWriteError (org.apache.cassandra.io.FSWriteError), ExecutionException (java.util.concurrent.ExecutionException)
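
The assertion pattern generalizes beyond Cassandra: an error thrown inside a background task surfaces from Future.get() as the cause of an ExecutionException, so a test has to unwrap the cause before checking its type. A minimal sketch with a plain executor, using a RuntimeException as a stand-in for FSWriteError:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ExpectWrappedErrorSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // The Runnable stands in for the flush task; the RuntimeException
        // stands in for an FSWriteError raised while writing the sstable.
        Future<?> flush = pool.submit((Runnable) () -> {
            throw new RuntimeException("simulated FSWriteError");
        });
        try {
            flush.get();
            throw new AssertionError("expected the flush to fail");
        } catch (ExecutionException e) {
            // Correct path: the original error rides along as the cause.
            if (!(e.getCause() instanceof RuntimeException))
                throw new AssertionError("unexpected cause: " + e.getCause());
            System.out.println("unwrapped: " + e.getCause().getMessage());
        } finally {
            pool.shutdown();
        }
    }
}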

Example 9 with FSWriteError

Usage of org.apache.cassandra.io.FSWriteError in the Apache Cassandra project.

From class DirectoriesTest, method testDiskFailurePolicy_best_effort.

@Test
public void testDiskFailurePolicy_best_effort() {
    DiskFailurePolicy origPolicy = DatabaseDescriptor.getDiskFailurePolicy();
    try {
        DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.best_effort);
        // Fake a Directory creation failure
        if (Directories.dataDirectories.length > 0) {
            String[] path = new String[] { KS, "bad" };
            File dir = new File(Directories.dataDirectories[0].location, StringUtils.join(path, File.separator));
            FileUtils.handleFSError(new FSWriteError(new IOException("Unable to create directory " + dir), dir));
        }
        for (DataDirectory dd : Directories.dataDirectories) {
            File file = new File(dd.location, new File(KS, "bad").getPath());
            assertTrue(BlacklistedDirectories.isUnwritable(file));
        }
    } finally {
        DatabaseDescriptor.setDiskFailurePolicy(origPolicy);
    }
}
Also used: DiskFailurePolicy (org.apache.cassandra.config.Config.DiskFailurePolicy), FSWriteError (org.apache.cassandra.io.FSWriteError), DataDirectory (org.apache.cassandra.db.Directories.DataDirectory), IOException (java.io.IOException), File (java.io.File), Test (org.junit.Test)
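
What best_effort is expected to do here is blacklist the failing directory and keep the node running, so later writes route to the remaining data directories. The sketch below is an illustrative approximation of that bookkeeping; BlacklistSketch and its methods are hypothetical, not Cassandra's BlacklistedDirectories API.

import java.io.File;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative best_effort-style bookkeeping: record directories that
// failed a write and consult the record before placing new files.
public class BlacklistSketch {
    private static final Set<String> unwritable = ConcurrentHashMap.newKeySet();

    static void handleWriteError(File dir) {
        unwritable.add(dir.getPath()); // stop routing new writes here
    }

    static boolean isUnwritable(File file) {
        // A file is unwritable if it sits under any blacklisted directory.
        for (File f = file; f != null; f = f.getParentFile())
            if (unwritable.contains(f.getPath()))
                return true;
        return false;
    }

    public static void main(String[] args) {
        File bad = new File("/data1/ks/bad");
        handleWriteError(bad);
        System.out.println(isUnwritable(new File(bad, "na-1-big-Data.db"))); // true
        System.out.println(isUnwritable(new File("/data2/ks/good")));        // false
    }
}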

Example 10 with FSWriteError

Usage of org.apache.cassandra.io.FSWriteError in the Apache Cassandra project.

From class SSTableFlushObserverTest, method testFlushObserver.

@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // Join path components with File.separator (the original used File.pathSeparator,
    // which is the ':' path-list delimiter, not a directory separator).
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
                                               10L, 0L, null,
                                               TableMetadataRef.forOfflineTools(cfm),
                                               new MetadataCollector(cfm.comparator).sstableLevel(0),
                                               new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
                                               Collections.singletonList(observer),
                                               transaction);
    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), FSWriteError (org.apache.cassandra.io.FSWriteError), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), FileDataInput (org.apache.cassandra.io.util.FileDataInput), SerializationHeader (org.apache.cassandra.db.SerializationHeader), FSReadError (org.apache.cassandra.io.FSReadError), BigTableWriter (org.apache.cassandra.io.sstable.format.big.BigTableWriter), Descriptor (org.apache.cassandra.io.sstable.Descriptor), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor), MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector), File (java.io.File), Test (org.junit.Test)
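
The machinery this test exercises is a callback contract: as the writer flushes, it notifies the observer of each partition (together with its index position) and each cell, then signals completion, and the test replays those notifications against what it wrote. A toy version of such a contract is sketched below; the interface and class names are hypothetical, not Cassandra's SSTableFlushObserver.

import java.util.ArrayList;
import java.util.List;

// Hypothetical observer contract: the writer calls back once per partition
// and per cell during the flush, then signals completion.
interface FlushObserverSketch {
    void begin();
    void partition(String key, long indexPosition);
    void cell(String name, Object value);
    void complete();
}

class RecordingObserver implements FlushObserverSketch {
    final List<String> events = new ArrayList<>();
    boolean isComplete;

    public void begin() { events.add("begin"); }
    public void partition(String key, long pos) { events.add("partition " + key + " @ " + pos); }
    public void cell(String name, Object value) { events.add(name + " = " + value); }
    public void complete() { isComplete = true; }
}

public class FlushObserverDemo {
    public static void main(String[] args) {
        RecordingObserver observer = new RecordingObserver();
        // A writer would drive these callbacks as it flushes each partition.
        observer.begin();
        observer.partition("key1", 0L);
        observer.cell("age", 27);
        observer.cell("first_name", "jack");
        observer.complete();
        System.out.println(observer.isComplete + " " + observer.events);
    }
}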

Aggregations

FSWriteError (org.apache.cassandra.io.FSWriteError): 23 uses
IOException (java.io.IOException): 15 uses
ByteBuffer (java.nio.ByteBuffer): 5 uses
File (java.io.File): 3 uses
DiskFailurePolicy (org.apache.cassandra.config.Config.DiskFailurePolicy): 2 uses
Test (org.junit.Test): 2 uses
PrintStream (java.io.PrintStream): 1 use
MappedByteBuffer (java.nio.MappedByteBuffer): 1 use
ExecutionException (java.util.concurrent.ExecutionException): 1 use
CRC32 (java.util.zip.CRC32): 1 use
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 1 use
DataDirectory (org.apache.cassandra.db.Directories.DataDirectory): 1 use
SerializationHeader (org.apache.cassandra.db.SerializationHeader): 1 use
CommitLogSegment (org.apache.cassandra.db.commitlog.CommitLogSegment): 1 use
Allocation (org.apache.cassandra.db.commitlog.CommitLogSegment.Allocation): 1 use
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 1 use
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 1 use
IndexedTerm (org.apache.cassandra.index.sasi.sa.IndexedTerm): 1 use
FSDiskFullWriteError (org.apache.cassandra.io.FSDiskFullWriteError): 1 use
FSReadError (org.apache.cassandra.io.FSReadError): 1 use