Use of java.io.IOError in project cassandra by apache.
The class ScrubTest, method testScrubCorruptedCounterPartition.
@Test
public void testScrubCorruptedCounterPartition() throws IOException, WriteTimeoutException
{
    // When compression is enabled, for testing corrupted chunks we need enough partitions to cover
    // at least 3 chunks of size COMPRESSION_CHUNK_LENGTH
    int numPartitions = 1000;

    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.truncateBlocking();
    fillCounterCF(cfs, numPartitions);
    assertOrderedAll(cfs, numPartitions);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();

    // make sure to override at most 1 chunk when compression is enabled
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));

    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Collections.singletonList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true))
    {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    }
    catch (IOError err)
    {
        assertTrue(err.getCause() instanceof CorruptSSTableException);
    }

    // with skipCorrupted == true, the corrupt rows will be skipped
    Scrubber.ScrubResult scrubResult;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Collections.singletonList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true))
    {
        scrubResult = scrubber.scrubWithResult();
    }
    assertNotNull(scrubResult);

    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    assertEquals(0, scrubResult.emptyPartitions);
    if (compression)
    {
        assertEquals(numPartitions, scrubResult.badPartitions + scrubResult.goodPartitions);
        // because we only corrupted 1 chunk and we chose enough partitions to cover at least 3 chunks
        assertTrue(scrubResult.goodPartitions >= scrubResult.badPartitions * 2);
    }
    else
    {
        assertEquals(1, scrubResult.badPartitions);
        assertEquals(numPartitions - 1, scrubResult.goodPartitions);
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    assertOrderedAll(cfs, scrubResult.goodPartitions);
}
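The assertion on err.getCause() reflects the general pattern these snippets share: code running below an interface that cannot declare checked exceptions wraps the failure in the unchecked java.io.IOError, and callers unwrap it with getCause(). The sketch below illustrates only that wrap/unwrap shape; CorruptedDataException and readChunk are hypothetical stand-ins, not Cassandra APIs.

import java.io.IOError;
import java.io.IOException;

public class IOErrorUnwrapSketch
{
    // Hypothetical checked exception standing in for CorruptSSTableException.
    static class CorruptedDataException extends IOException
    {
        CorruptedDataException(String message) { super(message); }
    }

    // Hypothetical low-level read whose interface declares no checked
    // exceptions, so it wraps the failure in the unchecked java.io.IOError.
    static int readChunk(boolean corrupted)
    {
        try
        {
            if (corrupted)
                throw new CorruptedDataException("bad checksum");
            return 42;
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
    }

    public static void main(String[] args)
    {
        try
        {
            readChunk(true);
        }
        catch (IOError err)
        {
            // Callers recover the original failure via getCause(), just as the
            // test asserts err.getCause() instanceof CorruptSSTableException.
            System.out.println("caught: " + err.getCause().getMessage());
        }
    }
}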
Use of java.io.IOError in project cassandra by apache.
The class ScrubTest, method testScrubCorruptedRowInSmallFile.
@Test
public void testScrubCorruptedRowInSmallFile() throws IOException, WriteTimeoutException
{
    // cannot test this with compression
    assumeTrue(!Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")));

    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    fillCounterCF(cfs, 2);
    assertOrderedAll(cfs, 2);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();

    // overwrite one row with garbage
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));

    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Collections.singletonList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true))
    {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    }
    catch (IOError err)
    {
        assertTrue(err.getCause() instanceof CorruptSSTableException);
    }

    // with skipCorrupted == true, the corrupt row will be skipped
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Collections.singletonList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true))
    {
        scrubber.scrub();
    }

    assertEquals(1, cfs.getLiveSSTables().size());
    // verify that we can read all of the rows, and there is now one less row
    assertOrderedAll(cfs, 1);
}
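Both scrub attempts lean on a detail of try-with-resources worth noting: a try-with-resources statement may carry its own catch clause, and the declared resources (here the LifecycleTransaction and Scrubber) are closed before the catch block runs, so the handler never observes a half-open scrub. A minimal sketch of just that language semantics, assuming nothing beyond the standard library:

import java.io.IOError;
import java.io.IOException;

public class TryWithResourcesCatchSketch
{
    static class Resource implements AutoCloseable
    {
        @Override
        public void close()
        {
            System.out.println("resource closed");
        }
    }

    public static void main(String[] args)
    {
        try (Resource r = new Resource())
        {
            throw new IOError(new IOException("simulated corruption"));
        }
        catch (IOError err)
        {
            // Prints after "resource closed": close() runs before this handler,
            // so the transaction/scrubber in the tests are already released here.
            System.out.println("handled: " + err.getCause().getMessage());
        }
    }
}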
Use of java.io.IOError in project cassandra by apache.
The class ScrubToolTest, method testNoSkipScrubCorruptedCounterPartitionWithTool.
@Test
public void testNoSkipScrubCorruptedCounterPartitionWithTool() throws IOException, WriteTimeoutException
{
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    int numPartitions = 1000;

    fillCounterCF(cfs, numPartitions);
    assertOrderedAll(cfs, numPartitions);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));

    // with skipCorrupted == false, the scrub is expected to fail
    try
    {
        ToolRunner.invokeClass(StandaloneScrubber.class, ksName, COUNTER_CF);
        fail("Expected a CorruptSSTableException to be thrown");
    }
    catch (IOError err)
    {
        assertTrue(err.getCause() instanceof CorruptSSTableException);
    }
}
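The try { ...; fail(...); } catch (IOError err) shape used throughout these tests is the classic JUnit 4 idiom for asserting both that an exception is thrown and what its cause is. On JUnit 4.13+ (or JUnit 5) the same check can be expressed with assertThrows; a hedged sketch, where doScrub() is a hypothetical stand-in for the ToolRunner invocation above:

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.IOError;

@Test
public void testNoSkipScrubFailsWithAssertThrows()
{
    // doScrub() is a hypothetical stand-in for
    // ToolRunner.invokeClass(StandaloneScrubber.class, ksName, COUNTER_CF)
    IOError err = assertThrows(IOError.class, () -> doScrub());
    assertTrue(err.getCause() instanceof CorruptSSTableException);
}

assertThrows returns the caught throwable, so the cause check reads as a plain assertion instead of a catch block.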
Use of java.io.IOError in project cassandra by apache.
The class UnfilteredRowIteratorSerializer, method deserialize.
public UnfilteredRowIterator deserialize(DataInputPlus in, int version, TableMetadata metadata, DeserializationHelper.Flag flag, Header header) throws IOException
{
    if (header.isEmpty)
        return EmptyIterators.unfilteredRow(metadata, header.key, header.isReversed);

    final DeserializationHelper helper = new DeserializationHelper(metadata, version, flag);
    final SerializationHeader sHeader = header.sHeader;
    return new AbstractUnfilteredRowIterator(metadata, header.key, header.partitionDeletion, sHeader.columns(), header.staticRow, header.isReversed, sHeader.stats())
    {
        private final Row.Builder builder = BTreeRow.sortedBuilder();

        protected Unfiltered computeNext()
        {
            try
            {
                Unfiltered unfiltered = UnfilteredSerializer.serializer.deserialize(in, sHeader, helper, builder);
                return unfiltered == null ? endOfData() : unfiltered;
            }
            catch (IOException e)
            {
                // computeNext() cannot declare IOException, so wrap it in an unchecked IOError
                throw new IOError(e);
            }
        }
    };
}
Use of java.io.IOError in project cassandra by apache.
The class StorageService, method decommission.
public void decommission(boolean force) throws InterruptedException
{
    TokenMetadata metadata = tokenMetadata.cloneAfterAllLeft();
    if (operationMode != Mode.LEAVING)
    {
        if (!tokenMetadata.isMember(FBUtilities.getBroadcastAddressAndPort()))
            throw new UnsupportedOperationException("local node is not a member of the token ring yet");
        if (metadata.getAllEndpoints().size() < 2)
            throw new UnsupportedOperationException("no other normal nodes in the ring; decommission would be pointless");
        if (operationMode != Mode.NORMAL)
            throw new UnsupportedOperationException("Node in " + operationMode + " state; wait for status to become normal or restart");
    }
    if (!isDecommissioning.compareAndSet(false, true))
        throw new IllegalStateException("Node is still decommissioning. Check nodetool netstats.");

    if (logger.isDebugEnabled())
        logger.debug("DECOMMISSIONING");

    try
    {
        PendingRangeCalculatorService.instance.blockUntilFinished();
        String dc = DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter();
        // If we're already decommissioning there is no point checking RF/pending ranges
        if (operationMode != Mode.LEAVING)
        {
            int rf, numNodes;
            for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
            {
                if (!force)
                {
                    Keyspace keyspace = Keyspace.open(keyspaceName);
                    if (keyspace.getReplicationStrategy() instanceof NetworkTopologyStrategy)
                    {
                        NetworkTopologyStrategy strategy = (NetworkTopologyStrategy) keyspace.getReplicationStrategy();
                        rf = strategy.getReplicationFactor(dc).allReplicas;
                        numNodes = metadata.getTopology().getDatacenterEndpoints().get(dc).size();
                    }
                    else
                    {
                        numNodes = metadata.getAllEndpoints().size();
                        rf = keyspace.getReplicationStrategy().getReplicationFactor().allReplicas;
                    }
                    if (numNodes <= rf)
                        throw new UnsupportedOperationException("Not enough live nodes to maintain replication factor in keyspace " + keyspaceName + " (RF = " + rf + ", N = " + numNodes + ")." + " Perform a forceful decommission to ignore.");
                }
                // TODO: do we care about fixing transient/full self-movements here? probably
                if (tokenMetadata.getPendingRanges(keyspaceName, FBUtilities.getBroadcastAddressAndPort()).size() > 0)
                    throw new UnsupportedOperationException("data is currently moving to this node; unable to leave the ring");
            }
        }

        startLeaving();
        long timeout = Math.max(RING_DELAY, BatchlogManager.instance.getBatchlogTimeout());
        setMode(Mode.LEAVING, "sleeping " + timeout + " ms for batch processing and pending range setup", true);
        Thread.sleep(timeout);

        Runnable finishLeaving = new Runnable()
        {
            public void run()
            {
                shutdownClientServers();
                Gossiper.instance.stop();
                try
                {
                    MessagingService.instance().shutdown();
                }
                catch (IOError ioe)
                {
                    logger.info("failed to shutdown message service: {}", ioe);
                }
                Stage.shutdownNow();
                SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.DECOMMISSIONED);
                setMode(Mode.DECOMMISSIONED, true);
                // let op be responsible for killing the process
            }
        };
        unbootstrap(finishLeaving);
    }
    catch (InterruptedException e)
    {
        throw new UncheckedInterruptedException(e);
    }
    catch (ExecutionException e)
    {
        logger.error("Error while decommissioning node", e.getCause());
        throw new RuntimeException("Error while decommissioning node: " + e.getCause().getMessage());
    }
    finally
    {
        isDecommissioning.set(false);
    }
}
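Note the asymmetry in how decommission() treats IOError compared with the scrub and deserialization paths above: there an IOError aborts the operation, while inside finishLeaving a failed MessagingService shutdown is only logged so the remaining teardown steps still run. A minimal sketch of that best-effort shutdown pattern, where stopMessaging() and stopRemaining() are hypothetical stand-ins for the Cassandra calls:

import java.io.IOError;
import java.io.IOException;

public class BestEffortShutdownSketch
{
    // Hypothetical stand-in for MessagingService.instance().shutdown().
    static void stopMessaging()
    {
        throw new IOError(new IOException("socket already closed"));
    }

    // Hypothetical stand-in for the remaining teardown (Stage.shutdownNow(), etc.).
    static void stopRemaining()
    {
        System.out.println("remaining services stopped");
    }

    public static void main(String[] args)
    {
        try
        {
            stopMessaging();
        }
        catch (IOError ioe)
        {
            // Log and continue: one failed step must not block the rest of the teardown.
            System.out.println("failed to shutdown message service: " + ioe.getCause());
        }
        stopRemaining();
    }
}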