Use of java.io.IOError in project eiger by wlloyd.
The class SerializingCache, method serialize:
private FreeableMemory serialize(V value) {
    long serializedSize = serializer.serializedSize(value);
    if (serializedSize > Integer.MAX_VALUE)
        throw new IllegalArgumentException("Unable to allocate " + serializedSize + " bytes");
    FreeableMemory freeableMemory;
    try {
        freeableMemory = new FreeableMemory(serializedSize);
    } catch (OutOfMemoryError e) {
        return null;
    }
    try {
        serializer.serialize(value, new DataOutputStream(new MemoryOutputStream(freeableMemory)));
    } catch (IOException e) {
        throw new IOError(e);
    }
    return freeableMemory;
}
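Note the split between the two failure modes above: an OutOfMemoryError from the off-heap allocation is expected and reported as a null return, while an IOException from a purely in-memory write is treated as impossible and escalated to the unchecked java.io.IOError. A minimal standalone sketch of that second convention, assuming nothing from eiger beyond the JDK (class and method names below are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOError;
import java.io.IOException;

public final class InMemorySerializePattern {
    // Writing to an in-memory buffer should never fail with IOException,
    // so a checked failure here is rethrown as the unchecked IOError
    // instead of polluting the method signature with "throws IOException".
    static byte[] serialize(String value) {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try {
            new DataOutputStream(buffer).writeUTF(value);
        } catch (IOException e) {
            throw new IOError(e); // "impossible" here; surface loudly if it happens
        }
        return buffer.toByteArray();
    }

    public static void main(String[] args) {
        System.out.println(serialize("hello").length + " bytes");
    }
}

The payoff is a clean signature for serialize while any genuinely unexpected failure still propagates instead of being swallowed.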
Use of java.io.IOError in project eiger by wlloyd.
The class DefinitionsUpdateVerbHandler, method doVerb:
/** someone sent me their data definitions */
public void doVerb(final Message message, String id) {
    try {
        // these are the serialized row mutations that I must apply.
        // check versions at every step along the way to make sure migrations are not applied out of order.
        Collection<Column> cols = MigrationManager.makeColumns(message);
        for (Column col : cols) {
            final UUID version = UUIDGen.getUUID(col.name());
            if (version.timestamp() > Schema.instance.getVersion().timestamp()) {
                final Migration m = Migration.deserialize(col.value(), message.getVersion());
                assert m.getVersion().equals(version);
                StageManager.getStage(Stage.MIGRATION).submit(new WrappedRunnable() {
                    protected void runMayThrow() throws Exception {
                        // check to make sure the current version is before this one.
                        if (Schema.instance.getVersion().timestamp() == version.timestamp())
                            logger.debug("Not applying (equal) " + version.toString());
                        else if (Schema.instance.getVersion().timestamp() > version.timestamp())
                            logger.debug("Not applying (before) " + version.toString());
                        else {
                            logger.debug("Applying {} from {}", m.getClass().getSimpleName(), message.getFrom());
                            try {
                                m.apply();
                                // update gossip, but don't contact nodes directly
                                m.passiveAnnounce();
                            } catch (ConfigurationException ex) {
                                // Trying to apply the same migration twice. This happens as a result of gossip.
                                logger.debug("Migration not applied " + ex.getMessage());
                            }
                        }
                    }
                });
            }
        }
    } catch (IOException ex) {
        throw new IOError(ex);
    }
}
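Only runMayThrow is visible above; WrappedRunnable is Cassandra's small adapter for submitting task bodies that declare checked exceptions to an executor. A hedged sketch of what such an adapter looks like (the real class lives elsewhere in the codebase and may differ in detail):

// Illustrative wrapper: lets a task body declare checked exceptions while
// still satisfying Runnable, converting failures to unchecked in one place.
public abstract class CheckedRunnable implements Runnable {
    public final void run() {
        try {
            runMayThrow();
        } catch (RuntimeException e) {
            throw e; // already unchecked; rethrow untouched
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    protected abstract void runMayThrow() throws Exception;
}

This keeps the checked-to-unchecked conversion out of every individual task, so the migration body above can simply declare throws Exception.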
Use of java.io.IOError in project eiger by wlloyd.
The class TruncateVerbHandler, method doVerb:
public void doVerb(Message message, String id) {
    byte[] bytes = message.getMessageBody();
    FastByteArrayInputStream buffer = new FastByteArrayInputStream(bytes);
    try {
        Truncation t = Truncation.serializer().deserialize(new DataInputStream(buffer), message.getVersion());
        logger.debug("Applying {}", t);
        try {
            ColumnFamilyStore cfs = Table.open(t.keyspace).getColumnFamilyStore(t.columnFamily);
            cfs.truncate().get();
        } catch (Exception e) {
            logger.error("Error in truncation", e);
            respondError(t, message);
        }
        logger.debug("Truncate operation succeeded at this host");
        TruncateResponse response = new TruncateResponse(t.keyspace, t.columnFamily, true);
        Message responseMessage = TruncateResponse.makeTruncateResponseMessage(message, response);
        logger.debug("{} applied. Sending response to {}@{} ", new Object[] { t, id, message.getFrom() });
        MessagingService.instance().sendReply(responseMessage, id, message.getFrom());
    } catch (IOException e) {
        throw new IOError(e);
    }
}
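Here the message body is already in memory, so an IOException from deserialization signals a corrupt or version-incompatible message rather than a transient I/O problem, which is why it is escalated to IOError instead of retried. A self-contained sketch of the same read-side shape using only JDK types (names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOError;
import java.io.IOException;

public final class InMemoryDeserializePattern {
    // Reads a length-prefixed UTF string from an in-memory buffer; a failure
    // means the bytes are malformed, which is escalated rather than handled.
    static String readBody(byte[] bytes) {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
        try {
            return in.readUTF();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public static void main(String[] args) throws IOException {
        java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
        new java.io.DataOutputStream(out).writeUTF("truncate t1");
        System.out.println(readBody(out.toByteArray()));
    }
}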
Use of java.io.IOError in project eiger by wlloyd.
The class CompactionManager, method scrubOne:
private void scrubOne(ColumnFamilyStore cfs, SSTableReader sstable) throws IOException {
    logger.info("Scrubbing " + sstable);
    CompactionController controller = new CompactionController(cfs, Collections.singletonList(sstable), getDefaultGcBefore(cfs), true);
    boolean isCommutative = cfs.metadata.getDefaultValidator().isCommutative();
    // Calculate the expected compacted filesize
    File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(sstable.onDiskLength());
    if (compactionFileLocation == null)
        throw new IOException("disk full");
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(), (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));
    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    final RandomAccessReader dataFile = sstable.openDataReader(true);
    RandomAccessReader indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)), true);
    ScrubInfo scrubInfo = new ScrubInfo(dataFile, sstable);
    executor.beginCompaction(scrubInfo);
    SSTableWriter writer = null;
    SSTableReader newSstable = null;
    int goodRows = 0, badRows = 0, emptyRows = 0;
    try {
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            // throw away variable so we don't have a side effect in the assert
            long firstRowPositionFromIndex = indexFile.readLong();
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }
        // TODO errors when creating the writer may leave empty temp files.
        writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null, Collections.singletonList(sstable));
        while (!dataFile.isEOF()) {
            if (scrubInfo.isStopped())
                throw new CompactionInterruptedException(scrubInfo.getCompactionInfo());
            long rowStart = dataFile.getFilePointer();
            if (logger.isDebugEnabled())
                logger.debug("Reading row at " + rowStart);
            DecoratedKey key = null;
            long dataSize = -1;
            try {
                key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(dataFile));
                dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                if (logger.isDebugEnabled())
                    logger.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }
            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
            } catch (Throwable th) {
                logger.warn("Error reading index file", th);
                nextIndexKey = null;
                nextRowPositionFromIndex = dataFile.length();
            }
            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null ? -1 : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
            long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
            assert currentIndexKey != null || indexFile.isEOF();
            if (logger.isDebugEnabled() && currentIndexKey != null)
                logger.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex));
            writer.mark();
            try {
                if (key == null)
                    throw new IOError(new IOException("Unable to read row key from data file"));
                if (dataSize > dataFile.length())
                    throw new IOError(new IOException("Impossible row size " + dataSize));
                SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStart, dataSize, true);
                AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                if (compactedRow.isEmpty()) {
                    emptyRows++;
                } else {
                    writer.append(compactedRow);
                    goodRows++;
                }
                if (!key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    logger.warn("Index file contained a different key or row size; using key from data file");
            } catch (Throwable th) {
                throwIfFatal(th);
                logger.warn("Non-fatal error reading row (stacktrace follows)", th);
                writer.resetAndTruncate();
                if (currentIndexKey != null && (key == null || !key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex || dataSize != dataSizeFromIndex)) {
                    logger.info(String.format("Retrying from row index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex));
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, currentIndexKey);
                    try {
                        SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStartFromIndex, dataSizeFromIndex, true);
                        AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                        if (compactedRow.isEmpty()) {
                            emptyRows++;
                        } else {
                            writer.append(compactedRow);
                            goodRows++;
                        }
                    } catch (Throwable th2) {
                        throwIfFatal(th2);
                        // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                        if (isCommutative)
                            throw new IOError(th2);
                        logger.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2);
                        writer.resetAndTruncate();
                        dataFile.seek(nextRowPositionFromIndex);
                        badRows++;
                    }
                } else {
                    // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                    if (isCommutative)
                        throw new IOError(th);
                    logger.warn("Row at " + dataStart + " is unreadable; skipping to next");
                    if (currentIndexKey != null)
                        dataFile.seek(nextRowPositionFromIndex);
                    badRows++;
                }
            }
        }
        if (writer.getFilePointer() > 0)
            newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
    } catch (Exception e) {
        if (writer != null)
            writer.abort();
        throw FBUtilities.unchecked(e);
    } finally {
        FileUtils.closeQuietly(dataFile);
        FileUtils.closeQuietly(indexFile);
        executor.finishCompaction(scrubInfo);
    }
    if (newSstable == null) {
        cfs.markCompacted(Arrays.asList(sstable));
        if (badRows > 0)
            logger.warn("No valid rows found while scrubbing " + sstable + "; it is marked for deletion now. If you want to attempt manual recovery, you can find a copy in the pre-scrub snapshot");
        else
            logger.info("Scrub of " + sstable + " complete; looks like all " + emptyRows + " rows were tombstoned");
    } else {
        cfs.replaceCompactedSSTables(Arrays.asList(sstable), Arrays.asList(newSstable));
        logger.info("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and " + emptyRows + " empty (tombstoned) rows dropped");
        if (badRows > 0)
            logger.warn("Unable to recover " + badRows + " rows that were skipped. You can attempt manual recovery from the pre-scrub snapshot. You can also run nodetool repair to transfer the data from a healthy replica, if any");
    }
}
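The row-level recovery above hinges on two writer primitives: mark() records the position after the last good row, and resetAndTruncate() rewinds to it when a row turns out to be unreadable, so a corrupt row never leaves partial bytes in the new sstable. A minimal sketch of that idiom over a plain RandomAccessFile (an assumption for illustration; the real SSTableWriter also rewinds its index and bloom-filter state):

import java.io.IOException;
import java.io.RandomAccessFile;

public final class TruncatingWriter {
    private final RandomAccessFile file;
    private long mark; // position after the last fully written row

    public TruncatingWriter(RandomAccessFile file) {
        this.file = file;
    }

    public void mark() throws IOException {
        mark = file.getFilePointer(); // remember the last known-good position
    }

    public void resetAndTruncate() throws IOException {
        file.setLength(mark); // drop any partially written row bytes
        file.seek(mark);      // resume appending from the good position
    }
}

Calling mark() before each row and resetAndTruncate() in the failure path gives the scrub loop its all-or-nothing behavior per row.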
Use of java.io.IOError in project eiger by wlloyd.
The class SystemTable, method removeToken:
/**
 * Remove stored token being used by another node
 */
public static synchronized void removeToken(Token token) {
    IPartitioner p = StorageService.getPartitioner();
    RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, RING_KEY);
    rm.delete(new QueryPath(STATUS_CF, null, p.getTokenFactory().toByteArray(token)), LamportClock.getVersion());
    try {
        rm.apply();
    } catch (IOException e) {
        throw new IOError(e);
    }
    forceBlockingFlush(STATUS_CF);
}
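One consequence of this convention is easy to miss: java.io.IOError extends Error, not Exception, so a caller's catch (Exception e) will not intercept it; only code catching IOError, Error, or Throwable sees the failure. A small standalone demonstration:

import java.io.IOError;
import java.io.IOException;

public final class IOErrorDemo {
    public static void main(String[] args) {
        try {
            throw new IOError(new IOException("local write failed"));
        } catch (Exception e) {
            // Never reached: IOError is not in the Exception hierarchy.
            System.out.println("unreachable for IOError");
        } catch (IOError e) {
            System.out.println("caught: " + e.getCause().getMessage());
        }
    }
}

That is the point of the pattern: a failed local write is treated as fatal by default, surviving ordinary exception handling unless a caller opts in to catching it.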