Use of org.apache.cassandra.utils.WrappedRunnable in project eiger by wlloyd:
the deleteAsync method of the FileUtils class.
/**
 * Schedules the given file for deletion on the shared StorageService task
 * executor instead of deleting it on the calling thread.
 *
 * @param file path of the file to delete; deletion failures surface through
 *             the WrappedRunnable's exception handling
 */
public static void deleteAsync(final String file) {
    StorageService.tasks.execute(new WrappedRunnable() {
        protected void runMayThrow() throws IOException {
            deleteWithConfirm(new File(file));
        }
    });
}
Use of org.apache.cassandra.utils.WrappedRunnable in project eiger by wlloyd:
the recover method of the CommitLog class.
/**
 * Perform recovery on a list of commit log files.
 *
 * For each segment, replay starts at the global replay position (the earliest
 * flush position across all column families), and each deserialized mutation is
 * re-applied on the MUTATION stage, minus any column families that were already
 * flushed or have since been dropped. Recovered tables are flushed before
 * returning.
 *
 * @param clogs the list of commit log files to replay
 * @return the number of mutations replayed
 * @throws IOException if a commit log segment cannot be read
 */
public int recover(File[] clogs) throws IOException {
final Set<Table> tablesRecovered = new HashSet<Table>();
List<Future<?>> futures = new ArrayList<Future<?>>();
// reusable deserialization buffer, grown on demand when a larger entry is seen
byte[] bytes = new byte[4096];
// per-cfId count of mutations we could not deserialize (reported after replay)
Map<Integer, AtomicInteger> invalidMutations = new HashMap<Integer, AtomicInteger>();
// count the number of replayed mutation. We don't really care about atomicity, but we need it to be a reference.
final AtomicInteger replayedCount = new AtomicInteger();
// compute per-CF and global replay positions
final Map<Integer, ReplayPosition> cfPositions = new HashMap<Integer, ReplayPosition>();
for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
// it's important to call RP.gRP per-cf, before aggregating all the positions w/ the Ordering.min call
// below: gRP will return NONE if there are no flushed sstables, which is important to have in the
// list (otherwise we'll just start replay from the first flush position that we do have, which is not correct).
ReplayPosition rp = ReplayPosition.getReplayPosition(cfs.getSSTables());
cfPositions.put(cfs.metadata.cfId, rp);
}
// earliest flushed position over all CFs: anything before it is already on disk
final ReplayPosition globalPosition = Ordering.from(ReplayPosition.comparator).min(cfPositions.values());
Checksum checksum = new CRC32();
for (final File file : clogs) {
logger.info("Replaying " + file.getPath());
final long segment = CommitLogSegment.idFromFilename(file.getName());
RandomAccessReader reader = RandomAccessReader.open(new File(file.getAbsolutePath()), true);
// positions below are handled as ints, so segments must fit in an int
assert reader.length() <= Integer.MAX_VALUE;
try {
// decide where in this segment replay should begin
int replayPosition;
if (globalPosition.segment < segment)
replayPosition = 0;
else if (globalPosition.segment == segment)
replayPosition = globalPosition.position;
else
// segment predates the global position: seek to EOF so nothing is replayed
replayPosition = (int) reader.length();
if (replayPosition < 0 || replayPosition >= reader.length()) {
// replayPosition > reader.length() can happen if some data gets flushed before it is written to the commitlog
// (see https://issues.apache.org/jira/browse/CASSANDRA-2285)
logger.debug("skipping replay of fully-flushed {}", file);
continue;
}
reader.seek(replayPosition);
if (logger.isDebugEnabled())
logger.debug("Replaying " + file + " starting at " + reader.getFilePointer());
/* read the logs populate RowMutation and apply */
while (!reader.isEOF()) {
if (logger.isDebugEnabled())
logger.debug("Reading mutation at " + reader.getFilePointer());
long claimedCRC32;
int serializedSize;
try {
// any of the reads may hit EOF
serializedSize = reader.readInt();
if (serializedSize == CommitLog.END_OF_SEGMENT_MARKER) {
logger.debug("Encountered end of segment marker at " + reader.getFilePointer());
break;
}
// This prevents CRC by being fooled by special-case garbage in the file; see CASSANDRA-2128
if (serializedSize < 10)
break;
// the size is checksummed separately so a torn size field is detectable
long claimedSizeChecksum = reader.readLong();
checksum.reset();
checksum.update(serializedSize);
if (checksum.getValue() != claimedSizeChecksum)
// entry wasn't synced correctly/fully. that's ok.
break;
if (serializedSize > bytes.length)
bytes = new byte[(int) (1.2 * serializedSize)];
reader.readFully(bytes, 0, serializedSize);
claimedCRC32 = reader.readLong();
} catch (EOFException eof) {
// last CL entry didn't get completely written. that's ok.
break;
}
checksum.update(bytes, 0, serializedSize);
if (claimedCRC32 != checksum.getValue()) {
// this entry must not have been fsynced; later entries are probably bad too,
// but just in case there is no harm in trying them (since we still read on an entry boundary)
continue;
}
/* deserialize the commit log entry */
FastByteArrayInputStream bufIn = new FastByteArrayInputStream(bytes, 0, serializedSize);
RowMutation rm = null;
try {
// assuming version here. We've gone to lengths to make sure what gets written to the CL is in
// the current version. so do make sure the CL is drained prior to upgrading a node.
rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn), MessagingService.version_, IColumnSerializer.Flag.LOCAL);
} catch (UnserializableColumnFamilyException ex) {
// tally per-cfId so we can report how many mutations were skipped and why
AtomicInteger i = invalidMutations.get(ex.cfId);
if (i == null) {
i = new AtomicInteger(1);
invalidMutations.put(ex.cfId, i);
} else
i.incrementAndGet();
continue;
}
if (logger.isDebugEnabled())
logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(), ByteBufferUtil.bytesToHex(rm.key()), "{" + StringUtils.join(rm.getColumnFamilies().iterator(), ", ") + "}"));
final long entryLocation = reader.getFilePointer();
final RowMutation frm = rm;
// apply asynchronously on the MUTATION stage; captured finals (frm, segment,
// entryLocation, cfPositions) carry the per-entry state into the task
Runnable runnable = new WrappedRunnable() {
public void runMayThrow() throws IOException {
// keyspace may have been dropped since this entry was logged
if (Schema.instance.getKSMetaData(frm.getTable()) == null)
return;
final Table table = Table.open(frm.getTable());
// rebuild the mutation, keeping only column families that still need replay;
// cf names are suspect after drops/recreates, so everything is keyed on the cfid
// thing based on the cfid instead.
RowMutation newRm = new RowMutation(frm.getTable(), frm.key());
for (ColumnFamily columnFamily : frm.getColumnFamilies()) {
if (Schema.instance.getCF(columnFamily.id()) == null)
// null means the cf has been dropped
continue;
ReplayPosition rp = cfPositions.get(columnFamily.id());
// replay if this segment is newer than the cf's flushed one or, within the same
// segment, if we are after the replay position
if (segment > rp.segment || (segment == rp.segment && entryLocation > rp.position)) {
newRm.add(columnFamily);
replayedCount.incrementAndGet();
}
}
if (!newRm.isEmpty()) {
Table.open(newRm.getTable()).apply(newRm, false);
tablesRecovered.add(table);
}
}
};
futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
// throttle: don't let unbounded numbers of replay tasks pile up on the stage
if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
FBUtilities.waitOnFutures(futures);
futures.clear();
}
}
} finally {
FileUtils.closeQuietly(reader);
logger.info("Finished reading " + file);
}
}
for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet()) logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d", entry.getValue().intValue(), entry.getKey()));
// wait for all the writes to finish on the mutation stage
FBUtilities.waitOnFutures(futures);
logger.debug("Finished waiting on mutations from recovery");
// flush replayed tables
futures.clear();
for (Table table : tablesRecovered) futures.addAll(table.flush());
FBUtilities.waitOnFutures(futures);
return replayedCount.get();
}
Use of org.apache.cassandra.utils.WrappedRunnable in project eiger by wlloyd:
the scheduleSaving method of the AutoSavingCache class.
/**
 * (Re)schedules the periodic cache-save task.
 *
 * Any previously scheduled task is cancelled first (without interrupting a
 * save that is already running). A non-positive period disables saving.
 *
 * @param savePeriodInSeconds delay between saves; &lt;= 0 means "do not save"
 * @param keysToSave          number of keys to write on each save
 */
public void scheduleSaving(int savePeriodInSeconds, final int keysToSave) {
    if (saveTask != null) {
        // Do not interrupt an in-progress save
        saveTask.cancel(false);
        saveTask = null;
    }
    if (savePeriodInSeconds <= 0)
        return;

    Runnable writeTask = new WrappedRunnable() {
        public void runMayThrow() {
            submitWrite(keysToSave);
        }
    };
    saveTask = StorageService.optionalTasks.scheduleWithFixedDelay(writeTask, savePeriodInSeconds, savePeriodInSeconds, TimeUnit.SECONDS);
}
Use of org.apache.cassandra.utils.WrappedRunnable in project eiger by wlloyd:
the testGetSliceWithCutoff method of the TableTest class.
@Test
public void testGetSliceWithCutoff() throws Throwable {
// tests slicing against data from one row in a memtable and then flushed to an sstable
final Table table = Table.open("Keyspace1");
final ColumnFamilyStore cfStore = table.getColumnFamilyStore("Standard1");
final DecoratedKey ROW = Util.dk("row4");
// zero-padded names (col000..col299) keep lexicographic order == numeric order
final NumberFormat fmt = new DecimalFormat("000");
RowMutation rm = new RowMutation("Keyspace1", ROW.key);
ColumnFamily cf = ColumnFamily.create("Keyspace1", "Standard1");
// so if we go to 300, we'll get at least 4 blocks, which is plenty for testing.
for (int i = 0; i < 300; i++) cf.addColumn(column("col" + fmt.format(i), "omg!thisisthevalue!" + i, 1L));
rm.add(cf);
rm.apply();
// the same assertions are run by reTest both before and after flushing,
// so memtable and sstable slice paths are both exercised
Runnable verify = new WrappedRunnable() {
public void runMayThrow() throws Exception {
ColumnFamily cf;
// blocks are partitioned like this: 000-097, 098-193, 194-289, 290-299, assuming a 4k column index size.
assert DatabaseDescriptor.getColumnIndexSize() == 4096 : "Unexpected column index size, block boundaries won't be where tests expect them.";
// test forward, spanning a segment.
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.bytes("col096"), ByteBufferUtil.bytes("col099"), false, 4);
assertColumns(cf, "col096", "col097", "col098", "col099");
// test reversed, spanning a segment.
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.bytes("col099"), ByteBufferUtil.bytes("col096"), true, 4);
assertColumns(cf, "col096", "col097", "col098", "col099");
// test forward, within a segment.
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.bytes("col100"), ByteBufferUtil.bytes("col103"), false, 4);
assertColumns(cf, "col100", "col101", "col102", "col103");
// test reversed, within a segment.
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.bytes("col103"), ByteBufferUtil.bytes("col100"), true, 4);
assertColumns(cf, "col100", "col101", "col102", "col103");
// test forward from beginning, spanning a segment.
// col000-col099
String[] strCols = new String[100];
for (int i = 0; i < 100; i++) strCols[i] = "col" + fmt.format(i);
// empty start buffer means "from the beginning of the row"
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes("col099"), false, 100);
assertColumns(cf, strCols);
// test reversed, from end, spanning a segment.
cf = cfStore.getColumnFamily(ROW, new QueryPath("Standard1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes("col288"), true, 12);
assertColumns(cf, "col288", "col289", "col290", "col291", "col292", "col293", "col294", "col295", "col296", "col297", "col298", "col299");
}
};
reTest(table.getColumnFamilyStore("Standard1"), verify);
}
Use of org.apache.cassandra.utils.WrappedRunnable in project eiger by wlloyd:
the testGetRowNoColumns method of the TableTest class.
@Test
public void testGetRowNoColumns() throws Throwable {
    final Table table = Table.open("Keyspace2");
    final ColumnFamilyStore cfStore = table.getColumnFamilyStore("Standard3");

    // Write a single column so the row itself exists.
    ColumnFamily cf = ColumnFamily.create("Keyspace2", "Standard3");
    cf.addColumn(column("col1", "val1", 1L));
    RowMutation rm = new RowMutation("Keyspace2", TEST_KEY.key);
    rm.add(cf);
    rm.apply();

    // Each query below should match nothing; reTest runs the checks both
    // before and after a flush.
    Runnable verify = new WrappedRunnable() {
        public void runMayThrow() throws Exception {
            final QueryPath path = new QueryPath("Standard3");
            // empty names filter -> no columns
            assertColumns(cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY, path, new TreeSet<ByteBuffer>())));
            // zero-count slice -> no columns
            assertColumns(cfStore.getColumnFamily(QueryFilter.getSliceFilter(TEST_KEY, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 0)));
            // filter naming a nonexistent column -> no columns
            assertColumns(cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY, path, ByteBufferUtil.bytes("col99"))));
        }
    };
    reTest(table.getColumnFamilyStore("Standard3"), verify);
}
Aggregations