Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From the class CommitLogTest, the method testDeleteIfNotDirty:
@Test
public void testDeleteIfNotDirty() throws Exception
{
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(STANDARD1);
    ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);

    // Mutation sized to roughly a quarter of a commit log segment
    Mutation rm = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                  .clustering("bytes")
                  .add("val", ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize() / 4) - 1))
                  .build();

    // Adding it twice (won't change segment)
    CommitLog.instance.add(rm);
    CommitLog.instance.add(rm);

    assertEquals(1, CommitLog.instance.segmentManager.getActiveSegments().size());

    // "Flush": this won't delete anything
    TableId id1 = rm.getTableIds().iterator().next();
    CommitLog.instance.sync();
    CommitLog.instance.discardCompletedSegments(id1, CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());

    assertEquals(1, CommitLog.instance.segmentManager.getActiveSegments().size());

    // Adding a new mutation on another CF, large enough (including CL entry overhead) that a new segment is created
    Mutation rm2 = new RowUpdateBuilder(cfs2.metadata(), 0, "k")
                   .clustering("bytes")
                   .add("val", ByteBuffer.allocate(DatabaseDescriptor.getMaxMutationSize() - 200))
                   .build();
    CommitLog.instance.add(rm2);
    // also forces a new segment, since each entry-with-overhead is just under half the CL size
    CommitLog.instance.add(rm2);
    CommitLog.instance.add(rm2);

    Collection<CommitLogSegment> segments = CommitLog.instance.segmentManager.getActiveSegments();
    assertEquals(String.format("Expected 3 segments but got %d (%s)", segments.size(), getDirtyCFIds(segments)),
                 3, segments.size());

    // "Flush" the second cf: the earlier segments should be deleted, since we
    // didn't write anything on cf1 since the last flush (and we flush cf2)
    TableId id2 = rm2.getTableIds().iterator().next();
    CommitLog.instance.discardCompletedSegments(id2, CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());

    segments = CommitLog.instance.segmentManager.getActiveSegments();
    // Only the current active segment should remain
    assertEquals(String.format("Expected 1 segment but got %d (%s)", segments.size(), getDirtyCFIds(segments)),
                 1, segments.size());
}
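The assertion messages above call a getDirtyCFIds helper that is not part of this snippet. A minimal sketch of what it could look like, assuming CommitLogSegment exposes a getDirtyTableIds() method returning the TableIds with unflushed data in that segment (and java.util.stream.Collectors imported):

private String getDirtyCFIds(Collection<CommitLogSegment> segments)
{
    // Hypothetical diagnostic helper: collect the distinct dirty TableIds
    // across all active segments for the failure message.
    return "Dirty tableIds: " + segments.stream()
                                        .flatMap(s -> s.getDirtyTableIds().stream())
                                        .distinct()
                                        .map(TableId::toString)
                                        .collect(Collectors.joining(", "));
}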
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From the class CommitLogUpgradeTest, the method testRestore:
public void testRestore(String location) throws IOException, InterruptedException
{
    Properties prop = new Properties();
    // try-with-resources so the stream is closed after loading the properties
    try (FileInputStream propStream = new FileInputStream(new File(location + File.separatorChar + PROPERTIES_FILE)))
    {
        prop.load(propStream);
    }
    int hash = Integer.parseInt(prop.getProperty(HASH_PROPERTY));
    int cells = Integer.parseInt(prop.getProperty(CELLS_PROPERTY));

    String cfidString = prop.getProperty(CFID_PROPERTY);
    if (cfidString != null)
    {
        TableId tableId = TableId.fromString(cfidString);
        if (Schema.instance.getTableMetadata(tableId) == null)
            Schema.instance.load(KeyspaceMetadata.create(KEYSPACE,
                                                         KeyspaceParams.simple(1),
                                                         Tables.of(metadata.unbuild().id(tableId).build())));
    }

    Hasher hasher = new Hasher();
    CommitLogTestReplayer replayer = new CommitLogTestReplayer(hasher);
    File[] files = new File(location).listFiles((file, name) -> name.endsWith(".log"));
    replayer.replayFiles(files);

    Assert.assertEquals(cells, hasher.cells);
    Assert.assertEquals(hash, hasher.hash);
}
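For context, the properties file consumed here pairs a checksum and a cell count with the id of the originating table. A hedged sketch of the writer side that would produce such a file, reusing the same property-key constants; the hasher and metadata fixtures are assumed, not shown in this snippet:

// Hypothetical producer of the metadata file read by testRestore.
// HASH_PROPERTY, CELLS_PROPERTY and CFID_PROPERTY are the keys looked
// up above; TableMetadata.id carries the TableId being recorded.
Properties prop = new Properties();
prop.setProperty(HASH_PROPERTY, Integer.toString(hasher.hash));
prop.setProperty(CELLS_PROPERTY, Integer.toString(hasher.cells));
prop.setProperty(CFID_PROPERTY, metadata.id.toString());
try (FileOutputStream out = new FileOutputStream(new File(location, PROPERTIES_FILE)))
{
    prop.store(out, "commit log upgrade test metadata");
}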
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From the class SessionInfoTest, the method testTotals:
/**
 * Test that totals are collected correctly across stream summaries.
 */
@Test
public void testTotals()
{
    TableId tableId = TableId.generate();
    InetAddress local = FBUtilities.getLocalAddress();

    Collection<StreamSummary> summaries = new ArrayList<>();
    for (int i = 0; i < 10; i++)
    {
        StreamSummary summary = new StreamSummary(tableId, i, (i + 1) * 10);
        summaries.add(summary);
    }

    StreamSummary sending = new StreamSummary(tableId, 10, 100);
    SessionInfo info = new SessionInfo(local, 0, local, summaries, Collections.singleton(sending), StreamSession.State.PREPARING);

    // totals are summed across the summaries: 0 + 1 + ... + 9 = 45 files, 10 + 20 + ... + 100 = 550 bytes
    assert info.getTotalFilesToReceive() == 45;
    assert info.getTotalFilesToSend() == 10;
    assert info.getTotalSizeToReceive() == 550;
    assert info.getTotalSizeToSend() == 100;

    // still, no files received or sent
    assert info.getTotalFilesReceived() == 0;
    assert info.getTotalFilesSent() == 0;

    // receive in progress
    info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 50, 100));

    // still in progress, but not completed yet
    assert info.getTotalSizeReceived() == 50;
    assert info.getTotalSizeSent() == 0;
    assert info.getTotalFilesReceived() == 0;
    assert info.getTotalFilesSent() == 0;

    info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 100, 100));

    // 1 file should be completed
    assert info.getTotalSizeReceived() == 100;
    assert info.getTotalSizeSent() == 0;
    assert info.getTotalFilesReceived() == 1;
    assert info.getTotalFilesSent() == 0;
}
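These snippets construct TableIds three different ways: TableId.generate() here, TableId.fromString() in the upgrade test above, and TableId.deserialize() in the cache loader below. A quick hedged illustration of the string roundtrip between the first two:

// A fresh random id rebuilt from its string form should compare equal;
// a minimal sketch of the factory methods used across these snippets.
TableId fresh = TableId.generate();
TableId parsed = TableId.fromString(fresh.toString());
assert fresh.equals(parsed);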
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From the class AutoSavingCache, the method loadSaved:
public int loadSaved()
{
    int count = 0;
    long start = System.nanoTime();

    // modern format, allows both key and value (so key cache load can be purely sequential)
    File dataPath = getCacheDataPath(CURRENT_VERSION);
    File crcPath = getCacheCrcPath(CURRENT_VERSION);
    if (dataPath.exists() && crcPath.exists())
    {
        DataInputStreamPlus in = null;
        try
        {
            logger.info("reading saved cache {}", dataPath);
            in = new DataInputStreamPlus(new LengthAvailableInputStream(new BufferedInputStream(streamFactory.getInputStream(dataPath, crcPath)), dataPath.length()));

            // Check that the schema has not changed, since CFs are looked up by name, which is ambiguous
            UUID schemaVersion = new UUID(in.readLong(), in.readLong());
            if (!schemaVersion.equals(Schema.instance.getVersion()))
                throw new RuntimeException("Cache schema version " + schemaVersion + " does not match current schema version " + Schema.instance.getVersion());

            ArrayDeque<Future<Pair<K, V>>> futures = new ArrayDeque<Future<Pair<K, V>>>();
            while (in.available() > 0)
            {
                // tableId and indexName are serialized by the serializers in CacheService;
                // that is delegated there because there are serializer-specific conditions
                // under which a cache key is skipped and not written
                TableId tableId = TableId.deserialize(in);
                String indexName = in.readUTF();
                if (indexName.isEmpty())
                    indexName = null;

                ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(tableId);
                if (indexName != null && cfs != null)
                    cfs = cfs.indexManager.getIndexByName(indexName).getBackingTable().orElse(null);

                Future<Pair<K, V>> entryFuture = cacheLoader.deserialize(in, cfs);
                // the key cache loader can return null, e.g. if the SSTable no longer exists
                if (entryFuture == null)
                    continue;

                futures.offer(entryFuture);
                count++;

                /*
                 * It would be unwise to accrue an unbounded number of pending futures,
                 * so this loop keeps the number pending bounded.
                 */
                do
                {
                    while (futures.peek() != null && futures.peek().isDone())
                    {
                        Future<Pair<K, V>> future = futures.poll();
                        Pair<K, V> entry = future.get();
                        if (entry != null && entry.right != null)
                            put(entry.left, entry.right);
                    }

                    if (futures.size() > 1000)
                        Thread.yield();
                } while (futures.size() > 1000);
            }

            Future<Pair<K, V>> future = null;
            while ((future = futures.poll()) != null)
            {
                Pair<K, V> entry = future.get();
                if (entry != null && entry.right != null)
                    put(entry.left, entry.right);
            }
        }
        catch (CorruptFileException e)
        {
            JVMStabilityInspector.inspectThrowable(e);
            logger.warn(String.format("Non-fatal checksum error reading saved cache %s", dataPath.getAbsolutePath()), e);
        }
        catch (Throwable t)
        {
            JVMStabilityInspector.inspectThrowable(t);
            logger.info(String.format("Harmless error reading saved cache %s", dataPath.getAbsolutePath()), t);
        }
        finally
        {
            FileUtils.closeQuietly(in);
        }
    }

    if (logger.isTraceEnabled())
        logger.trace("completed reading ({} ms; {} keys) saved cache {}",
                     TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), count, dataPath);
    return count;
}
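The read order above implies a matching on-disk layout: the schema version as two longs, then for each entry a TableId, a UTF index name (empty when absent), and the serializer-specific payload. A heavily hedged sketch of what the writer side might look like; the key fields, serialize signatures, and iteration are assumptions for illustration, not the actual AutoSavingCache writer:

// Hypothetical writer counterpart, mirroring the read order in loadSaved.
UUID schemaVersion = Schema.instance.getVersion();
out.writeLong(schemaVersion.getMostSignificantBits());
out.writeLong(schemaVersion.getLeastSignificantBits());
for (K key : keys)
{
    key.tableId.serialize(out);                               // assumed field/method names
    out.writeUTF(key.indexName == null ? "" : key.indexName); // empty string means "no index"
    cacheLoader.serialize(key, out, cfs);                     // serializer-specific payload
}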
Use of org.apache.cassandra.schema.TableId in project cassandra by apache.
From the class CommitLogReplayer, the method blockForWrites:
/**
* Flushes all keyspaces associated with this replayer in parallel, blocking until their flushes are complete.
* @return the number of mutations replayed
*/
public int blockForWrites()
{
    for (Map.Entry<TableId, AtomicInteger> entry : commitLogReader.getInvalidMutations())
        logger.warn("Skipped {} mutations from unknown (probably removed) CF with id {}", entry.getValue(), entry.getKey());

    // wait for all the writes to finish on the mutation stage
    FBUtilities.waitOnFutures(futures);
    logger.trace("Finished waiting on mutations from recovery");

    // flush replayed keyspaces
    futures.clear();
    boolean flushingSystem = false;

    // note: this local list shadows the mutation-stage futures field cleared above
    List<Future<?>> futures = new ArrayList<Future<?>>();
    for (Keyspace keyspace : keyspacesReplayed)
    {
        if (keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
            flushingSystem = true;

        futures.addAll(keyspace.flush());
    }

    // also flush batchlog in case of any MV updates
    if (!flushingSystem)
        futures.add(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceFlush());

    FBUtilities.waitOnFutures(futures);
    return replayedCount.get();
}
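The invalid-mutation counts logged at the top of blockForWrites are accumulated per TableId while segments are read. A hedged sketch of how such a counter might be maintained inside the reader; the field and helper names are assumptions for illustration:

// Hypothetical per-table counter for mutations whose table is unknown;
// the map matches the Map.Entry<TableId, AtomicInteger> iterated above.
private final Map<TableId, AtomicInteger> invalidMutations = new HashMap<>();

private void trackInvalidMutation(TableId id)
{
    invalidMutations.computeIfAbsent(id, k -> new AtomicInteger(0)).incrementAndGet();
}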