use of org.apache.cassandra.schema.TableId in project cassandra by apache.
the class OutOfSpaceTest method flushAndExpectError.
public void flushAndExpectError() throws InterruptedException, ExecutionException {
    try {
        Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable()).forceFlush().get();
        fail("FSWriteError expected.");
    } catch (ExecutionException e) {
        // Correct path: the flush must fail with an FSWriteError as its cause.
        Assert.assertTrue(e.getCause() instanceof FSWriteError);
    }
    // Make sure the commit log wasn't discarded: after the failed flush, at least
    // one active segment must still be dirty for this table's TableId.
    TableId tableId = currentTableMetadata().id;
    for (CommitLogSegment segment : CommitLog.instance.segmentManager.getActiveSegments())
        if (segment.getDirtyTableIds().contains(tableId))
            return;
    fail("Expected commit log to remain dirty for the affected table.");
}
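The dirty-segment check works because TableId has value-based equality: contains(tableId) succeeds regardless of which TableId instance the segment stored. A minimal standalone sketch of that round-trip, assuming TableId's public generate/fromString/toString API:

import org.apache.cassandra.schema.TableId;

public class TableIdRoundTripSketch {
    public static void main(String[] args) {
        // Generate a fresh id, serialize it, and parse it back.
        TableId id = TableId.generate();
        TableId parsed = TableId.fromString(id.toString());
        // Equality (and hashing) is value-based, so set and map lookups
        // succeed even across distinct TableId instances.
        assert id.equals(parsed) && id.hashCode() == parsed.hashCode();
    }
}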
use of org.apache.cassandra.schema.TableId in project cassandra by apache.
the class DescribeStatementTest method testDescribeTableWithInternals.
@Test
public void testDescribeTableWithInternals() throws Throwable {
    String table = createTable("CREATE TABLE %s (pk1 text, pk2 int, c int, s decimal static, v1 text, v2 int, v3 int, PRIMARY KEY ((pk1, pk2), c ))");
    TableId id = Schema.instance.getTableMetadata(KEYSPACE, table).id;
    String tableCreateStatement = "CREATE TABLE " + KEYSPACE + "." + table + " (\n"
                                + "    pk1 text,\n"
                                + "    pk2 int,\n"
                                + "    c int,\n"
                                + "    s decimal static,\n"
                                + "    v1 text,\n"
                                + "    v2 int,\n"
                                + "    v3 int,\n"
                                + "    PRIMARY KEY ((pk1, pk2), c)\n"
                                + ") WITH ID = " + id + "\n"
                                + "    AND CLUSTERING ORDER BY (c ASC)\n"
                                + "    AND " + tableParametersCql();
    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table + " WITH INTERNALS"),
                  row(KEYSPACE, "table", table, tableCreateStatement));
    // Dropping a column with an explicit timestamp must be replayed by DESCRIBE ... WITH INTERNALS.
    String dropStatement = "ALTER TABLE " + KEYSPACE + "." + table + " DROP v3 USING TIMESTAMP 1589286942065000;";
    execute(dropStatement);
    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table + " WITH INTERNALS"),
                  row(KEYSPACE, "table", table, tableCreateStatement + "\n" + dropStatement));
    // Re-adding the column appends a matching ALTER ... ADD statement.
    String addStatement = "ALTER TABLE " + KEYSPACE + "." + table + " ADD v3 int;";
    execute(addStatement);
    assertRowsNet(executeDescribeNet("DESCRIBE TABLE " + KEYSPACE + "." + table + " WITH INTERNALS"),
                  row(KEYSPACE, "table", table, tableCreateStatement + "\n" + dropStatement + "\n" + addStatement));
}
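The id printed after WITH ID = comes straight from the table's metadata, which is how the test builds its expected output. A minimal sketch of that lookup, assuming a populated Schema.instance inside a running server context; the keyspace and table names are hypothetical:

import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.schema.TableMetadata;

// Hypothetical names; any existing keyspace/table pair would do.
TableMetadata metadata = Schema.instance.getTableMetadata("my_keyspace", "my_table");
if (metadata != null) {
    TableId id = metadata.id;
    // The same value DESCRIBE TABLE ... WITH INTERNALS prints.
    System.out.println("WITH ID = " + id);
}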
use of org.apache.cassandra.schema.TableId in project cassandra by apache.
the class DiskSpaceMetricsTest method indexDownsampleCancelLastSSTable.
private static void indexDownsampleCancelLastSSTable(ColumnFamilyStore cfs) {
    List<SSTableReader> sstables = Lists.newArrayList(cfs.getSSTables(SSTableSet.CANONICAL));
    LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
    Map<TableId, LifecycleTransaction> txns = ImmutableMap.of(cfs.metadata.id, txn);
    // Fail on the last sstable: isStopRequested is called 3 times per sstable,
    // so starting at (3 * sstables.size()) - 1 makes the countdown hit zero on the last one.
    AtomicInteger countdown = new AtomicInteger(3 * sstables.size() - 1);
    IndexSummaryRedistribution redistribution = new IndexSummaryRedistribution(txns, 0, 0) {
        @Override
        public boolean isStopRequested() {
            return countdown.decrementAndGet() == 0;
        }
    };
    try {
        IndexSummaryManager.redistributeSummaries(redistribution);
        Assert.fail("Should throw CompactionInterruptedException");
    } catch (CompactionInterruptedException e) {
        // Expected: redistribution was interrupted on the last sstable.
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            FBUtilities.closeAll(txns.values());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
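The cancellation trick above, overriding isStopRequested so it flips to true after a fixed number of polls, is reusable for testing any poll-based cancellation loop. A Cassandra-free sketch of the same idea (the Cancellable interface and all names here are illustrative, not from the Cassandra API):

import java.util.concurrent.atomic.AtomicInteger;

public class StopAfterNPollsSketch {
    interface Cancellable {
        boolean isStopRequested();
    }

    public static void main(String[] args) {
        // Flip to "stop" on the 5th poll, mimicking the countdown above.
        AtomicInteger countdown = new AtomicInteger(5);
        Cancellable task = () -> countdown.decrementAndGet() == 0;
        int polls = 0;
        while (!task.isStopRequested())
            polls++;
        assert polls == 4; // four polls returned false; the fifth requested the stop
    }
}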
use of org.apache.cassandra.schema.TableId in project cassandra by apache.
the class SessionInfoTest method testTotals.
/**
 * Test that per-session totals are collected correctly.
 */
@Test
public void testTotals() {
    TableId tableId = TableId.generate();
    InetAddressAndPort local = FBUtilities.getLocalAddressAndPort();
    Collection<StreamSummary> summaries = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        StreamSummary summary = new StreamSummary(tableId, i, (i + 1) * 10);
        summaries.add(summary);
    }
    StreamSummary sending = new StreamSummary(tableId, 10, 100);
    SessionInfo info = new SessionInfo(local, 0, local, summaries, Collections.singleton(sending), StreamSession.State.PREPARING);
    // Receiving totals: files = 0 + 1 + ... + 9 = 45, size = 10 + 20 + ... + 100 = 550.
    assert info.getTotalFilesToReceive() == 45;
    assert info.getTotalFilesToSend() == 10;
    assert info.getTotalSizeToReceive() == 550;
    assert info.getTotalSizeToSend() == 100;
    // Nothing has been received or sent yet.
    assert info.getTotalFilesReceived() == 0;
    assert info.getTotalFilesSent() == 0;
    // Receive in progress: 50 of 100 bytes.
    info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 50, 100));
    // Bytes count toward the size total, but the file is not complete yet.
    assert info.getTotalSizeReceived() == 50;
    assert info.getTotalSizeSent() == 0;
    assert info.getTotalFilesReceived() == 0;
    assert info.getTotalFilesSent() == 0;
    info.updateProgress(new ProgressInfo(local, 0, "test.txt", ProgressInfo.Direction.IN, 100, 100));
    // Once all 100 bytes have arrived, the file counts as completed.
    assert info.getTotalSizeReceived() == 100;
    assert info.getTotalSizeSent() == 0;
    assert info.getTotalFilesReceived() == 1;
    assert info.getTotalFilesSent() == 0;
}
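The expected receive totals follow directly from the loop: files to receive is 0 + 1 + ... + 9 and size to receive is 10 + 20 + ... + 100. A quick standalone check of that arithmetic:

int expectedFiles = 0, expectedSize = 0;
for (int i = 0; i < 10; i++) {
    expectedFiles += i;           // 0 + 1 + ... + 9 = 45
    expectedSize += (i + 1) * 10; // 10 + 20 + ... + 100 = 550
}
assert expectedFiles == 45 && expectedSize == 550;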
use of org.apache.cassandra.schema.TableId in project cassandra by apache.
the class ActiveRepairService method prepareForRepair.
public UUID prepareForRepair(UUID parentRepairSession, InetAddress coordinator, Set<InetAddress> endpoints, RepairOption options, List<ColumnFamilyStore> columnFamilyStores) {
    // We only want a repairedAt timestamp for incremental repairs; for non-incremental
    // repairs, UNREPAIRED_SSTABLE preserves repairedAt on streamed sstables.
    long repairedAt = options.isIncremental() ? Clock.instance.currentTimeMillis() : ActiveRepairService.UNREPAIRED_SSTABLE;
    registerParentRepairSession(parentRepairSession, coordinator, columnFamilyStores, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal());
    final CountDownLatch prepareLatch = new CountDownLatch(endpoints.size());
    final AtomicBoolean status = new AtomicBoolean(true);
    final Set<String> failedNodes = Collections.synchronizedSet(new HashSet<String>());
    IAsyncCallbackWithFailure callback = new IAsyncCallbackWithFailure() {

        public void response(MessageIn msg) {
            prepareLatch.countDown();
        }

        public boolean isLatencyForSnitch() {
            return false;
        }

        public void onFailure(InetAddress from, RequestFailureReason failureReason) {
            status.set(false);
            failedNodes.add(from.getHostAddress());
            prepareLatch.countDown();
        }
    };
    List<TableId> tableIds = new ArrayList<>(columnFamilyStores.size());
    for (ColumnFamilyStore cfs : columnFamilyStores)
        tableIds.add(cfs.metadata.id);
    for (InetAddress neighbour : endpoints) {
        if (FailureDetector.instance.isAlive(neighbour)) {
            PrepareMessage message = new PrepareMessage(parentRepairSession, tableIds, options.getRanges(), options.isIncremental(), repairedAt, options.isGlobal());
            MessageOut<RepairMessage> msg = message.createMessage();
            MessagingService.instance().sendRR(msg, neighbour, callback, DatabaseDescriptor.getRpcTimeout(), true);
        } else {
            // Dead node: mark the prepare phase failed, but still count down
            // so the latch can complete.
            status.set(false);
            failedNodes.add(neighbour.getHostAddress());
            prepareLatch.countDown();
        }
    }
    try {
        prepareLatch.await(DatabaseDescriptor.getRpcTimeout(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        removeParentRepairSession(parentRepairSession);
        throw new RuntimeException("Did not get replies from all endpoints. List of failed endpoint(s): " + failedNodes, e);
    }
    if (!status.get()) {
        removeParentRepairSession(parentRepairSession);
        throw new RuntimeException("Did not get positive replies from all endpoints. List of failed endpoint(s): " + failedNodes);
    }
    return parentRepairSession;
}
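The prepare phase above is an instance of a common fan-out pattern: count the latch down once per endpoint on success or failure, record failures in shared state, then await with a timeout before inspecting the result. A minimal, Cassandra-free sketch of the same pattern (the endpoint addresses and the simulatePrepare helper are hypothetical):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class PrepareFanOutSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> endpoints = List.of("10.0.0.1", "10.0.0.2"); // hypothetical
        CountDownLatch latch = new CountDownLatch(endpoints.size());
        AtomicBoolean ok = new AtomicBoolean(true);
        for (String ep : endpoints) {
            new Thread(() -> {
                // Stand-in for the real PrepareMessage round trip.
                boolean success = simulatePrepare(ep);
                if (!success)
                    ok.set(false);
                latch.countDown(); // count down on success and on failure
            }).start();
        }
        // Bound the wait, as prepareForRepair does with the RPC timeout.
        if (!latch.await(5, TimeUnit.SECONDS) || !ok.get())
            throw new RuntimeException("prepare failed or timed out");
        System.out.println("all endpoints prepared");
    }

    private static boolean simulatePrepare(String endpoint) {
        return true;
    }
}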