Example usage of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project:
class RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest, method shouldLogWarningWhenSettingWalOptions.
@Test
public void shouldLogWarningWhenSettingWalOptions() throws Exception {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class)) {
        final RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter adapter =
            new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(new DBOptions(), new ColumnFamilyOptions());

        // Invoke every WAL-related setter on the adapter; each invocation is expected to log a warning.
        for (final Method adapterMethod : RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class.getDeclaredMethods()) {
            if (walRelatedMethods.contains(adapterMethod.getName())) {
                adapterMethod.invoke(adapter, getDBOptionsParameters(adapterMethod.getParameterTypes()));
            }
        }

        // Collect the messages of all WARN-level log events that were captured above.
        final Set<String> warnMessages = appender.getEvents()
            .stream()
            .filter(event -> event.getLevel().equals("WARN"))
            .map(LogCaptureAppender.Event::getMessage)
            .collect(Collectors.toSet());

        // Each WAL option name must appear in exactly the warning text the adapter emits.
        for (final String option : Arrays.asList("walDir", "walFilter", "walRecoveryMode", "walBytesPerSync", "walSizeLimitMB", "manualWalFlush", "maxTotalWalSize", "walTtlSeconds")) {
            assertThat(warnMessages, hasItem(String.format("WAL is explicitly disabled by Streams in RocksDB. Setting option '%s' will be ignored", option)));
        }
    }
}
Example usage of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project:
class RocksDBTimestampedStoreTest, method shouldOpenExistingStoreInRegularMode.
@Test
public void shouldOpenExistingStoreInRegularMode() throws Exception {
    // prepare store: open it once and write a single timestamped entry, then close,
    // so that a pre-existing on-disk store is available for the re-open below.
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes());
    rocksDBStore.close();
    // re-open store: because the existing data already uses the timestamped format,
    // the store is expected to open in "regular mode" and log exactly that.
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    } finally {
        rocksDBStore.close();
    }
    // verify store: open the raw RocksDB instance directly with both column families
    // (the default "no timestamp" family and the "keyValueWithTimestamp" family) and
    // check where the record physically lives.
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    try {
        // RocksDB.open populates `columnFamilies` with one handle per descriptor, in order.
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        // The key must NOT exist in the plain (no-timestamp) column family...
        assertThat(db.get(noTimestampColumnFamily, "key".getBytes()), new IsNull<>());
        assertThat(db.getLongProperty(noTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(0L));
        // ...but must exist in the timestamped family: 11 bytes = "timestamped".length()
        // (8-byte timestamp prefix plus value is the stored layout — TODO confirm; the
        // visible assertion only pins the total length).
        assertThat(db.get(withTimestampColumnFamily, "key".getBytes()).length, is(11));
        assertThat(db.getLongProperty(withTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(1L));
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
}
Example usage of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project:
class StateDirectoryTest, method shouldNotDeleteAppDirWhenCleanUpIfNotEmpty.
@Test
public void shouldNotDeleteAppDirWhenCleanUpIfNotEmpty() throws IOException {
    // Make the task directory non-empty by creating a nested directory inside it.
    final TaskId task = new TaskId(0, 0);
    final File taskDir = directory.getOrCreateDirectoryForTask(task);
    final File nestedDir = new File(taskDir, "testFile");
    assertThat(nestedDir.mkdir(), is(true));
    assertThat(directory.directoryForTaskIsEmpty(task), is(false));
    // Create a dummy file in appDir; for this, appDir will not be empty after cleanup.
    final File dummyFile = new File(appDir, "dummy");
    assertTrue(dummyFile.createNewFile());
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        // call StateDirectory#clean; the non-empty app dir must survive and be logged.
        directory.clean();
        assertThat(
            appender.getMessages(),
            hasItem(endsWith(String.format("Failed to delete state store directory of %s for it is not empty", appDir.getAbsolutePath())))
        );
    }
}
Example usage of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project:
class StateDirectoryTest, method shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself.
@Test
public void shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself() {
    // Seed one non-empty task directory so the cleaner has something to remove.
    final File taskDir = directory.getOrCreateDirectoryForTask(new TaskId(2, 0));
    assertTrue(new File(taskDir, "store").mkdir());
    assertEquals(1, directory.listAllTaskDirectories().size());
    assertEquals(1, directory.listNonEmptyTaskDirectories().size());

    try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        // Advance mock time past the cleanup delay, then trigger the cleaner.
        time.sleep(5000);
        directory.cleanRemovedTasks(0);

        // The obsolete task directory itself must be gone, and the deletion logged.
        assertFalse(taskDir.exists());
        assertEquals(0, directory.listAllTaskDirectories().size());
        assertEquals(0, directory.listNonEmptyTaskDirectories().size());
        assertThat(logAppender.getMessages(), hasItem(containsString("Deleting obsolete state directory")));
    }
}
Example usage of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project:
class StateDirectoryTest, method shouldLogStateDirCleanerMessage.
@Test
public void shouldLogStateDirCleanerMessage() {
    // Create a non-empty task directory so the cleaner considers it for removal.
    final TaskId task = new TaskId(0, 0);
    final File taskDir = directory.getOrCreateDirectoryForTask(task);
    final File nestedDir = new File(taskDir, "testFile");
    assertThat(nestedDir.mkdir(), is(true));
    assertThat(directory.directoryForTaskIsEmpty(task), is(false));

    final long cleanupDelayMs = 0;
    try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        // With zero delay, any elapsed time qualifies the directory for cleanup.
        time.sleep(5000);
        directory.cleanRemovedTasks(cleanupDelayMs);
        assertThat(logAppender.getMessages(), hasItem(endsWith("ms has elapsed (cleanup delay is " + cleanupDelayMs + "ms).")));
    }
}
Aggregations