Use of org.apache.kafka.streams.processor.internals.StateDirectory in project kafka by apache.
The class StreamThreadStateStoreProviderTest, method before().
@Before
public void before() throws IOException {
    // Topology with one source, one processor, an in-memory key-value store and a persistent windowed store.
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("the-source", "the-source");
    builder.addProcessor("the-processor", new MockProcessorSupplier(), "the-source");
    builder.addStateStore(Stores.create("kv-store").withStringKeys().withStringValues().inMemory().build(), "the-processor");
    builder.addStateStore(Stores.create("window-store").withStringKeys().withStringValues().persistent().windowed(10, 10, 2, false).build(), "the-processor");

    // Streams configuration pointing at a temporary state directory.
    final Properties properties = new Properties();
    final String applicationId = "applicationId";
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    stateDir = TestUtils.tempDirectory();
    final String stateConfigDir = stateDir.getPath();
    properties.put(StreamsConfig.STATE_DIR_CONFIG, stateConfigDir);
    final StreamsConfig streamsConfig = new StreamsConfig(properties);

    final MockClientSupplier clientSupplier = new MockClientSupplier();
    configureRestoreConsumer(clientSupplier, "applicationId-kv-store-changelog");
    configureRestoreConsumer(clientSupplier, "applicationId-window-store-changelog");
    builder.setApplicationId(applicationId);
    final ProcessorTopology topology = builder.build(null);

    // Two tasks over the same topology; the StateDirectory points at the temporary state dir.
    final Map<TaskId, StreamTask> tasks = new HashMap<>();
    stateDirectory = new StateDirectory(applicationId, stateConfigDir, new MockTime());
    taskOne = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology, new TaskId(0, 0));
    tasks.put(new TaskId(0, 0), taskOne);
    taskTwo = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology, new TaskId(0, 1));
    tasks.put(new TaskId(0, 1), taskTwo);
    storesAvailable = true;

    // Stub thread that exposes the two tasks and reports whether stores are available.
    thread = new StreamThread(builder, streamsConfig, clientSupplier, applicationId, "clientId", UUID.randomUUID(),
            new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {
        @Override
        public Map<TaskId, StreamTask> tasks() {
            return tasks;
        }

        @Override
        public boolean isInitialized() {
            return storesAvailable;
        }
    };
    provider = new StreamThreadStateStoreProvider(thread);
}
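For orientation, a test built on this fixture would typically ask the provider for the stores registered above. The following is a minimal sketch, not part of the original snippet; the QueryableStoreTypes helper and the expectation of one store instance per task are assumptions based on the same Kafka Streams version:

@Test
public void shouldFindKeyValueStores() {
    // Hypothetical test method; assumes QueryableStoreTypes from the same Kafka Streams version.
    final List<ReadOnlyKeyValueStore<String, String>> stores =
        provider.stores("kv-store", QueryableStoreTypes.<String, String>keyValueStore());
    // One store instance is expected per task (0_0 and 0_1) created in before().
    assertEquals(2, stores.size());
}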
Use of org.apache.kafka.streams.processor.internals.StateDirectory in project kafka by apache.
The class KafkaStreams, method cleanUp().
/**
* Do a clean up of the local {@link StateStore} directory ({@link StreamsConfig#STATE_DIR_CONFIG}) by deleting all
* data with regard to the {@link StreamsConfig#APPLICATION_ID_CONFIG application ID}.
* <p>
* May only be called either before this {@code KafkaStreams} instance is {@link #start() started} or after the
* instance is {@link #close() closed}.
* <p>
* Calling this method triggers a restore of local {@link StateStore}s on the next {@link #start() application start}.
*
* @throws IllegalStateException if the instance is currently running
*/
public void cleanUp() {
    if (state.isRunning()) {
        throw new IllegalStateException("Cannot clean up while running.");
    }

    final String appId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG);
    final String stateDir = config.getString(StreamsConfig.STATE_DIR_CONFIG);
    final String localApplicationDir = stateDir + File.separator + appId;
    log.debug("{} Removing local Kafka Streams application data in {} for application {}.",
        logPrefix, localApplicationDir, appId);

    final StateDirectory stateDirectory = new StateDirectory(appId, stateDir, Time.SYSTEM);
    // A cleanup delay of 0 makes every task directory that is not locked eligible for removal.
    stateDirectory.cleanRemovedTasks(0);
}
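For reference, the lifecycle constraint described in the Javadoc means cleanUp() is called either before start() or after close(). A minimal usage sketch follows; the builder and streamsConfig variables are placeholders, not part of the original code:

// Hypothetical caller; cleanUp() may only run while the instance is not running.
final KafkaStreams streams = new KafkaStreams(builder, streamsConfig);
streams.cleanUp();   // wipe local state for this application.id before starting
streams.start();     // local state stores are restored on the next start
// ... process records ...
streams.close();
streams.cleanUp();   // optionally wipe local state again after shutdown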