Use of co.cask.cdap.proto.id.TopicId in project cdap by caskdata.
In class HBaseTableCoprocessorTestRun, method testInvalidTx:
@Test
public void testInvalidTx() throws Exception {
  try (MetadataTable metadataTable = getMetadataTable();
       MessageTable messageTable = getMessageTable()) {
    TopicId topicId = NamespaceId.DEFAULT.topic("invalidTx");
    TopicMetadata topic = new TopicMetadata(topicId, TopicMetadata.TTL_KEY, "1000000",
                                            TopicMetadata.GENERATION_KEY, Integer.toString(GENERATION));
    metadataTable.createTopic(topic);
    List<MessageTable.Entry> entries = new ArrayList<>();
    long invalidTxWritePtr = invalidList.toRawList().get(0);
    entries.add(new TestMessageEntry(topicId, GENERATION, "data", invalidTxWritePtr, (short) 0));
    messageTable.store(entries.iterator());
    // Fetch the entries and make sure we are able to read them
    try (CloseableIterator<MessageTable.Entry> iterator = messageTable.fetch(topic, 0, Integer.MAX_VALUE, null)) {
      checkEntry(iterator, invalidTxWritePtr);
    }
    // Fetch the entries with a transaction and make sure we are able to read them
    Transaction tx = new Transaction(V[8], V[8], new long[0], new long[0], -1);
    try (CloseableIterator<MessageTable.Entry> iterator = messageTable.fetch(topic, 0, Integer.MAX_VALUE, tx)) {
      checkEntry(iterator, invalidTxWritePtr);
    }
    // Now run a full compaction
    forceFlushAndCompact(Table.MESSAGE);
    // Fetch the entry non-transactionally; it should still be there
    try (CloseableIterator<MessageTable.Entry> iterator = messageTable.fetch(topic, 0, Integer.MAX_VALUE, null)) {
      checkEntry(iterator, invalidTxWritePtr);
    }
    // Fetch the entries transactionally; no entries should be returned
    try (CloseableIterator<MessageTable.Entry> iterator = messageTable.fetch(topic, 0, Integer.MAX_VALUE, tx)) {
      Assert.assertFalse(iterator.hasNext());
    }
    metadataTable.deleteTopic(topicId);
    // Sleep so that the metadata cache expires
    TimeUnit.SECONDS.sleep(3 * METADATA_CACHE_EXPIRY);
    forceFlushAndCompact(Table.MESSAGE);
    // Verify that messages from the deleted topic are gone after compaction
    try (CloseableIterator<MessageTable.Entry> iterator = messageTable.fetch(topic, 0, Integer.MAX_VALUE, null)) {
      Assert.assertFalse(iterator.hasNext());
    }
  }
}
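The checkEntry helper is referenced but not shown in this snippet. A minimal sketch of what it plausibly verifies, assuming the stored payload is the string "data" and that exactly one entry is expected (this is an illustration, not the actual helper from HBaseTableCoprocessorTestRun):

private void checkEntry(CloseableIterator<MessageTable.Entry> iterator, long txWritePtr) {
  // Expect exactly one entry carrying the given transaction write pointer
  Assert.assertTrue(iterator.hasNext());
  MessageTable.Entry entry = iterator.next();
  Assert.assertEquals(txWritePtr, entry.getTransactionWritePointer());
  Assert.assertEquals("data", Bytes.toString(entry.getPayload()));
  Assert.assertFalse(iterator.hasNext());
}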
Use of co.cask.cdap.proto.id.TopicId in project cdap by caskdata.
In class MessagingMetricsCollectionServiceTest, method assertMetricsFromMessaging:
private void assertMetricsFromMessaging(final Schema schema, ReflectionDatumReader recordReader,
                                        Table<String, String, Long> expected)
  throws InterruptedException, TopicNotFoundException, IOException {
  // Consume from the messaging service
  final Map<String, MetricValues> metrics = Maps.newHashMap();
  ByteBufferInputStream is = new ByteBufferInputStream(null);
  for (int i = 0; i < PARTITION_SIZE; i++) {
    TopicId topicId = NamespaceId.SYSTEM.topic(TOPIC_PREFIX + i);
    try (CloseableIterator<RawMessage> iterator = messagingService.prepareFetch(topicId).fetch()) {
      while (iterator.hasNext()) {
        RawMessage message = iterator.next();
        MetricValues metricsRecord = (MetricValues) recordReader.read(
          new BinaryDecoder(is.reset(ByteBuffer.wrap(message.getPayload()))), schema);
        StringBuilder flattenContext = new StringBuilder();
        // Sort the tags so the flattened context is deterministic for comparison
        Map<String, String> tags = Maps.newTreeMap();
        tags.putAll(metricsRecord.getTags());
        for (Map.Entry<String, String> tag : tags.entrySet()) {
          flattenContext.append(tag.getKey()).append(".").append(tag.getValue()).append(".");
        }
        // Remove the trailing "."
        if (flattenContext.length() > 0) {
          flattenContext.deleteCharAt(flattenContext.length() - 1);
        }
        metrics.put(flattenContext.toString(), metricsRecord);
      }
    } catch (IOException e) {
      LOG.info("Failed to decode message to MetricValues. Skipped. {}", e.getMessage());
    }
  }
  Assert.assertEquals(expected.rowKeySet().size(), metrics.size());
  checkReceivedMetrics(expected, metrics);
}
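The checkReceivedMetrics helper is likewise not part of this snippet. A hedged sketch of the comparison it might perform, assuming the Guava Table maps a flattened tag context (row) and metric name (column) to an expected value:

private void checkReceivedMetrics(Table<String, String, Long> expected, Map<String, MetricValues> actual) {
  for (Table.Cell<String, String, Long> cell : expected.cellSet()) {
    MetricValues received = actual.get(cell.getRowKey());
    Assert.assertNotNull("No metrics received for context " + cell.getRowKey(), received);
    for (MetricValue metricValue : received.getMetrics()) {
      // Only compare the metric named by this cell
      if (cell.getColumnKey().equals(metricValue.getName())) {
        Assert.assertEquals(cell.getValue().longValue(), metricValue.getValue());
      }
    }
  }
}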
Use of co.cask.cdap.proto.id.TopicId in project cdap by caskdata.
In class AbstractNamespaceResourceDeleter, method deleteResources:
@Override
public void deleteResources(NamespaceMeta namespaceMeta) throws Exception {
  final NamespaceId namespaceId = namespaceMeta.getNamespaceId();
  // Delete preferences associated with this namespace
  preferencesStore.deleteProperties(namespaceId.getNamespace());
  // Delete all dashboards associated with this namespace
  dashboardStore.delete(namespaceId.getNamespace());
  // Delete all applications
  applicationLifecycleService.removeAll(namespaceId);
  // Delete datasets and modules
  dsFramework.deleteAllInstances(namespaceId);
  dsFramework.deleteAllModules(namespaceId);
  // Delete queue and stream data
  queueAdmin.dropAllInNamespace(namespaceId);
  // Delete all streams in the namespace
  deleteStreams(namespaceId);
  // Delete all metadata
  store.removeAll(namespaceId);
  deleteMetrics(namespaceId);
  // Delete all artifacts in the namespace
  artifactRepository.clear(namespaceId);
  // Delete all messaging topics in the namespace
  for (TopicId topicId : messagingService.listTopics(namespaceId)) {
    messagingService.deleteTopic(topicId);
  }
  LOG.info("All data for namespace '{}' deleted.", namespaceId);
  // Delete the namespace itself only if it is a non-default namespace; we never delete the default
  // namespace in the storage provider (Hive, HBase, etc.), since we re-use their default namespace.
  if (!NamespaceId.DEFAULT.equals(namespaceId)) {
    impersonator.doAs(namespaceId, new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // Delete the namespace in the storage providers
        storageProviderNamespaceAdmin.delete(namespaceId);
        return null;
      }
    });
  }
}
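One caveat worth noting: deleteTopic throws TopicNotFoundException if the topic no longer exists, so a cleanup loop that must tolerate concurrent deletion could guard each call like this (a sketch of a defensive variant, not the shipped behavior):

for (TopicId topicId : messagingService.listTopics(namespaceId)) {
  try {
    messagingService.deleteTopic(topicId);
  } catch (TopicNotFoundException e) {
    // Another caller already deleted the topic; safe to ignore during cleanup
  }
}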
Use of co.cask.cdap.proto.id.TopicId in project cdap by caskdata.
In class AbstractContext, method createRuntimeProgramContext:
/**
 * Creates a new instance of {@link RuntimeProgramContext} to be
 * provided to a {@link RuntimeProgramContextAware} dataset.
 */
private RuntimeProgramContext createRuntimeProgramContext(final DatasetId datasetId) {
  return new RuntimeProgramContext() {
    @Override
    public void notifyNewPartitions(Collection<? extends PartitionKey> partitionKeys) throws IOException {
      String topic = cConf.get(Constants.Dataset.DATA_EVENT_TOPIC);
      if (Strings.isNullOrEmpty(topic)) {
        // Don't publish if there is no data event topic
        return;
      }
      TopicId dataEventTopic = NamespaceId.SYSTEM.topic(topic);
      MessagePublisher publisher = getMessagingContext().getMessagePublisher();
      byte[] payload = Bytes.toBytes(GSON.toJson(Notification.forPartitions(datasetId, partitionKeys)));
      int failure = 0;
      long startTime = System.currentTimeMillis();
      while (true) {
        try {
          publisher.publish(dataEventTopic.getNamespace(), dataEventTopic.getTopic(), payload);
          return;
        } catch (TopicNotFoundException e) {
          // This shouldn't happen since the TMS creates the data event topic on startup
          throw new IOException("Unexpected exception due to missing topic '" + dataEventTopic + "'", e);
        } catch (IOException e) {
          long sleepTime = retryStrategy.nextRetry(++failure, startTime);
          if (sleepTime < 0) {
            throw e;
          }
          try {
            TimeUnit.MILLISECONDS.sleep(sleepTime);
          } catch (InterruptedException ex) {
            // If interrupted during sleep, just reset the interrupt flag and return
            Thread.currentThread().interrupt();
            return;
          }
        }
      }
    }

    @Override
    public ProgramRunId getProgramRunId() {
      return programRunId;
    }

    @Nullable
    @Override
    public NamespacedEntityId getComponentId() {
      return AbstractContext.this.getComponentId();
    }
  };
}
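The publish loop above delegates backoff decisions to retryStrategy: nextRetry returns a sleep time in milliseconds, or a negative value to signal that the caller should stop retrying and rethrow. A minimal sketch of a compatible strategy with assumed limits (in practice the strategy is built from configuration, not hardcoded like this):

RetryStrategy retryStrategy = (failureCount, startTime) -> {
  // Give up after roughly 30 seconds of retries; a negative return means rethrow
  if (System.currentTimeMillis() - startTime > 30_000L) {
    return -1L;
  }
  // Otherwise back off exponentially, capped at 2 seconds
  return Math.min(2000L, 100L << Math.min(failureCount, 4));
};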
Use of co.cask.cdap.proto.id.TopicId in project cdap by caskdata.
In class CoreSchedulerServiceTest, method testProgramEvents:
@Test
@Category(XSlowTests.class)
public void testProgramEvents() throws Exception {
  // Deploy the app
  deploy(AppWithMultipleSchedules.class);
  CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
  TopicId programEventTopic =
    NamespaceId.SYSTEM.topic(cConf.get(Constants.AppFabric.PROGRAM_STATUS_RECORD_EVENT_TOPIC));
  ProgramStateWriter programStateWriter = new MessagingProgramStateWriter(cConf, messagingService);

  // These notifications should not trigger the program
  ProgramRunId anotherWorkflowRun = ANOTHER_WORKFLOW.run(RunIds.generate());
  programStateWriter.start(anotherWorkflowRun, new SimpleProgramOptions(anotherWorkflowRun.getParent()), null);
  programStateWriter.running(anotherWorkflowRun, null);
  long lastProcessed = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
  programStateWriter.error(anotherWorkflowRun, null);
  waitUntilProcessed(programEventTopic, lastProcessed);

  ProgramRunId someWorkflowRun = SOME_WORKFLOW.run(RunIds.generate());
  programStateWriter.start(someWorkflowRun, new SimpleProgramOptions(someWorkflowRun.getParent()), null);
  programStateWriter.running(someWorkflowRun, null);
  lastProcessed = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
  programStateWriter.killed(someWorkflowRun);
  waitUntilProcessed(programEventTopic, lastProcessed);
  Assert.assertEquals(0, getRuns(TRIGGERED_WORKFLOW, ProgramRunStatus.ALL));

  // Enable the schedule
  scheduler.enableSchedule(APP_MULT_ID.schedule(AppWithMultipleSchedules.WORKFLOW_COMPLETED_SCHEDULE));
  // Start a program with user arguments
  startProgram(ANOTHER_WORKFLOW,
               ImmutableMap.of(AppWithMultipleSchedules.ANOTHER_RUNTIME_ARG_KEY,
                               AppWithMultipleSchedules.ANOTHER_RUNTIME_ARG_VALUE),
               200);
  // Wait for a completed run record
  waitForCompleteRuns(1, TRIGGERED_WORKFLOW);
  assertProgramRuns(TRIGGERED_WORKFLOW, ProgramRunStatus.COMPLETED, 1);
  RunRecord run = getProgramRuns(TRIGGERED_WORKFLOW, ProgramRunStatus.COMPLETED).get(0);
  Map<String, List<WorkflowTokenDetail.NodeValueDetail>> tokenData =
    getWorkflowToken(TRIGGERED_WORKFLOW, run.getPid(), null, null).getTokenData();
  // There should be 2 entries in tokenData
  Assert.assertEquals(2, tokenData.size());
  // The value of TRIGGERED_RUNTIME_ARG_KEY should be ANOTHER_RUNTIME_ARG_VALUE from the triggering workflow
  Assert.assertEquals(AppWithMultipleSchedules.ANOTHER_RUNTIME_ARG_VALUE,
                      tokenData.get(AppWithMultipleSchedules.TRIGGERED_RUNTIME_ARG_KEY).get(0).getValue());
  // The value of TRIGGERED_TOKEN_KEY should be ANOTHER_TOKEN_VALUE from the triggering workflow
  Assert.assertEquals(AppWithMultipleSchedules.ANOTHER_TOKEN_VALUE,
                      tokenData.get(AppWithMultipleSchedules.TRIGGERED_TOKEN_KEY).get(0).getValue());
}
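waitUntilProcessed is a test helper not shown here; conceptually it blocks until the scheduler has consumed the program-status notifications published up to the given timestamp. A hypothetical sketch of its shape (the polling predicate getLastProcessedTime is invented for illustration and is not a CDAP API):

private void waitUntilProcessed(TopicId topic, long minProcessedTimeSeconds) throws Exception {
  // Poll until the consumer's processed-up-to marker passes the given time
  Tasks.waitFor(true, () -> getLastProcessedTime(topic) >= minProcessedTimeSeconds,
                10, TimeUnit.SECONDS);
}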