Use of co.cask.cdap.common.NotFoundException in project cdap by caskdata.
The class HBaseQueueDebugger, method scanQueue.
/**
* Only works for {@link co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy}.
*/
public QueueStatistics scanQueue(final QueueName queueName, @Nullable Long consumerGroupId) throws Exception {
  HBaseConsumerStateStore stateStore;
  try {
    stateStore = queueAdmin.getConsumerStateStore(queueName);
  } catch (IllegalStateException e) {
    throw new NotFoundException(queueName);
  }
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, stateStore);

  // Fetch all barriers for every consumer group within a single transaction
  Multimap<Long, QueueBarrier> barriers = txExecutor.execute(
    new TransactionExecutor.Function<HBaseConsumerStateStore, Multimap<Long, QueueBarrier>>() {
      @Override
      public Multimap<Long, QueueBarrier> apply(HBaseConsumerStateStore input) throws Exception {
        return input.getAllBarriers();
      }
    }, stateStore);
  printProgress("Got %d barriers\n", barriers.size());

  QueueStatistics stats = new QueueStatistics();
  if (consumerGroupId != null) {
    barriers = Multimaps.filterKeys(barriers, Predicates.equalTo(consumerGroupId));
  }

  for (Map.Entry<Long, Collection<QueueBarrier>> entry : barriers.asMap().entrySet()) {
    long groupId = entry.getKey();
    Collection<QueueBarrier> groupBarriers = entry.getValue();
    printProgress("Scanning barriers for group %d\n", groupId);

    // Scan section by section, where each section is bounded by two consecutive barriers;
    // the final section is open-ended (end == null)
    int currentSection = 1;
    PeekingIterator<QueueBarrier> barrierIterator = Iterators.peekingIterator(groupBarriers.iterator());
    while (barrierIterator.hasNext()) {
      QueueBarrier start = barrierIterator.next();
      QueueBarrier end = barrierIterator.hasNext() ? barrierIterator.peek() : null;
      printProgress("Scanning section %d/%d...\n", currentSection, groupBarriers.size());
      scanQueue(txExecutor, stateStore, queueName, start, end, stats);
      printProgress("Current results: %s\n", stats.getReport(showTxTimestampOnly()));
      currentSection++;
    }
    printProgress("Scanning complete\n");
  }
  System.out.printf("Results for queue %s: %s\n", queueName.toString(), stats.getReport(showTxTimestampOnly()));
  return stats;
}
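The scan walks each consumer group's barriers pairwise, so every section runs from one barrier to the next. A minimal invocation sketch follows; the queue coordinates, the QueueName.fromFlowlet factory call, and obtaining the debugger from a Guice injector are assumptions about the surrounding wiring, not part of the method above:

// Hypothetical usage sketch; queue coordinates and debugger wiring are assumptions.
QueueName queueName = QueueName.fromFlowlet("default", "PurchaseApp", "PurchaseFlow", "reader", "queue");
HBaseQueueDebugger debugger = injector.getInstance(HBaseQueueDebugger.class);
// Pass null to scan every consumer group, or a specific group id to narrow the scan
HBaseQueueDebugger.QueueStatistics stats = debugger.scanQueue(queueName, null);
System.out.println(stats.getReport(false));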
Use of co.cask.cdap.common.NotFoundException in project cdap by caskdata.
The class LogHandler, method getRunRecordMeta.
private RunRecordMeta getRunRecordMeta(String namespace, String app, ProgramType programType,
                                       String programName, String run) throws NotFoundException {
  ProgramRunId programRunId = new ProgramRunId(namespace, app, programType, programName, run);
  RunRecordMeta runRecord = programStore.getRun(programRunId.getParent(), programRunId.getRun());
  if (runRecord == null) {
    throw new NotFoundException(programRunId);
  }
  return runRecord;
}
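This is the common CDAP lookup-or-throw pattern: the store returns null for a missing run, and the handler converts that null into a typed NotFoundException that the HTTP layer can map to a 404. A short caller sketch, assuming hypothetical program coordinates and run id:

// Hypothetical caller; the program coordinates and runId are assumptions.
RunRecordMeta runRecord = getRunRecordMeta("default", "PurchaseApp", ProgramType.WORKFLOW, "PurchaseWorkflow", runId);
// Reaching this point means the run exists; a missing run surfaces as NotFoundException, never as null
long startTs = runRecord.getStartTs();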
Use of co.cask.cdap.common.NotFoundException in project cdap by caskdata.
The class KafkaOffsetResolver, method getStartOffset.
/**
 * Checks whether the message fetched at offset {@code checkpoint.getNextOffset() - 1} carries the
 * same timestamp as the given checkpoint. If it does, directly returns {@code checkpoint.getNextOffset()}.
 * If it does not, searches for the smallest offset of a message with the same log event time
 * as {@code checkpoint.getNextEventTime()}.
 *
 * @param checkpoint a {@link Checkpoint} containing the next offset of a message and its log event timestamp.
 *                   {@link Checkpoint#getNextOffset()}, {@link Checkpoint#getNextEventTime()}
 *                   and {@link Checkpoint#getMaxEventTime()} must all return a non-negative long
 * @param partition the partition in the topic to search for the matching offset
 * @return the next offset of the message with the smallest offset and a log event time equal to
 *         {@code checkpoint.getNextEventTime()}, or {@code -1} if no such offset can be found or
 *         {@code checkpoint.getNextOffset()} is negative
 *
 * @throws LeaderNotAvailableException if there is no Kafka broker to talk to
 * @throws OffsetOutOfRangeException if the given offset is out of range
 * @throws NotLeaderForPartitionException if the broker the consumer is talking to is not the leader
 *                                        for the given topic and partition
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with an error
 */
long getStartOffset(final Checkpoint checkpoint, final int partition) {
  // This should never happen
  Preconditions.checkArgument(checkpoint.getNextOffset() > 0, "Invalid checkpoint offset");

  // Get BrokerInfo for constructing SimpleConsumer
  String topic = config.getTopic();
  BrokerInfo brokerInfo = brokerService.getLeader(topic, partition);
  if (brokerInfo == null) {
    throw new LeaderNotAvailableException(
      String.format("BrokerInfo from BrokerService is null for topic %s partition %d. Will retry in next run.",
                    topic, partition));
  }
  SimpleConsumer consumer = new SimpleConsumer(brokerInfo.getHost(), brokerInfo.getPort(),
                                               SO_TIMEOUT_MILLIS, BUFFER_SIZE,
                                               "offset-finder-" + topic + "-" + partition);

  // Fetch the message at offset checkpoint.getNextOffset() - 1 and compare its timestamp with the
  // checkpoint's; a match means the checkpointed offset is still valid
  long offset = checkpoint.getNextOffset() - 1;
  try {
    long timestamp = getEventTimeByOffset(consumer, partition, offset);
    if (timestamp == checkpoint.getNextEventTime()) {
      return checkpoint.getNextOffset();
    }
    // This can happen in a replicated cluster
    LOG.debug("Event timestamp in {}:{} at offset {} is {}. It doesn't match the checkpoint timestamp {}",
              topic, partition, offset, timestamp, checkpoint.getNextEventTime());
  } catch (NotFoundException | OffsetOutOfRangeException e) {
    // This means we can't find the timestamp. This can happen in a replicated cluster
    LOG.debug("Cannot get valid log event in {}:{} at offset {}", topic, partition, offset);
  }

  // Fall back to searching for the smallest offset whose event matches the checkpoint timestamp
  long nextOffset = findStartOffset(consumer, partition, checkpoint.getNextEventTime());
  LOG.debug("Found new nextOffset {} for topic {} partition {} with existing checkpoint {}.",
            nextOffset, topic, partition, checkpoint);
  return nextOffset;
}
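In short, the checkpointed offset is trusted only when the event it points at still carries the checkpointed timestamp; otherwise the resolver falls back to a search by event time. A hedged usage sketch; the constructor arguments and the checkpointManager.getCheckpoint call are assumptions about the surrounding wiring:

// Hypothetical usage; how the resolver and checkpoint are obtained is an assumption.
KafkaOffsetResolver resolver = new KafkaOffsetResolver(brokerService, config);
Checkpoint checkpoint = checkpointManager.getCheckpoint(partition);
long startOffset = resolver.getStartOffset(checkpoint, partition);
if (startOffset < 0) {
  // -1 means no message with the checkpointed event time was found; the caller must
  // choose its own fallback, e.g. re-reading from the earliest available offset
}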
Use of co.cask.cdap.common.NotFoundException in project cdap by caskdata.
The class DefaultNamespaceAdminTest, method testNamespaces.
@Test
public void testNamespaces() throws Exception {
  String namespace = "namespace";
  NamespaceId namespaceId = new NamespaceId(namespace);
  NamespaceMeta.Builder builder = new NamespaceMeta.Builder();
  int initialCount = namespaceAdmin.list().size();

  // TEST_NAMESPACE_META1 is already created in AppFabricTestBase#beforeClass
  Assert.assertTrue(namespaceAdmin.exists(new NamespaceId(TEST_NAMESPACE1)));
  // It should be present in the cache too
  Assert.assertNotNull(getFromCache(new NamespaceId(TEST_NAMESPACE1)));
  try {
    namespaceAdmin.create(TEST_NAMESPACE_META1);
    Assert.fail("Should not create duplicate namespace.");
  } catch (NamespaceAlreadyExistsException e) {
    Assert.assertEquals(TEST_NAMESPACE_META1.getNamespaceId(), e.getId());
  }

  // "random" namespace should not exist
  try {
    namespaceAdmin.get(new NamespaceId("random"));
    Assert.fail("Namespace 'random' should not exist.");
  } catch (NamespaceNotFoundException e) {
    Assert.assertEquals(new NamespaceId("random"), e.getId());
  }

  try {
    namespaceAdmin.create(null);
    Assert.fail("Namespace with null metadata should fail.");
  } catch (IllegalArgumentException e) {
    Assert.assertEquals("Namespace metadata should not be null.", e.getMessage());
  }
  Assert.assertEquals(initialCount, namespaceAdmin.list().size());
  Assert.assertFalse(namespaceAdmin.exists(new NamespaceId(namespace)));

  try {
    namespaceAdmin.create(builder.build());
    Assert.fail("Namespace with no name should fail");
  } catch (IllegalArgumentException e) {
    Assert.assertEquals("Namespace id cannot be null.", e.getMessage());
  }
  Assert.assertEquals(initialCount, namespaceAdmin.list().size());
  Assert.assertFalse(namespaceAdmin.exists(namespaceId));

  // namespace with default fields
  namespaceAdmin.create(builder.setName(namespace).build());
  Assert.assertEquals(initialCount + 1, namespaceAdmin.list().size());
  Assert.assertTrue(namespaceAdmin.exists(namespaceId));
  // It should be loaded in the cache too, since exists calls get
  Assert.assertNotNull(getFromCache(namespaceId));
  try {
    NamespaceMeta namespaceMeta = namespaceAdmin.get(namespaceId);
    Assert.assertEquals(namespaceId.getNamespace(), namespaceMeta.getName());
    Assert.assertEquals("", namespaceMeta.getDescription());
    namespaceAdmin.delete(namespaceId);
    // It should be deleted from the cache too
    Assert.assertNull(getFromCache(namespaceId));
  } catch (NotFoundException e) {
    Assert.fail(String.format("Namespace '%s' should be found since it was just created.", namespaceId.getNamespace()));
  }

  namespaceAdmin.create(builder.setDescription("describes " + namespace).build());
  Assert.assertEquals(initialCount + 1, namespaceAdmin.list().size());
  Assert.assertTrue(namespaceAdmin.exists(namespaceId));
  try {
    NamespaceMeta namespaceMeta = namespaceAdmin.get(namespaceId);
    // It should be loaded in the cache too
    Assert.assertNotNull(getFromCache(namespaceId));
    Assert.assertEquals(namespaceId.getNamespace(), namespaceMeta.getName());
    Assert.assertEquals("describes " + namespaceId.getNamespace(), namespaceMeta.getDescription());
    namespaceAdmin.delete(namespaceId);
    // It should be deleted from the cache
    Assert.assertNull(getFromCache(namespaceId));
  } catch (NotFoundException e) {
    Assert.fail(String.format("Namespace '%s' should be found since it was just created.", namespaceId.getNamespace()));
  }

  // Verify the NotFoundException's contents as well, instead of just checking namespaceAdmin.exists == false
  verifyNotFound(namespaceId);
}
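The final step inspects the contents of the NotFoundException rather than relying on exists() alone. A sketch of what such a verifyNotFound helper might look like; the actual helper in the test class may differ:

// Hypothetical helper; the real verifyNotFound used by the test may be implemented differently.
private void verifyNotFound(NamespaceId namespaceId) throws Exception {
  try {
    namespaceAdmin.get(namespaceId);
    Assert.fail(String.format("Namespace '%s' should not be found.", namespaceId.getNamespace()));
  } catch (NamespaceNotFoundException e) {
    // Check the exception carries the id of the missing namespace, not just that one was thrown
    Assert.assertEquals(namespaceId, e.getId());
  }
}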
Use of co.cask.cdap.common.NotFoundException in project cdap by caskdata.
The class WorkflowClientTestRun, method testWorkflowClient.
@Test
public void testWorkflowClient() throws Exception {
  String keyValueTableType = "co.cask.cdap.api.dataset.lib.KeyValueTable";
  String filesetType = "co.cask.cdap.api.dataset.lib.FileSet";
  String outputPath = new File(TMP_FOLDER.newFolder(), "output").getAbsolutePath();
  Map<String, String> runtimeArgs = ImmutableMap.of("inputPath", createInput("input"),
                                                    "outputPath", outputPath,
                                                    "dataset.*.keep.local", "true");
  final WorkflowId workflowId = NamespaceId.DEFAULT.app(AppWithWorkflow.NAME).workflow(AppWithWorkflow.SampleWorkflow.NAME);

  programClient.start(workflowId, false, runtimeArgs);
  programClient.waitForStatus(workflowId, ProgramStatus.STOPPED, 60, TimeUnit.SECONDS);
  Tasks.waitFor(1, new Callable<Integer>() {
    @Override
    public Integer call() throws Exception {
      return programClient.getProgramRuns(workflowId, ProgramRunStatus.COMPLETED.name(), 0, Long.MAX_VALUE, 10).size();
    }
  }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

  List<RunRecord> workflowRuns = programClient.getProgramRuns(workflowId, ProgramRunStatus.COMPLETED.name(), 0, Long.MAX_VALUE, 10);
  Assert.assertEquals(1, workflowRuns.size());
  String runId = workflowRuns.get(0).getPid();
  ProgramRunId workflowRunId = workflowId.run(runId);

  // Invalid test scenarios
  try {
    ProgramId nonExistentWorkflowId = new ProgramId(NamespaceId.DEFAULT.getNamespace(), AppWithWorkflow.NAME,
                                                    ProgramType.WORKFLOW, "NonExistentWorkflow");
    ProgramRunId nonExistentWorkflowRun = nonExistentWorkflowId.run(runId);
    workflowClient.getWorkflowToken(nonExistentWorkflowRun);
    Assert.fail("Should not find a workflow token for a non-existing workflow");
  } catch (NotFoundException expected) {
    // expected
  }
  try {
    ProgramRunId invalidRunId = workflowId.run(RunIds.generate().getId());
    workflowClient.getWorkflowToken(invalidRunId);
    Assert.fail("Should not find a workflow token for a random run id");
  } catch (NotFoundException expected) {
    // expected
  }

  // Valid test scenarios
  WorkflowTokenDetail workflowToken = workflowClient.getWorkflowToken(workflowRunId);
  Assert.assertEquals(5, workflowToken.getTokenData().size());
  workflowToken = workflowClient.getWorkflowToken(workflowRunId, WorkflowToken.Scope.SYSTEM);
  Assert.assertTrue(workflowToken.getTokenData().size() > 0);
  workflowToken = workflowClient.getWorkflowToken(workflowRunId, "start_time");
  Map<String, List<WorkflowTokenDetail.NodeValueDetail>> tokenData = workflowToken.getTokenData();
  Assert.assertEquals(AppWithWorkflow.WordCountMapReduce.NAME, tokenData.get("start_time").get(0).getNode());
  Assert.assertTrue(Long.parseLong(tokenData.get("start_time").get(0).getValue()) < System.currentTimeMillis());
  workflowToken = workflowClient.getWorkflowToken(workflowRunId, WorkflowToken.Scope.USER, "action_type");
  tokenData = workflowToken.getTokenData();
  Assert.assertEquals(AppWithWorkflow.WordCountMapReduce.NAME, tokenData.get("action_type").get(0).getNode());
  Assert.assertEquals("MapReduce", tokenData.get("action_type").get(0).getValue());

  String nodeName = AppWithWorkflow.SampleWorkflow.FIRST_ACTION;
  WorkflowTokenNodeDetail workflowTokenAtNode = workflowClient.getWorkflowTokenAtNode(workflowRunId, nodeName);
  Assert.assertEquals(AppWithWorkflow.DummyAction.TOKEN_VALUE,
                      workflowTokenAtNode.getTokenDataAtNode().get(AppWithWorkflow.DummyAction.TOKEN_KEY));
  workflowTokenAtNode = workflowClient.getWorkflowTokenAtNode(workflowRunId, nodeName, WorkflowToken.Scope.SYSTEM);
  Assert.assertEquals(0, workflowTokenAtNode.getTokenDataAtNode().size());
  workflowTokenAtNode = workflowClient.getWorkflowTokenAtNode(workflowRunId, nodeName, AppWithWorkflow.DummyAction.TOKEN_KEY);
  Assert.assertEquals(AppWithWorkflow.DummyAction.TOKEN_VALUE,
                      workflowTokenAtNode.getTokenDataAtNode().get(AppWithWorkflow.DummyAction.TOKEN_KEY));
  String reduceOutputRecordsCounter = "org.apache.hadoop.mapreduce.TaskCounter.REDUCE_OUTPUT_RECORDS";
  workflowTokenAtNode = workflowClient.getWorkflowTokenAtNode(workflowRunId, AppWithWorkflow.WordCountMapReduce.NAME,
                                                              WorkflowToken.Scope.SYSTEM, reduceOutputRecordsCounter);
  Assert.assertEquals(6, Integer.parseInt(workflowTokenAtNode.getTokenDataAtNode().get(reduceOutputRecordsCounter)));

  Map<String, DatasetSpecificationSummary> localDatasetSummaries = workflowClient.getWorkflowLocalDatasets(workflowRunId);
  Assert.assertEquals(2, localDatasetSummaries.size());
  DatasetSpecificationSummary keyValueTableSummary =
    new DatasetSpecificationSummary("MyTable." + runId, keyValueTableType, ImmutableMap.of("foo", "bar"));
  Assert.assertEquals(keyValueTableSummary, localDatasetSummaries.get("MyTable"));
  DatasetSpecificationSummary filesetSummary =
    new DatasetSpecificationSummary("MyFile." + runId, filesetType, ImmutableMap.of("anotherFoo", "anotherBar"));
  Assert.assertEquals(filesetSummary, localDatasetSummaries.get("MyFile"));
  workflowClient.deleteWorkflowLocalDatasets(workflowRunId);
  localDatasetSummaries = workflowClient.getWorkflowLocalDatasets(workflowRunId);
  Assert.assertEquals(0, localDatasetSummaries.size());

  Map<String, WorkflowNodeStateDetail> nodeStates = workflowClient.getWorkflowNodeStates(workflowRunId);
  Assert.assertEquals(3, nodeStates.size());
  WorkflowNodeStateDetail nodeState = nodeStates.get(AppWithWorkflow.SampleWorkflow.FIRST_ACTION);
  Assert.assertTrue(AppWithWorkflow.SampleWorkflow.FIRST_ACTION.equals(nodeState.getNodeId()));
  Assert.assertTrue(NodeStatus.COMPLETED == nodeState.getNodeStatus());
  nodeState = nodeStates.get(AppWithWorkflow.SampleWorkflow.SECOND_ACTION);
  Assert.assertTrue(AppWithWorkflow.SampleWorkflow.SECOND_ACTION.equals(nodeState.getNodeId()));
  Assert.assertTrue(NodeStatus.COMPLETED == nodeState.getNodeStatus());
  nodeState = nodeStates.get(AppWithWorkflow.SampleWorkflow.WORD_COUNT_MR);
  Assert.assertTrue(AppWithWorkflow.SampleWorkflow.WORD_COUNT_MR.equals(nodeState.getNodeId()));
  Assert.assertTrue(NodeStatus.COMPLETED == nodeState.getNodeStatus());
}
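For reference, the common token read path the test exercises can be condensed as follows; workflowClient and workflowRunId are assumed to be set up exactly as in the test above, and only API calls already shown there are used:

// Condensed usage sketch based on the calls above; no new API is assumed.
WorkflowTokenDetail token = workflowClient.getWorkflowToken(workflowRunId, WorkflowToken.Scope.USER);
for (Map.Entry<String, List<WorkflowTokenDetail.NodeValueDetail>> entry : token.getTokenData().entrySet()) {
  // A key can be written by multiple nodes; NodeValueDetail records which node set which value
  for (WorkflowTokenDetail.NodeValueDetail nodeValue : entry.getValue()) {
    System.out.printf("%s = %s (set by node %s)%n", entry.getKey(), nodeValue.getValue(), nodeValue.getNode());
  }
}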