Use of co.cask.cdap.proto.id.DatasetId in project cdap by caskdata.
The class HBaseQueueAdmin, method upgradeQueues.
private Map<TableId, Future<?>> upgradeQueues(final NamespaceMeta namespaceMeta, ExecutorService executor) throws Exception {
  try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
    String hbaseNamespace = tableUtil.getHBaseNamespace(namespaceMeta);
    List<TableId> tableIds = tableUtil.listTablesInNamespace(admin, hbaseNamespace);
    List<TableId> stateStoreTableIds = Lists.newArrayList();
    Map<TableId, Future<?>> futures = new HashMap<>();
    for (final TableId tableId : tableIds) {
      // Only queue data tables are upgraded here; the config table is skipped.
      if (isDataTable(tableId)) {
        Runnable runnable = new Runnable() {
          @Override
          public void run() {
            try {
              LOG.info("Upgrading queue table: {}", tableId);
              Properties properties = new Properties();
              HTableDescriptor desc = tableUtil.getHTableDescriptor(admin, tableId);
              if (desc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES) == null) {
                // It's an old queue table. Set the prefix-bytes property to SALT_BYTES.
                properties.setProperty(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES,
                                       Integer.toString(SaltedHBaseQueueStrategy.SALT_BYTES));
              }
              upgrade(tableId, properties);
              LOG.info("Upgraded queue table: {}", tableId);
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        };
        Future<?> future = executor.submit(runnable);
        futures.put(tableId, future);
      } else if (isStateStoreTable(tableId)) {
        stateStoreTableIds.add(tableId);
      }
    }
    // Upgrade the queue state store tables.
    for (final TableId tableId : stateStoreTableIds) {
      Runnable runnable = new Runnable() {
        @Override
        public void run() {
          try {
            LOG.info("Upgrading queue state store: {}", tableId);
            DatasetId stateStoreId = createStateStoreDataset(namespaceMeta.getName());
            DatasetAdmin datasetAdmin = datasetFramework.getAdmin(stateStoreId, null);
            if (datasetAdmin == null) {
              LOG.error("No dataset admin available for {}", stateStoreId);
              return;
            }
            datasetAdmin.upgrade();
            LOG.info("Upgraded queue state store: {}", tableId);
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
      Future<?> future = executor.submit(runnable);
      futures.put(tableId, future);
    }
    return futures;
  }
}
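The method returns the per-table futures rather than blocking, so the caller is expected to wait on them itself. A minimal sketch of such a caller, assuming it runs in a method that declares throws Exception; the pool size and error handling are illustrative choices, not the project's actual driver code:

// Hypothetical caller, not from the CDAP source: wait for every upgrade to finish.
// Needs java.util.concurrent ExecutorService, Executors, Future, ExecutionException.
ExecutorService executor = Executors.newFixedThreadPool(4);  // pool size chosen arbitrarily
try {
  Map<TableId, Future<?>> futures = upgradeQueues(namespaceMeta, executor);
  for (Map.Entry<TableId, Future<?>> entry : futures.entrySet()) {
    try {
      // get() rethrows the RuntimeException raised inside the upgrade Runnable
      entry.getValue().get();
    } catch (ExecutionException e) {
      LOG.error("Upgrade failed for table {}", entry.getKey(), e.getCause());
    }
  }
} finally {
  executor.shutdown();
}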
Use of co.cask.cdap.proto.id.DatasetId in project cdap by caskdata.
The class HBaseQueueAdmin, method createStateStoreDataset.
private DatasetId createStateStoreDataset(String namespace) throws IOException {
  try {
    DatasetId stateStoreId = getStateStoreId(namespace);
    DatasetProperties configProperties =
      TableProperties.builder().setColumnFamily(QueueEntryRow.COLUMN_FAMILY).build();
    DatasetsUtil.createIfNotExists(datasetFramework, stateStoreId,
                                   HBaseQueueDatasetModule.STATE_STORE_TYPE_NAME, configProperties);
    return stateStoreId;
  } catch (DatasetManagementException e) {
    throw new IOException(e);
  }
}
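The snippet relies on getStateStoreId, which is not shown. Presumably it just pairs the namespace with a fixed per-namespace dataset name, using the same two-argument DatasetId constructor seen in the LevelDBDatasetMetricsReporter snippet below; the dataset name here is a hypothetical placeholder:

// Hypothetical reconstruction of the getStateStoreId helper used above; the dataset
// name "queue.state.store" is an assumption, not taken from the CDAP source.
private DatasetId getStateStoreId(String namespaceId) {
  // A DatasetId is just a (namespace, dataset name) pair.
  return new DatasetId(namespaceId, "queue.state.store");
}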
Use of co.cask.cdap.proto.id.DatasetId in project cdap by caskdata.
The class LevelDBDatasetMetricsReporter, method report.
private void report(Map<TableId, LevelDBTableService.TableStats> datasetStat) throws DatasetManagementException {
  for (Map.Entry<TableId, LevelDBTableService.TableStats> statEntry : datasetStat.entrySet()) {
    String namespace = statEntry.getKey().getNamespace();
    // Emit metrics only for user datasets; tables in the system namespace are ignored.
    if (NamespaceId.SYSTEM.getNamespace().equals(namespace)) {
      continue;
    }
    String tableName = statEntry.getKey().getTableName();
    Collection<DatasetSpecificationSummary> instances = dsFramework.getInstances(new NamespaceId(namespace));
    for (DatasetSpecificationSummary spec : instances) {
      DatasetSpecification specification = dsFramework.getDatasetSpec(new DatasetId(namespace, spec.getName()));
      if (specification.isParent(tableName)) {
        MetricsContext collector = metricsService.getContext(
          ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, namespace,
                          Constants.Metrics.Tag.DATASET, spec.getName()));
        int sizeInMb = (int) (statEntry.getValue().getDiskSizeBytes() / BYTES_IN_MB);
        collector.gauge("dataset.size.mb", sizeInMb);
        break;
      }
    }
  }
}
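A worked instance of the size gauge, assuming BYTES_IN_MB is the usual 2^20 constant (its definition is not shown in the snippet):

// Assuming BYTES_IN_MB = 1024L * 1024L, which the snippet does not show:
long diskSizeBytes = 5_242_880L;                          // 5 MB of LevelDB files on disk
int sizeInMb = (int) (diskSizeBytes / (1024L * 1024L));   // == 5
// Tables smaller than 1 MB gauge as 0 because of the integer division.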
Use of co.cask.cdap.proto.id.DatasetId in project cdap by caskdata.
The class UsageDataset, method unregister.
/**
 * Unregisters all usage information of an application.
 *
 * @param applicationId the application whose usage information is removed
 */
public void unregister(ApplicationId applicationId) {
  ProgramId programId = ProgramKeyMaker.getProgramId(applicationId);
  // Delete datasets associated with applicationId
  for (DatasetId datasetInstanceId : getDatasets(applicationId)) {
    deleteAll(orderedPairs.get(DATASET, PROGRAM).makeKey(datasetInstanceId, programId));
  }
  // Delete streams associated with applicationId
  for (StreamId streamId : getStreams(applicationId)) {
    deleteAll(orderedPairs.get(STREAM, PROGRAM).makeKey(streamId, programId));
  }
  // Delete all mappings for applicationId
  deleteAll(orderedPairs.get(PROGRAM, DATASET).makeScanKey(programId));
  deleteAll(orderedPairs.get(PROGRAM, STREAM).makeScanKey(programId));
}
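Note that the usage index is stored in both directions (DATASET-to-PROGRAM and PROGRAM-to-DATASET ordered pairs), which is why the method deletes keys for each direction. A hedged call-site sketch; the namespace and application names are made up for illustration:

// Hypothetical call site: drop all dataset/stream usage records for one app.
ApplicationId appId = new ApplicationId("default", "PurchaseHistory");
usageDataset.unregister(appId);
// Afterwards, both directions of every (program, dataset) and (program, stream)
// pair for this application have been deleted from the usage index.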
Use of co.cask.cdap.proto.id.DatasetId in project cdap by caskdata.
The class ExploreDisabledTest, method testDeployNotRecordScannable.
@Test
public void testDeployNotRecordScannable() throws Exception {
  // Try to deploy a dataset that is not record scannable, when explore is disabled.
  // This should be processed with no exception being thrown.
  DatasetModuleId module2 = namespaceId.datasetModule("module2");
  DatasetId instance2 = namespaceId.dataset("table1");
  datasetFramework.addModule(module2, new NotRecordScannableTableDefinition.NotRecordScannableTableModule());
  // Performing admin operations to create the dataset instance
  datasetFramework.addInstance("NotRecordScannableTableDef", instance2, DatasetProperties.EMPTY);
  Transaction tx1 = transactionManager.startShort(100);
  // Accessing the dataset instance to perform data operations
  NotRecordScannableTableDefinition.KeyValueTable table =
    datasetFramework.getDataset(instance2, DatasetDefinition.NO_ARGUMENTS, null);
  Assert.assertNotNull(table);
  table.startTx(tx1);
  table.write("key1", "value1");
  table.write("key2", "value2");
  byte[] value = table.read("key1");
  Assert.assertEquals("value1", Bytes.toString(value));
  Assert.assertTrue(table.commitTx());
  transactionManager.canCommit(tx1, table.getTxChanges());
  transactionManager.commit(tx1);
  table.postTxCommit();
  Transaction tx2 = transactionManager.startShort(100);
  table.startTx(tx2);
  value = table.read("key1");
  Assert.assertNotNull(value);
  Assert.assertEquals("value1", Bytes.toString(value));
  datasetFramework.deleteInstance(instance2);
  datasetFramework.deleteModule(module2);
}
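The test builds its DatasetId through the NamespaceId helper rather than the constructor used in the LevelDBDatasetMetricsReporter snippet; the two forms are equivalent. A short sketch, with "myspace" as a made-up namespace name:

// Equivalent ways to refer to the same dataset instance.
NamespaceId ns = new NamespaceId("myspace");
DatasetId viaHelper = ns.dataset("table1");                     // helper used by the test
DatasetId viaConstructor = new DatasetId("myspace", "table1");  // direct construction
// Both values identify dataset "table1" in namespace "myspace".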