Use of co.cask.cdap.common.ServiceUnavailableException in project cdap by caskdata.
The class MetricsDataMigrator, method getOrCreateMetricsTable.
private MetricsTable getOrCreateMetricsTable(String tableName, DatasetProperties empty) throws DataMigrationException {
  MetricsTable table = null;
  // For the default namespace, we have to provide the complete table name.
  tableName = "system." + tableName;
  // Metrics tables are in the system namespace.
  DatasetId metricsDatasetInstanceId = NamespaceId.DEFAULT.dataset(tableName);
  try {
    table = DatasetsUtil.getOrCreateDataset(dsFramework, metricsDatasetInstanceId,
                                            MetricsTable.class.getName(), empty, null);
  } catch (DatasetManagementException | ServiceUnavailableException e) {
    String msg = String.format("Cannot access or create table %s.", tableName) + " " + e.getMessage();
    LOG.warn(msg);
    throw new DataMigrationException(msg);
  } catch (IOException e) {
    String msg = String.format("Exception while creating table %s", tableName);
    LOG.error(msg, e);
    throw new DataMigrationException(msg);
  }
  return table;
}
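The method's error handling follows a common wrap-and-rethrow shape: service-level and I/O failures are caught separately, logged, and converted into a single domain exception (DataMigrationException) so callers deal with one failure type. Below is a minimal, self-contained sketch of that shape; the names GetTableDemo, lookupTable, and TableUnavailableException are hypothetical stand-ins, not CDAP APIs.

// Minimal sketch of the wrap-and-rethrow pattern used above.
// All names are hypothetical; only the exception-handling shape mirrors the CDAP code.
import java.io.IOException;

public class GetTableDemo {

  // Domain exception that callers of the migration code deal with.
  static class TableUnavailableException extends Exception {
    TableUnavailableException(String message) {
      super(message);
    }
  }

  // Stand-in for a lookup that can fail in low-level, I/O ways.
  static String lookupTable(String tableName) throws IOException {
    if (tableName.isEmpty()) {
      throw new IOException("empty table name");
    }
    return "system." + tableName;
  }

  static String getOrCreate(String tableName) throws TableUnavailableException {
    try {
      return lookupTable(tableName);
    } catch (IOException e) {
      // Wrap the low-level failure in the domain exception, keeping its message.
      String msg = String.format("Cannot access or create table %s. %s", tableName, e.getMessage());
      throw new TableUnavailableException(msg);
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println(getOrCreate("metrics.v2.table"));
  }
}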
Use of co.cask.cdap.common.ServiceUnavailableException in project cdap by caskdata.
The class LocalLogAppenderResilientTest, method testResilientLogging.
@Test
public void testResilientLogging() throws Exception {
  Configuration hConf = new Configuration();
  CConfiguration cConf = CConfiguration.create();
  File datasetDir = new File(tmpFolder.newFolder(), "datasetUser");
  //noinspection ResultOfMethodCallIgnored
  datasetDir.mkdirs();
  cConf.set(Constants.Dataset.Manager.OUTPUT_DIR, datasetDir.getAbsolutePath());
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.set(Constants.Dataset.Executor.ADDRESS, "localhost");
  cConf.setInt(Constants.Dataset.Executor.PORT, Networks.getRandomPort());
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf, hConf),
    new IOModule(),
    new ZKClientModule(),
    new KafkaClientModule(),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new DataFabricModules().getInMemoryModules(),
    new DataSetsModules().getStandaloneModules(),
    new DataSetServiceModules().getInMemoryModules(),
    new TransactionMetricsModule(),
    new ExploreClientModule(),
    new LoggingModules().getInMemoryModules(),
    new NamespaceClientRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(NoOpOwnerAdmin.class);
      }
    });
  TransactionManager txManager = injector.getInstance(TransactionManager.class);
  txManager.startAndWait();
  DatasetOpExecutorService opExecutorService = injector.getInstance(DatasetOpExecutorService.class);
  opExecutorService.startAndWait();
  // Start the logging before starting the service.
  LoggingContextAccessor.setLoggingContext(
    new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "FLOWLET_1", "RUN", "INSTANCE"));
  String logBaseDir = "trl-log/log_files_" + new Random(System.currentTimeMillis()).nextLong();
  cConf.set(LoggingConfiguration.LOG_BASE_DIR, logBaseDir);
  cConf.setInt(LoggingConfiguration.LOG_MAX_FILE_SIZE_BYTES, 20 * 1024);
  final LogAppender appender = injector.getInstance(LocalLogAppender.class);
  new LogAppenderInitializer(appender).initialize("TestResilientLogging");
  int failureMsgCount = 3;
  final CountDownLatch failureLatch = new CountDownLatch(failureMsgCount);
  LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
  loggerContext.getStatusManager().add(new StatusListener() {
    @Override
    public void addStatusEvent(Status status) {
      if (status.getLevel() != Status.ERROR || status.getOrigin() != appender) {
        return;
      }
      Throwable cause = status.getThrowable();
      if (cause != null) {
        Throwable rootCause = Throwables.getRootCause(cause);
        if (rootCause instanceof ServiceUnavailableException) {
          String serviceName = ((ServiceUnavailableException) rootCause).getServiceName();
          if (Constants.Service.DATASET_MANAGER.equals(serviceName)) {
            failureLatch.countDown();
          }
        }
      }
    }
  });
  Logger logger = LoggerFactory.getLogger("TestResilientLogging");
  for (int i = 0; i < failureMsgCount; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  // Wait for the three append failures to happen.
  // The wait time has to be > 3 seconds because DatasetServiceClient has a 1 second timeout on discovery.
  failureLatch.await(5, TimeUnit.SECONDS);
  // Start the dataset service and wait for it to be discoverable.
  DatasetService dsService = injector.getInstance(DatasetService.class);
  dsService.startAndWait();
  final CountDownLatch startLatch = new CountDownLatch(1);
  DiscoveryServiceClient discoveryClient = injector.getInstance(DiscoveryServiceClient.class);
  discoveryClient.discover(Constants.Service.DATASET_MANAGER).watchChanges(new ServiceDiscovered.ChangeListener() {
    @Override
    public void onChange(ServiceDiscovered serviceDiscovered) {
      if (!Iterables.isEmpty(serviceDiscovered)) {
        startLatch.countDown();
      }
    }
  }, Threads.SAME_THREAD_EXECUTOR);
  startLatch.await(5, TimeUnit.SECONDS);
  // Do some more logging after the service is started.
  for (int i = 5; i < 10; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  appender.stop();
  // Verify - we should have at least 5 events.
  LoggingContext loggingContext = new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "", "RUN", "INSTANCE");
  FileLogReader logTail = injector.getInstance(FileLogReader.class);
  LoggingTester.LogCallback logCallback1 = new LoggingTester.LogCallback();
  logTail.getLogPrev(loggingContext, ReadRange.LATEST, 10, Filter.EMPTY_FILTER, logCallback1);
  List<LogEvent> allEvents = logCallback1.getEvents();
  Assert.assertTrue(allEvents.toString(), allEvents.size() >= 5);
  // Finally, stop all services.
  Services.chainStop(dsService, opExecutorService, txManager);
}
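The key mechanism in this test is Logback's status API: an appender that fails to write reports an ERROR Status, and a registered StatusListener can observe those failures without parsing any log output. Below is a minimal sketch of just that mechanism, independent of CDAP; it assumes logback-classic is on the classpath, and the class and message names are arbitrary.

// Minimal sketch: using Logback's status API to observe appender-level errors.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.slf4j.LoggerFactory;

import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.core.status.ErrorStatus;
import ch.qos.logback.core.status.Status;
import ch.qos.logback.core.status.StatusListener;

public class StatusListenerDemo {
  public static void main(String[] args) throws InterruptedException {
    // Requires logback-classic; the ILoggerFactory is then a LoggerContext.
    LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
    final CountDownLatch errorSeen = new CountDownLatch(1);

    // Listen for ERROR-level status events, the same hook the test uses to
    // count failed appends while the dataset service is down.
    context.getStatusManager().add(new StatusListener() {
      @Override
      public void addStatusEvent(Status status) {
        if (status.getLevel() == Status.ERROR) {
          errorSeen.countDown();
        }
      }
    });

    // Simulate what an appender does when it cannot write an event.
    context.getStatusManager().add(
      new ErrorStatus("append failed", StatusListenerDemo.class, new RuntimeException("backend down")));

    System.out.println("error observed: " + errorSeen.await(1, TimeUnit.SECONDS));
  }
}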
Use of co.cask.cdap.common.ServiceUnavailableException in project cdap by caskdata.
The class MonitorHandler, method resetServiceLogLevels.
/**
 * Reset the log levels of the service.
 * All loggers will be reset to the levels they had when the service started.
 */
@Path("system/services/{service-name}/resetloglevels")
@POST
public void resetServiceLogLevels(HttpRequest request, HttpResponder responder,
                                  @PathParam("service-name") String serviceName) throws Exception {
  if (!serviceManagementMap.containsKey(serviceName)) {
    throw new NotFoundException(String.format("Invalid service name %s", serviceName));
  }
  MasterServiceManager masterServiceManager = serviceManagementMap.get(serviceName);
  if (!masterServiceManager.isServiceEnabled()) {
    throw new ForbiddenException(
      String.format("Failed to reset log levels for service %s because the service is not enabled", serviceName));
  }
  try {
    Set<String> loggerNames = parseBody(request, SET_STRING_TYPE);
    masterServiceManager.resetServiceLogLevels(loggerNames == null ? Collections.<String>emptySet() : loggerNames);
    responder.sendStatus(HttpResponseStatus.OK);
  } catch (IllegalStateException ise) {
    throw new ServiceUnavailableException(
      String.format("Failed to reset log levels for service %s because the service may not be ready yet",
                    serviceName));
  } catch (JsonSyntaxException e) {
    throw new BadRequestException("Invalid Json in the body");
  }
}
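A client triggers this handler with a POST whose body is a JSON collection of logger names. The sketch below is a hypothetical invocation using only the JDK's HttpURLConnection; the host, port, and /v3 prefix are assumptions, and only the system/services/{service-name}/resetloglevels suffix comes from the @Path annotation above.

// Hypothetical client for the handler above. Host, port, and API prefix are
// assumptions; only the path suffix comes from the @Path annotation.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ResetLogLevelsClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:11015/v3/system/services/metrics/resetloglevels");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "application/json");

    // The handler parses the body as a JSON set of logger names.
    byte[] body = "[\"co.cask.cdap\"]".getBytes(StandardCharsets.UTF_8);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body);
    }
    // Expect 200 on success; a ServiceUnavailableException surfaces as 503.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}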
Use of co.cask.cdap.common.ServiceUnavailableException in project cdap by caskdata.
The class SingleThreadDatasetCache, method getDataset.
@Override
public <T extends Dataset> T getDataset(DatasetCacheKey key, boolean bypass) throws DatasetInstantiationException {
  Dataset dataset;
  try {
    if (bypass) {
      dataset = datasetLoader.load(key);
    } else {
      try {
        dataset = datasetCache.get(key);
      } catch (ExecutionException | UncheckedExecutionException e) {
        throw e.getCause();
      }
    }
  } catch (DatasetInstantiationException | ServiceUnavailableException e) {
    throw e;
  } catch (Throwable t) {
    throw new DatasetInstantiationException(
      String.format("Could not instantiate dataset '%s:%s'", key.getNamespace(), key.getName()), t);
  }
  // Make sure the dataset exists and is of the right type.
  if (dataset == null) {
    throw new DatasetInstantiationException(String.format("Dataset '%s' does not exist", key.getName()));
  }
  T typedDataset;
  try {
    @SuppressWarnings("unchecked")
    T t = (T) dataset;
    typedDataset = t;
  } catch (Throwable t) {
    // Must be a ClassCastException.
    throw new DatasetInstantiationException(
      String.format("Could not cast dataset '%s' to requested type. Actual type is %s.",
                    key.getName(), dataset.getClass().getName()), t);
  }
  // Any transaction-aware that is not in the active tx-awares is added to the current tx context (if there is one).
  if (!bypass && dataset instanceof TransactionAware) {
    TransactionAware txAware = (TransactionAware) dataset;
    TransactionAware existing = activeTxAwares.get(key);
    if (existing == null) {
      activeTxAwares.put(key, txAware);
      if (txContext != null) {
        txContext.addTransactionAware(txAware);
      }
    } else if (existing != dataset) {
      // This better be the same dataset, otherwise the cache did not work.
      throw new IllegalStateException(
        String.format("Unexpected state: Cache returned %s for %s, which is different from the "
                        + "active transaction aware %s for the same key. This should never happen.",
                      dataset, key, existing));
    }
  }
  return typedDataset;
}
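The inner try/catch shows the standard way to surface a Guava cache loader's own exception: LoadingCache.get wraps checked failures in ExecutionException and unchecked ones in UncheckedExecutionException, so the code rethrows getCause(). Below is a minimal sketch of that unwrapping with a stand-alone LoadingCache; the ServiceUnavailableException here is a local stand-in, not the CDAP class, and Guava is assumed on the classpath.

// Minimal sketch of unwrapping a Guava cache's wrapper exceptions so the
// loader's original exception reaches the caller. Names are stand-ins.
import java.util.concurrent.ExecutionException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.UncheckedExecutionException;

public class CacheUnwrapDemo {

  // Local stand-in for the service-level failure type.
  static class ServiceUnavailableException extends RuntimeException {
    ServiceUnavailableException(String message) {
      super(message);
    }
  }

  private static final LoadingCache<String, String> CACHE = CacheBuilder.newBuilder()
    .build(new CacheLoader<String, String>() {
      @Override
      public String load(String key) {
        // Simulate the dataset loader failing because a service is down.
        throw new ServiceUnavailableException("dataset service unreachable for " + key);
      }
    });

  static String get(String key) throws Throwable {
    try {
      return CACHE.get(key);
    } catch (ExecutionException | UncheckedExecutionException e) {
      // Rethrow the loader's own exception, not the cache wrapper.
      throw e.getCause();
    }
  }

  public static void main(String[] args) {
    try {
      get("myTable");
    } catch (Throwable t) {
      System.out.println("caught: " + t);  // the ServiceUnavailableException stand-in
    }
  }
}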
Use of co.cask.cdap.common.ServiceUnavailableException in project cdap by caskdata.
The class LineageWriterDatasetFramework, method getDataset.
@Nullable
@Override
public <T extends Dataset> T getDataset(final DatasetId datasetInstanceId, final Map<String, String> arguments,
                                        @Nullable final ClassLoader classLoader,
                                        final DatasetClassLoaderProvider classLoaderProvider,
                                        @Nullable final Iterable<? extends EntityId> owners,
                                        final AccessType accessType) throws DatasetManagementException, IOException {
  Principal principal = authenticationContext.getPrincipal();
  try {
    // For system datasets, skip authorization and lineage (user programs shouldn't be allowed to access
    // system datasets, CDAP-6649). For non-system datasets, always perform authorization and lineage.
    AuthorizationEnforcer enforcer;
    DefaultDatasetRuntimeContext.DatasetAccessRecorder accessRecorder;
    if (!DatasetsUtil.isUserDataset(datasetInstanceId)) {
      enforcer = SYSTEM_NAMESPACE_ENFORCER;
      accessRecorder = SYSTEM_NAMESPACE_ACCESS_RECORDER;
    } else {
      enforcer = authorizationEnforcer;
      accessRecorder = new BasicDatasetAccessRecorder(datasetInstanceId, accessType, owners);
    }
    return DefaultDatasetRuntimeContext.execute(
      enforcer, accessRecorder, principal, datasetInstanceId,
      getConstructorDefaultAnnotation(accessType), new Callable<T>() {
        @Override
        public T call() throws Exception {
          return LineageWriterDatasetFramework.super.getDataset(datasetInstanceId, arguments, classLoader,
                                                                classLoaderProvider, owners, accessType);
        }
      });
  } catch (IOException | DatasetManagementException | ServiceUnavailableException e) {
    throw e;
  } catch (Exception e) {
    throw new DatasetManagementException("Failed to create dataset instance: " + datasetInstanceId, e);
  }
}
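The catch blocks implement a rethrow-known, wrap-unknown policy: exceptions the method already declares (plus ServiceUnavailableException) propagate unchanged, and anything else raised inside the Callable is wrapped in DatasetManagementException. Below is a minimal sketch of the same policy with hypothetical names; ManagementException and execute are stand-ins, not CDAP APIs.

// Minimal sketch of the "rethrow known, wrap unknown" pattern around a Callable.
import java.io.IOException;
import java.util.concurrent.Callable;

public class RethrowOrWrapDemo {

  // Stand-in for the framework's own checked exception type.
  static class ManagementException extends Exception {
    ManagementException(String message, Throwable cause) {
      super(message, cause);
    }
  }

  static <T> T execute(Callable<T> callable) throws ManagementException, IOException {
    try {
      return callable.call();
    } catch (IOException e) {
      // Known checked exception: propagate unchanged so callers can react to it.
      throw e;
    } catch (Exception e) {
      // Anything else gets wrapped in the framework's own exception type.
      throw new ManagementException("Failed to get dataset instance", e);
    }
  }

  public static void main(String[] args) throws Exception {
    String value = execute(new Callable<String>() {
      @Override
      public String call() {
        return "dataset";
      }
    });
    System.out.println(value);
  }
}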