use of co.cask.cdap.common.guice.ZKClientModule in project cdap by caskdata.
From the class LocalLogAppenderResilientTest, the method testResilientLogging:
@Test
public void testResilientLogging() throws Exception {
  Configuration hConf = new Configuration();
  CConfiguration cConf = CConfiguration.create();
  File datasetDir = new File(tmpFolder.newFolder(), "datasetUser");
  //noinspection ResultOfMethodCallIgnored
  datasetDir.mkdirs();
  cConf.set(Constants.Dataset.Manager.OUTPUT_DIR, datasetDir.getAbsolutePath());
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.set(Constants.Dataset.Executor.ADDRESS, "localhost");
  cConf.setInt(Constants.Dataset.Executor.PORT, Networks.getRandomPort());
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf, hConf),
    new IOModule(),
    new ZKClientModule(),
    new KafkaClientModule(),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new DataFabricModules().getInMemoryModules(),
    new DataSetsModules().getStandaloneModules(),
    new DataSetServiceModules().getInMemoryModules(),
    new TransactionMetricsModule(),
    new ExploreClientModule(),
    new LoggingModules().getInMemoryModules(),
    new NamespaceClientRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(NoOpOwnerAdmin.class);
      }
    });
  TransactionManager txManager = injector.getInstance(TransactionManager.class);
  txManager.startAndWait();
  DatasetOpExecutorService opExecutorService = injector.getInstance(DatasetOpExecutorService.class);
  opExecutorService.startAndWait();
  // Start the logging before starting the dataset service.
  LoggingContextAccessor.setLoggingContext(
    new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "FLOWLET_1", "RUN", "INSTANCE"));
  String logBaseDir = "trl-log/log_files_" + new Random(System.currentTimeMillis()).nextLong();
  cConf.set(LoggingConfiguration.LOG_BASE_DIR, logBaseDir);
  cConf.setInt(LoggingConfiguration.LOG_MAX_FILE_SIZE_BYTES, 20 * 1024);
  final LogAppender appender = injector.getInstance(LocalLogAppender.class);
  new LogAppenderInitializer(appender).initialize("TestResilientLogging");
  int failureMsgCount = 3;
  final CountDownLatch failureLatch = new CountDownLatch(failureMsgCount);
  LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
  loggerContext.getStatusManager().add(new StatusListener() {
    @Override
    public void addStatusEvent(Status status) {
      if (status.getLevel() != Status.ERROR || status.getOrigin() != appender) {
        return;
      }
      Throwable cause = status.getThrowable();
      if (cause != null) {
        Throwable rootCause = Throwables.getRootCause(cause);
        if (rootCause instanceof ServiceUnavailableException) {
          String serviceName = ((ServiceUnavailableException) rootCause).getServiceName();
          if (Constants.Service.DATASET_MANAGER.equals(serviceName)) {
            failureLatch.countDown();
          }
        }
      }
    }
  });
  Logger logger = LoggerFactory.getLogger("TestResilientLogging");
  for (int i = 0; i < failureMsgCount; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  // Wait for the three append failures to happen.
  // The wait time has to be > 3 seconds because DatasetServiceClient has a 1-second discovery timeout.
  failureLatch.await(5, TimeUnit.SECONDS);
  // Start the dataset service and wait for it to become discoverable.
  DatasetService dsService = injector.getInstance(DatasetService.class);
  dsService.startAndWait();
  final CountDownLatch startLatch = new CountDownLatch(1);
  DiscoveryServiceClient discoveryClient = injector.getInstance(DiscoveryServiceClient.class);
  discoveryClient.discover(Constants.Service.DATASET_MANAGER).watchChanges(new ServiceDiscovered.ChangeListener() {
    @Override
    public void onChange(ServiceDiscovered serviceDiscovered) {
      if (!Iterables.isEmpty(serviceDiscovered)) {
        startLatch.countDown();
      }
    }
  }, Threads.SAME_THREAD_EXECUTOR);
  startLatch.await(5, TimeUnit.SECONDS);
  // Do some more logging after the service is started.
  for (int i = 5; i < 10; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  appender.stop();
  // Verify: we should have at least 5 events.
  LoggingContext loggingContext = new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "", "RUN", "INSTANCE");
  FileLogReader logTail = injector.getInstance(FileLogReader.class);
  LoggingTester.LogCallback logCallback1 = new LoggingTester.LogCallback();
  logTail.getLogPrev(loggingContext, ReadRange.LATEST, 10, Filter.EMPTY_FILTER, logCallback1);
  List<LogEvent> allEvents = logCallback1.getEvents();
  Assert.assertTrue(allEvents.toString(), allEvents.size() >= 5);
  // Finally, stop all services.
  Services.chainStop(dsService, opExecutorService, txManager);
}
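Common to every usage on this page, ZKClientModule contributes the ZooKeeper client bindings: it reads the quorum string from the CConfiguration supplied by ConfigModule and exposes a ZKClientService that must be started before any ZooKeeper-backed service is used (and stopped afterwards), as the ZKRouteStoreTest example further down shows. A minimal sketch of that wiring in isolation, with a placeholder quorum value:

CConfiguration cConf = CConfiguration.create();
cConf.set(Constants.Zookeeper.QUORUM, "localhost:2181"); // placeholder quorum
Injector injector = Guice.createInjector(new ConfigModule(cConf), new ZKClientModule());
ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
zkClientService.startAndWait();
// ... use ZooKeeper-backed services here ...
zkClientService.stopAndWait();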
use of co.cask.cdap.common.guice.ZKClientModule in project cdap by caskdata.
From the class TransactionServiceTest, the method createTxService:
static TransactionService createTxService(String zkConnectionString, int txServicePort,
                                          Configuration hConf, final File outPath,
                                          @Nullable CConfiguration cConfig) {
  final CConfiguration cConf = cConfig == null ? CConfiguration.create() : cConfig;
  // tests should use the current user for HDFS
  cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));
  cConf.set(Constants.Zookeeper.QUORUM, zkConnectionString);
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, outPath.getAbsolutePath());
  cConf.set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, Integer.toString(txServicePort));
  // we want persistence for this test
  cConf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, true);
  cConf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, false);
  final Injector injector = Guice.createInjector(
    new ConfigModule(cConf, hConf),
    new NonCustomLocationUnitTestModule().getModule(),
    new ZKClientModule(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new TransactionMetricsModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
      }
    },
    new DataFabricModules().getDistributedModules(),
    new SystemDatasetRuntimeModule().getInMemoryModules(),
    new DataSetsModules().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getNoOpModule());
  injector.getInstance(ZKClientService.class).startAndWait();
  return injector.getInstance(TransactionService.class);
}
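A hypothetical caller of this helper, sketched from pieces visible elsewhere on this page (InMemoryZKServer and Networks.getRandomPort appear in the other examples; this is not the verbatim test body):

InMemoryZKServer zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
zkServer.startAndWait();
TransactionService txService = createTxService(
  zkServer.getConnectionStr(), Networks.getRandomPort(),
  new Configuration(), tmpFolder.newFolder(), null);
txService.startAndWait();
try {
  // ... exercise the transaction service ...
} finally {
  txService.stopAndWait();
  zkServer.stopAndWait();
}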
use of co.cask.cdap.common.guice.ZKClientModule in project cdap by caskdata.
From the class ZKRouteStoreTest, the method init:
@BeforeClass
public static void init() throws IOException {
  zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build();
  zkServer.startAndWait();
  CConfiguration cConf = CConfiguration.create();
  cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
  Injector injector = Guice.createInjector(new ConfigModule(cConf), new ZKClientModule());
  zkClientService = injector.getInstance(ZKClientService.class);
  zkClientService.startAndWait();
}
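An init() like this is typically paired with an @AfterClass teardown that stops the client and server in reverse order of startup. A sketch of that counterpart (not the verbatim method from ZKRouteStoreTest):

@AfterClass
public static void finish() {
  zkClientService.stopAndWait();
  zkServer.stopAndWait();
}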
use of co.cask.cdap.common.guice.ZKClientModule in project cdap by caskdata.
From the class DistributedProgramRunnableModule, the method getCoreModules:
private List<Module> getCoreModules(final ProgramId programId, String txClientId) {
  return new ArrayList<>(Arrays.<Module>asList(
    new ConfigModule(cConf, hConf),
    new IOModule(),
    new ZKClientModule(),
    new KafkaClientModule(),
    new MetricsClientRuntimeModule().getDistributedModules(),
    new MessagingClientModule(),
    new LocationRuntimeModule().getDistributedModules(),
    new LoggingModules().getDistributedModules(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new DataFabricModules(txClientId).getDistributedModules(),
    new DataSetsModules().getDistributedModules(),
    new ViewAdminModules().getDistributedModules(),
    new StreamAdminModules().getDistributedModules(),
    new NotificationFeedClientModule(),
    new AuditModule().getDistributedModules(),
    new NamespaceClientRuntimeModule().getDistributedModules(),
    new AuthorizationEnforcementModule().getDistributedModules(),
    new SecureStoreModules().getDistributedModules(),
    new AbstractModule() {
      @Override
      protected void configure() {
        // For binding queue readers
        bind(QueueReaderFactory.class).in(Scopes.SINGLETON);
        // For binding dataset transaction support
        install(new DataFabricFacadeModule());
        bind(ProgramStateWriter.class).to(MessagingProgramStateWriter.class);
        bind(RuntimeStore.class).to(RemoteRuntimeStore.class);
        // For binding StreamWriter
        install(createStreamFactoryModule());
        // No impersonation needs to be performed from within user programs.
        bind(UGIProvider.class).to(CurrentUGIProvider.class).in(Scopes.SINGLETON);
        // Bind PrivilegesManager to a remote implementation so it does not need to instantiate the authorizer.
        bind(PrivilegesManager.class).to(RemotePrivilegesManager.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        // Bind ProgramId to the passed-in programId so that it can be retrieved later when needed,
        // for example by ProgramDiscoveryExploreClient. Binding to an instance is fine here because
        // the programId is guaranteed not to change throughout the lifecycle of this program runnable.
        bind(ProgramId.class).toInstance(programId);
        // Bind the explore client to ProgramDiscoveryExploreClient, which is aware of the programId.
        bind(ExploreClient.class).to(ProgramDiscoveryExploreClient.class).in(Scopes.SINGLETON);
        // Bind the ArtifactManager implementation.
        install(new FactoryModuleBuilder()
                  .implement(ArtifactManager.class, RemoteArtifactManager.class)
                  .build(ArtifactManagerFactory.class));
        // Bind the PluginFinder implementation.
        bind(PluginFinder.class).to(RemotePluginFinder.class);
      }
    }));
}
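A sketch of how the returned list might be consumed; the actual call sites in DistributedProgramRunnableModule may append further program-type-specific modules before building the injector:

// Hypothetical assembly; the txClientId value is a placeholder.
List<Module> modules = getCoreModules(programId, "tx-client-id");
Injector injector = Guice.createInjector(modules);
// ZKClientModule is among the core modules, so the ZooKeeper client can be started from here.
ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
zkClientService.startAndWait();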
use of co.cask.cdap.common.guice.ZKClientModule in project cdap by caskdata.
From the class TwillAppLifecycleEventHandler, the method initialize:
@Override
public void initialize(EventHandlerContext context) {
  super.initialize(context);
  this.runningPublished = new AtomicBoolean();
  this.twillRunId = context.getRunId();
  this.programRunId = GSON.fromJson(context.getSpecification().getConfigs().get("programRunId"),
                                    ProgramRunId.class);
  // Fetch cConf and hConf from resources jar
  File cConfFile = new File("resources.jar/resources/" + CDAP_CONF_FILE_NAME);
  File hConfFile = new File("resources.jar/resources/" + HADOOP_CONF_FILE_NAME);
  if (cConfFile.exists() && hConfFile.exists()) {
    CConfiguration cConf = CConfiguration.create();
    cConf.clear();
    Configuration hConf = new Configuration();
    hConf.clear();
    try {
      cConf.addResource(cConfFile.toURI().toURL());
      hConf.addResource(hConfFile.toURI().toURL());
      // Create the injector to inject a program state writer
      Injector injector = Guice.createInjector(
        new ConfigModule(cConf, hConf),
        new ZKClientModule(),
        new KafkaClientModule(),
        new DiscoveryRuntimeModule().getDistributedModules(),
        new MessagingClientModule(),
        new AbstractModule() {
          @Override
          protected void configure() {
            bind(ProgramStateWriter.class).to(MessagingProgramStateWriter.class);
          }
        });
      zkClientService = injector.getInstance(ZKClientService.class);
      zkClientService.startAndWait();
      this.programStateWriter = injector.getInstance(ProgramStateWriter.class);
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  } else {
    LOG.warn("{} and {} were not found in the resources.jar. Not recording program states",
             CDAP_CONF_FILE_NAME, HADOOP_CONF_FILE_NAME);
    this.programStateWriter = new NoOpProgramStateWriter();
  }
}
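Because initialize() starts the ZKClientService, the handler has to stop it when the Twill application shuts down. A sketch of the matching cleanup via the EventHandler destroy() hook (the verbatim cleanup in TwillAppLifecycleEventHandler is not shown on this page):

@Override
public void destroy() {
  if (zkClientService != null) {
    zkClientService.stopAndWait();
  }
}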