Use of org.apache.log4j.Layout in project symmetric-ds by JumpMind.
The class SnapshotUtil, method findSymmetricLogFile.
@SuppressWarnings("unchecked")
public static Map<File, Layout> findSymmetricLogFile() {
    Enumeration<Appender> appenders = org.apache.log4j.Logger.getRootLogger().getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = appenders.nextElement();
        if (appender instanceof FileAppender) {
            FileAppender fileAppender = (FileAppender) appender;
            if (fileAppender != null) {
                File file = new File(fileAppender.getFile());
                if (file != null && file.exists()) {
                    Map<File, Layout> matches = new HashMap<File, Layout>();
                    matches.put(file, fileAppender.getLayout());
                    return matches;
                }
            }
        }
    }
    return null;
}
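For illustration, a caller could use the single entry in the returned map to copy the active log file somewhere, for example into a snapshot directory. This is a minimal sketch under that assumption, not code from SnapshotUtil; the helper name copyActiveLogFile is hypothetical.
private static void copyActiveLogFile(File targetDir) {
    // Locate the log file behind the root logger's FileAppender, if there is exactly one.
    Map<File, Layout> matches = findSymmetricLogFile();
    if (matches != null && matches.size() == 1) {
        File logFile = matches.keySet().iterator().next();
        try {
            // Copy it alongside the other snapshot artifacts (commons-io FileUtils).
            FileUtils.copyFileToDirectory(logFile, targetDir);
        } catch (IOException e) {
            log.warn("Failed to copy " + logFile.getName() + " to " + targetDir, e);
        }
    }
}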
Use of org.apache.log4j.Layout in project symmetric-ds by JumpMind.
The class SnapshotUtil, method createSnapshot.
public static File createSnapshot(ISymmetricEngine engine) {
    String dirName = engine.getEngineName().replaceAll(" ", "-") + "-" + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
    IParameterService parameterService = engine.getParameterService();
    File tmpDir = new File(parameterService.getTempDirectory(), dirName);
    tmpDir.mkdirs();
    File logDir = null;
    String parameterizedLogDir = parameterService.getString("server.log.dir");
    if (isNotBlank(parameterizedLogDir)) {
        logDir = new File(parameterizedLogDir);
    }
    if (logDir != null && logDir.exists()) {
        log.info("Using server.log.dir setting as the location of the log files");
    } else {
        logDir = new File("logs");
        if (!logDir.exists()) {
            Map<File, Layout> matches = findSymmetricLogFile();
            if (matches != null && matches.size() == 1) {
                logDir = matches.keySet().iterator().next().getParentFile();
            }
        }
        if (!logDir.exists()) {
            logDir = new File("../logs");
        }
        if (!logDir.exists()) {
            logDir = new File("target");
        }
        if (logDir.exists()) {
            File[] files = logDir.listFiles();
            if (files != null) {
                for (File file : files) {
                    if (file.getName().toLowerCase().endsWith(".log")) {
                        try {
                            FileUtils.copyFileToDirectory(file, tmpDir);
                        } catch (IOException e) {
                            log.warn("Failed to copy " + file.getName() + " to the snapshot directory", e);
                        }
                    }
                }
            }
        }
    }
    FileWriter fwriter = null;
    try {
        fwriter = new FileWriter(new File(tmpDir, "config-export.csv"));
        engine.getDataExtractorService().extractConfigurationStandalone(engine.getNodeService().findIdentity(), fwriter, TableConstants.SYM_NODE, TableConstants.SYM_NODE_SECURITY, TableConstants.SYM_NODE_IDENTITY, TableConstants.SYM_NODE_HOST, TableConstants.SYM_NODE_CHANNEL_CTL, TableConstants.SYM_CONSOLE_USER, TableConstants.SYM_MONITOR_EVENT, TableConstants.SYM_CONSOLE_EVENT);
    } catch (Exception e) {
        log.warn("Failed to export symmetric configuration", e);
    } finally {
        IOUtils.closeQuietly(fwriter);
    }
    File serviceConfFile = new File("conf/sym_service.conf");
    try {
        if (serviceConfFile.exists()) {
            FileUtils.copyFileToDirectory(serviceConfFile, tmpDir);
        }
    } catch (Exception e) {
        log.warn("Failed to copy " + serviceConfFile.getName() + " to the snapshot directory", e);
    }
    TreeSet<Table> tables = new TreeSet<Table>();
    FileOutputStream fos = null;
    try {
        ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
        List<TriggerHistory> triggerHistories = triggerRouterService.getActiveTriggerHistories();
        for (TriggerHistory triggerHistory : triggerHistories) {
            Table table = engine.getDatabasePlatform().getTableFromCache(triggerHistory.getSourceCatalogName(), triggerHistory.getSourceSchemaName(), triggerHistory.getSourceTableName(), false);
            if (table != null && !table.getName().toUpperCase().startsWith(engine.getSymmetricDialect().getTablePrefix().toUpperCase())) {
                tables.add(table);
            }
        }
        List<Trigger> triggers = triggerRouterService.getTriggers();
        for (Trigger trigger : triggers) {
            Table table = engine.getDatabasePlatform().getTableFromCache(trigger.getSourceCatalogName(), trigger.getSourceSchemaName(), trigger.getSourceTableName(), false);
            if (table != null) {
                tables.add(table);
            }
        }
        fos = new FileOutputStream(new File(tmpDir, "table-definitions.xml"));
        DbExport export = new DbExport(engine.getDatabasePlatform());
        export.setFormat(Format.XML);
        export.setNoData(true);
        export.exportTables(fos, tables.toArray(new Table[tables.size()]));
    } catch (Exception e) {
        log.warn("Failed to export table definitions", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    String tablePrefix = engine.getTablePrefix();
    DbExport export = new DbExport(engine.getDatabasePlatform());
    export.setFormat(Format.CSV);
    export.setNoCreateInfo(true);
    extract(export, new File(tmpDir, "sym_identity.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_IDENTITY));
    extract(export, new File(tmpDir, "sym_node.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE));
    extract(export, new File(tmpDir, "sym_node_security.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_SECURITY));
    extract(export, new File(tmpDir, "sym_node_host.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_HOST));
    extract(export, new File(tmpDir, "sym_trigger_hist.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_TRIGGER_HIST));
    try {
        if (!parameterService.is(ParameterConstants.CLUSTER_LOCKING_ENABLED)) {
            engine.getNodeCommunicationService().persistToTableForSnapshot();
            engine.getClusterService().persistToTableForSnapshot();
        }
    } catch (Exception e) {
        log.warn("Unable to add SYM_NODE_COMMUNICATION to the snapshot.", e);
    }
    extract(export, new File(tmpDir, "sym_lock.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_LOCK));
    extract(export, new File(tmpDir, "sym_node_communication.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_COMMUNICATION));
    extract(export, 10000, "order by create_time desc", new File(tmpDir, "sym_outgoing_batch.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_OUTGOING_BATCH));
    extract(export, 10000, "where status != 'OK' order by create_time", new File(tmpDir, "sym_outgoing_batch_not_ok.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_OUTGOING_BATCH));
    extract(export, 10000, "order by create_time desc", new File(tmpDir, "sym_incoming_batch.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_INCOMING_BATCH));
    extract(export, 10000, "where status != 'OK' order by create_time", new File(tmpDir, "sym_incoming_batch_not_ok.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_INCOMING_BATCH));
    extract(export, 5000, "order by start_id, end_id desc", new File(tmpDir, "sym_data_gap.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_DATA_GAP));
    extract(export, new File(tmpDir, "sym_table_reload_request.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_TABLE_RELOAD_REQUEST));
    extract(export, 5000, "order by relative_dir, file_name", new File(tmpDir, "sym_file_snapshot.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_FILE_SNAPSHOT));
    extract(export, new File(tmpDir, "sym_console_event.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_CONSOLE_EVENT));
    extract(export, new File(tmpDir, "sym_monitor_event.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_MONITOR_EVENT));
    extract(export, new File(tmpDir, "sym_extract_request.csv"), TableConstants.getTableName(tablePrefix, TableConstants.SYM_EXTRACT_REQUEST));
    if (engine.getSymmetricDialect() instanceof FirebirdSymmetricDialect) {
        final String[] monTables = { "mon$database", "mon$attachments", "mon$transactions", "mon$statements", "mon$io_stats", "mon$record_stats", "mon$memory_usage", "mon$call_stack", "mon$context_variables" };
        for (String table : monTables) {
            extract(export, new File(tmpDir, "firebird-" + table + ".csv"), table);
        }
    }
    fwriter = null;
    try {
        fwriter = new FileWriter(new File(tmpDir, "threads.txt"));
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        long[] threadIds = threadBean.getAllThreadIds();
        for (long l : threadIds) {
            ThreadInfo info = threadBean.getThreadInfo(l, 100);
            if (info != null) {
                String threadName = info.getThreadName();
                fwriter.append(StringUtils.rightPad(threadName, THREAD_INDENT_SPACE));
                fwriter.append(AppUtils.formatStackTrace(info.getStackTrace(), THREAD_INDENT_SPACE, false));
                fwriter.append("\n");
            }
        }
    } catch (Exception e) {
        log.warn("Failed to export thread information", e);
    } finally {
        IOUtils.closeQuietly(fwriter);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "parameters.properties"));
        Properties effectiveParameters = engine.getParameterService().getAllParameters();
        SortedProperties parameters = new SortedProperties();
        parameters.putAll(effectiveParameters);
        parameters.remove("db.password");
        parameters.store(fos, "parameters.properties");
    } catch (IOException e) {
        log.warn("Failed to export parameter information", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "parameters-changed.properties"));
        Properties defaultParameters = new Properties();
        InputStream in = SnapshotUtil.class.getResourceAsStream("/symmetric-default.properties");
        defaultParameters.load(in);
        IOUtils.closeQuietly(in);
        in = SnapshotUtil.class.getResourceAsStream("/symmetric-console-default.properties");
        if (in != null) {
            defaultParameters.load(in);
            IOUtils.closeQuietly(in);
        }
        Properties effectiveParameters = engine.getParameterService().getAllParameters();
        Properties changedParameters = new SortedProperties();
        Map<String, ParameterMetaData> parameters = ParameterConstants.getParameterMetaData();
        for (String key : parameters.keySet()) {
            String defaultValue = defaultParameters.getProperty((String) key);
            String currentValue = effectiveParameters.getProperty((String) key);
            if (defaultValue == null && currentValue != null || (defaultValue != null && !defaultValue.equals(currentValue))) {
                changedParameters.put(key, currentValue == null ? "" : currentValue);
            }
        }
        changedParameters.remove("db.password");
        changedParameters.store(fos, "parameters-changed.properties");
    } catch (Exception e) {
        log.warn("Failed to export parameters-changed information", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    writeRuntimeStats(engine, tmpDir);
    writeJobsStats(engine, tmpDir);
    if ("true".equals(System.getProperty(SystemConstants.SYSPROP_STANDALONE_WEB))) {
        writeDirectoryListing(engine, tmpDir);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "system.properties"));
        SortedProperties props = new SortedProperties();
        props.putAll(System.getProperties());
        props.store(fos, "system.properties");
    } catch (Exception e) {
        log.warn("Failed to export system properties", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    try {
        File jarFile = new File(getSnapshotDirectory(engine), tmpDir.getName() + ".zip");
        JarBuilder builder = new JarBuilder(tmpDir, jarFile, new File[] { tmpDir }, Version.version());
        builder.build();
        FileUtils.deleteDirectory(tmpDir);
        return jarFile;
    } catch (Exception e) {
        throw new IoException("Failed to package snapshot files into archive", e);
    }
}
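As a usage sketch, assuming engine refers to an already-started ISymmetricEngine, the method returns the packaged .zip that it writes into the engine's snapshot directory:
// Minimal usage sketch; "engine" is assumed to be a started ISymmetricEngine.
File snapshotZip = SnapshotUtil.createSnapshot(engine);
log.info("Wrote snapshot to " + snapshotZip.getAbsolutePath());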
Use of org.apache.log4j.Layout in project ddf by codice.
The class ReliableResourceInputStreamTest, method testInputStreamReadRetry.
@Test
public void testInputStreamReadRetry() throws Exception {
    LOGGER.info("Testing testInputStreamReadRetry()");
    ReliableResourceInputStream is = new ReliableResourceInputStream(fbos, countingFbos, downloadState, downloadIdentifier, resourceResponse);
    is.setCallableAndItsFuture(reliableResourceCallable, downloadFuture);
    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(is.getClass());
    logger.setLevel(Level.TRACE);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Layout layout = new SimpleLayout();
    Appender appender = new WriterAppender(layout, out);
    logger.addAppender(appender);
    //downloadState.setDownloadState(DownloadManagerState.DownloadState.IN_PROGRESS);
    try {
        // Write zero bytes to FileBackedOutputStream
        byte[] bytes = new String("").getBytes();
        countingFbos.write(bytes, 0, bytes.length);
        // Attempt to read from FileBackedOutputStream
        final byte[] buffer = new byte[50];
        int numBytesRead = is.read(buffer, 0, 50);
        // Verify bytes read is -1
        assertThat(numBytesRead, is(-1));
        // Verify read inputstream performed twice
        String logMsg = out.toString();
        assertThat(logMsg, is(notNullValue()));
        assertThat(logMsg, containsString("First time reading inputstream"));
        //assertThat(logMsg, containsString("Retry reading inputstream"));
    } finally {
        logger.removeAppender(appender);
    }
}
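The capture idiom in this test — attach a WriterAppender backed by an in-memory stream, exercise the code under test, assert on the buffered output, then detach the appender — can be factored into a small helper. The following is an illustrative sketch, not part of DDF; the class name Log4jCapture is hypothetical, and it assumes imports of org.apache.log4j.Appender, Level, SimpleLayout, WriterAppender and java.io.ByteArrayOutputStream.
class Log4jCapture {
    private final org.apache.log4j.Logger logger;
    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private final Appender appender = new WriterAppender(new SimpleLayout(), buffer);

    Log4jCapture(Class<?> clazz, Level level) {
        // Route everything the class logs at or above the given level into the buffer.
        logger = org.apache.log4j.Logger.getLogger(clazz);
        logger.setLevel(level);
        logger.addAppender(appender);
    }

    String messages() {
        // Everything logged since construction, rendered by SimpleLayout.
        return buffer.toString();
    }

    void close() {
        // Detach the appender so later tests are not affected.
        logger.removeAppender(appender);
    }
}
With such a helper, the try/finally bookkeeping above collapses to constructing a Log4jCapture before the read and calling close() afterwards.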
Use of org.apache.log4j.Layout in project zookeeper by apache.
The class QuorumPeerMainTest, method testInconsistentPeerType.
/**
 * Verify handling of inconsistent peer type
 */
@Test
public void testInconsistentPeerType() throws Exception {
    ClientBase.setupTestEnv();
    // setup the logger to capture all logs
    Layout layout = Logger.getRootLogger().getAppender("CONSOLE").getLayout();
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    WriterAppender appender = new WriterAppender(layout, os);
    appender.setThreshold(Level.INFO);
    Logger qlogger = Logger.getLogger("org.apache.zookeeper.server.quorum");
    qlogger.addAppender(appender);
    // servers list, but there's no "peerType=observer" token in config
    try {
        final int CLIENT_PORT_QP1 = PortAssignment.unique();
        final int CLIENT_PORT_QP2 = PortAssignment.unique();
        final int CLIENT_PORT_QP3 = PortAssignment.unique();
        String quorumCfgSection = "server.1=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP1 + "\nserver.2=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP2 + "\nserver.3=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":observer" + ";" + CLIENT_PORT_QP3;
        MainThread q1 = new MainThread(1, CLIENT_PORT_QP1, quorumCfgSection);
        MainThread q2 = new MainThread(2, CLIENT_PORT_QP2, quorumCfgSection);
        MainThread q3 = new MainThread(3, CLIENT_PORT_QP3, quorumCfgSection);
        q1.start();
        q2.start();
        q3.start();
        Assert.assertTrue("waiting for server 1 being up", ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP1, CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 2 being up", ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP2, CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 3 being up", ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP3, CONNECTION_TIMEOUT));
        q1.shutdown();
        q2.shutdown();
        q3.shutdown();
        Assert.assertTrue("waiting for server 1 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP1, ClientBase.CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 2 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP2, ClientBase.CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 3 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP3, ClientBase.CONNECTION_TIMEOUT));
    } finally {
        qlogger.removeAppender(appender);
    }
    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
    String line;
    boolean warningPresent = false;
    boolean defaultedToObserver = false;
    Pattern pWarn = Pattern.compile(".*Peer type from servers list.* doesn't match peerType.*");
    Pattern pObserve = Pattern.compile(".*OBSERVING.*");
    while ((line = r.readLine()) != null) {
        if (pWarn.matcher(line).matches()) {
            warningPresent = true;
        }
        if (pObserve.matcher(line).matches()) {
            defaultedToObserver = true;
        }
        if (warningPresent && defaultedToObserver) {
            break;
        }
    }
    Assert.assertTrue("Should warn about inconsistent peer type", warningPresent && defaultedToObserver);
}
Use of org.apache.log4j.Layout in project zookeeper by apache.
The class QuorumPeerMainTest, method testQuorumDefaults.
/**
 * Verify handling of quorum defaults
 * * default electionAlg is fast leader election
 */
@Test
public void testQuorumDefaults() throws Exception {
    ClientBase.setupTestEnv();
    // setup the logger to capture all logs
    Layout layout = Logger.getRootLogger().getAppender("CONSOLE").getLayout();
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    WriterAppender appender = new WriterAppender(layout, os);
    appender.setImmediateFlush(true);
    appender.setThreshold(Level.INFO);
    Logger zlogger = Logger.getLogger("org.apache.zookeeper");
    zlogger.addAppender(appender);
    try {
        final int CLIENT_PORT_QP1 = PortAssignment.unique();
        final int CLIENT_PORT_QP2 = PortAssignment.unique();
        String quorumCfgSection = "server.1=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP1 + "\nserver.2=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ";" + CLIENT_PORT_QP2;
        MainThread q1 = new MainThread(1, CLIENT_PORT_QP1, quorumCfgSection);
        MainThread q2 = new MainThread(2, CLIENT_PORT_QP2, quorumCfgSection);
        q1.start();
        q2.start();
        Assert.assertTrue("waiting for server 1 being up", ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP1, CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 2 being up", ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_QP2, CONNECTION_TIMEOUT));
        q1.shutdown();
        q2.shutdown();
        Assert.assertTrue("waiting for server 1 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP1, ClientBase.CONNECTION_TIMEOUT));
        Assert.assertTrue("waiting for server 2 down", ClientBase.waitForServerDown("127.0.0.1:" + CLIENT_PORT_QP2, ClientBase.CONNECTION_TIMEOUT));
    } finally {
        zlogger.removeAppender(appender);
    }
    os.close();
    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
    String line;
    boolean found = false;
    Pattern p = Pattern.compile(".*FastLeaderElection.*");
    while ((line = r.readLine()) != null) {
        found = p.matcher(line).matches();
        if (found) {
            break;
        }
    }
    Assert.assertTrue("fastleaderelection used", found);
}
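Both ZooKeeper tests finish with the same line-by-line scan of the captured output. Here is a hedged sketch of how that loop could be factored into a shared helper; the method name containsPattern is hypothetical and not part of ZooKeeper's test code.
private static boolean containsPattern(String logOutput, String regex) throws IOException {
    // Return true if any line of the captured log output matches the regular expression.
    Pattern pattern = Pattern.compile(regex);
    LineNumberReader reader = new LineNumberReader(new StringReader(logOutput));
    String line;
    while ((line = reader.readLine()) != null) {
        if (pattern.matcher(line).matches()) {
            return true;
        }
    }
    return false;
}
With that helper, the final assertion in testQuorumDefaults reduces to Assert.assertTrue("fastleaderelection used", containsPattern(os.toString(), ".*FastLeaderElection.*"));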