Use of org.jumpmind.symmetric.model.TriggerHistory in the symmetric-ds project by JumpMind.
From the class DataService, method insertScriptEvent:
public void insertScriptEvent(ISqlTransaction transaction, String channelId, Node targetNode, String script,
        boolean isLoad, long loadId, String createBy) {
    TriggerHistory history = engine.getTriggerRouterService().findTriggerHistoryForGenericSync();
    Trigger trigger = engine.getTriggerRouterService().getTriggerById(history.getTriggerId(), false);
    String reloadChannelId = getReloadChannelIdForTrigger(trigger, engine.getConfigurationService().getChannels(false));
    Data data = new Data(history.getSourceTableName(), DataEventType.BSH, CsvUtils.escapeCsvData(script), null,
            history, isLoad ? reloadChannelId : channelId, null, null);
    data.setNodeList(targetNode.getNodeId());
    if (isLoad) {
        insertDataAndDataEventAndOutgoingBatch(transaction, data, targetNode.getNodeId(),
                Constants.UNKNOWN_ROUTER_ID, isLoad, loadId, createBy, Status.NE);
    } else {
        insertData(transaction, data);
    }
}
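For context, a minimal sketch of how this method might be called to push a BeanShell script to one node, given an ISymmetricEngine named engine. The node id "001" and the createBy value are placeholders, and it is assumed the DataService is reachable via engine.getDataService(); none of this comes from the snippet above.

// Hypothetical usage sketch: queue a BSH script event for a single target
// node outside of any load (isLoad = false, so loadId is unused and -1).
ISqlTransaction transaction = null;
try {
    transaction = engine.getDatabasePlatform().getSqlTemplate().startSqlTransaction();
    Node targetNode = engine.getNodeService().findNode("001"); // assumed node id
    String script = "System.out.println(\"hello from the target node\");";
    engine.getDataService().insertScriptEvent(transaction, Constants.CHANNEL_CONFIG,
            targetNode, script, false, -1, "admin"); // "admin" is a placeholder createBy
    transaction.commit();
} catch (RuntimeException ex) {
    if (transaction != null) {
        transaction.rollback();
    }
    throw ex;
} finally {
    if (transaction != null) {
        transaction.close();
    }
}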
Use of org.jumpmind.symmetric.model.TriggerHistory in the symmetric-ds project by JumpMind.
From the class SnapshotUtil, method createSnapshot:
/*
 * Collects log files, configuration, table definitions, runtime tables,
 * thread dumps, and JVM properties into a single archive for support.
 */
public static File createSnapshot(ISymmetricEngine engine) {
    String dirName = engine.getEngineName().replaceAll(" ", "-") + "-"
            + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
    IParameterService parameterService = engine.getParameterService();
    File tmpDir = new File(parameterService.getTempDirectory(), dirName);
    tmpDir.mkdirs();
    // Locate the log directory, preferring an explicit server.log.dir setting
    // and falling back through the usual install layouts.
    File logDir = null;
    String parameterizedLogDir = parameterService.getString("server.log.dir");
    if (isNotBlank(parameterizedLogDir)) {
        logDir = new File(parameterizedLogDir);
    }
    if (logDir != null && logDir.exists()) {
        log.info("Using server.log.dir setting as the location of the log files");
    } else {
        logDir = new File("logs");
        if (!logDir.exists()) {
            Map<File, Layout> matches = findSymmetricLogFile();
            if (matches != null && matches.size() == 1) {
                logDir = matches.keySet().iterator().next().getParentFile();
            }
        }
        if (!logDir.exists()) {
            logDir = new File("../logs");
        }
        if (!logDir.exists()) {
            logDir = new File("target");
        }
        if (logDir.exists()) {
            File[] files = logDir.listFiles();
            if (files != null) {
                for (File file : files) {
                    if (file.getName().toLowerCase().endsWith(".log")) {
                        try {
                            FileUtils.copyFileToDirectory(file, tmpDir);
                        } catch (IOException e) {
                            log.warn("Failed to copy " + file.getName() + " to the snapshot directory", e);
                        }
                    }
                }
            }
        }
    }
    FileWriter fwriter = null;
    try {
        fwriter = new FileWriter(new File(tmpDir, "config-export.csv"));
        engine.getDataExtractorService().extractConfigurationStandalone(engine.getNodeService().findIdentity(),
                fwriter, TableConstants.SYM_NODE, TableConstants.SYM_NODE_SECURITY, TableConstants.SYM_NODE_IDENTITY,
                TableConstants.SYM_NODE_HOST, TableConstants.SYM_NODE_CHANNEL_CTL, TableConstants.SYM_CONSOLE_USER,
                TableConstants.SYM_MONITOR_EVENT, TableConstants.SYM_CONSOLE_EVENT);
    } catch (Exception e) {
        log.warn("Failed to export symmetric configuration", e);
    } finally {
        IOUtils.closeQuietly(fwriter);
    }
    File serviceConfFile = new File("conf/sym_service.conf");
    try {
        if (serviceConfFile.exists()) {
            FileUtils.copyFileToDirectory(serviceConfFile, tmpDir);
        }
    } catch (Exception e) {
        log.warn("Failed to copy " + serviceConfFile.getName() + " to the snapshot directory", e);
    }
    // Export the definitions of all tables covered by triggers, skipping
    // SymmetricDS's own runtime tables.
    TreeSet<Table> tables = new TreeSet<Table>();
    FileOutputStream fos = null;
    try {
        ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
        List<TriggerHistory> triggerHistories = triggerRouterService.getActiveTriggerHistories();
        for (TriggerHistory triggerHistory : triggerHistories) {
            Table table = engine.getDatabasePlatform().getTableFromCache(triggerHistory.getSourceCatalogName(),
                    triggerHistory.getSourceSchemaName(), triggerHistory.getSourceTableName(), false);
            if (table != null && !table.getName().toUpperCase()
                    .startsWith(engine.getSymmetricDialect().getTablePrefix().toUpperCase())) {
                tables.add(table);
            }
        }
        List<Trigger> triggers = triggerRouterService.getTriggers();
        for (Trigger trigger : triggers) {
            Table table = engine.getDatabasePlatform().getTableFromCache(trigger.getSourceCatalogName(),
                    trigger.getSourceSchemaName(), trigger.getSourceTableName(), false);
            if (table != null) {
                tables.add(table);
            }
        }
        fos = new FileOutputStream(new File(tmpDir, "table-definitions.xml"));
        DbExport export = new DbExport(engine.getDatabasePlatform());
        export.setFormat(Format.XML);
        export.setNoData(true);
        export.exportTables(fos, tables.toArray(new Table[tables.size()]));
    } catch (Exception e) {
        log.warn("Failed to export table definitions", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    String tablePrefix = engine.getTablePrefix();
    DbExport export = new DbExport(engine.getDatabasePlatform());
    export.setFormat(Format.CSV);
    export.setNoCreateInfo(true);
    extract(export, new File(tmpDir, "sym_identity.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_IDENTITY));
    extract(export, new File(tmpDir, "sym_node.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE));
    extract(export, new File(tmpDir, "sym_node_security.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_SECURITY));
    extract(export, new File(tmpDir, "sym_node_host.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_HOST));
    extract(export, new File(tmpDir, "sym_trigger_hist.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_TRIGGER_HIST));
    try {
        if (!parameterService.is(ParameterConstants.CLUSTER_LOCKING_ENABLED)) {
            engine.getNodeCommunicationService().persistToTableForSnapshot();
            engine.getClusterService().persistToTableForSnapshot();
        }
    } catch (Exception e) {
        log.warn("Unable to add SYM_NODE_COMMUNICATION to the snapshot.", e);
    }
    extract(export, new File(tmpDir, "sym_lock.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_LOCK));
    extract(export, new File(tmpDir, "sym_node_communication.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_NODE_COMMUNICATION));
    extract(export, 10000, "order by create_time desc", new File(tmpDir, "sym_outgoing_batch.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_OUTGOING_BATCH));
    extract(export, 10000, "where status != 'OK' order by create_time", new File(tmpDir, "sym_outgoing_batch_not_ok.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_OUTGOING_BATCH));
    extract(export, 10000, "order by create_time desc", new File(tmpDir, "sym_incoming_batch.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_INCOMING_BATCH));
    extract(export, 10000, "where status != 'OK' order by create_time", new File(tmpDir, "sym_incoming_batch_not_ok.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_INCOMING_BATCH));
    extract(export, 5000, "order by start_id, end_id desc", new File(tmpDir, "sym_data_gap.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_DATA_GAP));
    extract(export, new File(tmpDir, "sym_table_reload_request.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_TABLE_RELOAD_REQUEST));
    extract(export, 5000, "order by relative_dir, file_name", new File(tmpDir, "sym_file_snapshot.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_FILE_SNAPSHOT));
    extract(export, new File(tmpDir, "sym_console_event.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_CONSOLE_EVENT));
    extract(export, new File(tmpDir, "sym_monitor_event.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_MONITOR_EVENT));
    extract(export, new File(tmpDir, "sym_extract_request.csv"),
            TableConstants.getTableName(tablePrefix, TableConstants.SYM_EXTRACT_REQUEST));
    if (engine.getSymmetricDialect() instanceof FirebirdSymmetricDialect) {
        final String[] monTables = { "mon$database", "mon$attachments", "mon$transactions", "mon$statements",
                "mon$io_stats", "mon$record_stats", "mon$memory_usage", "mon$call_stack", "mon$context_variables" };
        for (String table : monTables) {
            extract(export, new File(tmpDir, "firebird-" + table + ".csv"), table);
        }
    }
    // Dump a stack trace for every live thread.
    fwriter = null;
    try {
        fwriter = new FileWriter(new File(tmpDir, "threads.txt"));
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        long[] threadIds = threadBean.getAllThreadIds();
        for (long l : threadIds) {
            ThreadInfo info = threadBean.getThreadInfo(l, 100);
            if (info != null) {
                String threadName = info.getThreadName();
                fwriter.append(StringUtils.rightPad(threadName, THREAD_INDENT_SPACE));
                fwriter.append(AppUtils.formatStackTrace(info.getStackTrace(), THREAD_INDENT_SPACE, false));
                fwriter.append("\n");
            }
        }
    } catch (Exception e) {
        log.warn("Failed to export thread information", e);
    } finally {
        IOUtils.closeQuietly(fwriter);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "parameters.properties"));
        Properties effectiveParameters = engine.getParameterService().getAllParameters();
        SortedProperties parameters = new SortedProperties();
        parameters.putAll(effectiveParameters);
        parameters.remove("db.password");
        parameters.store(fos, "parameters.properties");
    } catch (IOException e) {
        log.warn("Failed to export parameter information", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "parameters-changed.properties"));
        Properties defaultParameters = new Properties();
        InputStream in = SnapshotUtil.class.getResourceAsStream("/symmetric-default.properties");
        defaultParameters.load(in);
        IOUtils.closeQuietly(in);
        in = SnapshotUtil.class.getResourceAsStream("/symmetric-console-default.properties");
        if (in != null) {
            defaultParameters.load(in);
            IOUtils.closeQuietly(in);
        }
        Properties effectiveParameters = engine.getParameterService().getAllParameters();
        Properties changedParameters = new SortedProperties();
        Map<String, ParameterMetaData> parameters = ParameterConstants.getParameterMetaData();
        // Record only the parameters whose effective value differs from the shipped defaults.
        for (String key : parameters.keySet()) {
            String defaultValue = defaultParameters.getProperty(key);
            String currentValue = effectiveParameters.getProperty(key);
            if ((defaultValue == null && currentValue != null)
                    || (defaultValue != null && !defaultValue.equals(currentValue))) {
                changedParameters.put(key, currentValue == null ? "" : currentValue);
            }
        }
        changedParameters.remove("db.password");
        changedParameters.store(fos, "parameters-changed.properties");
    } catch (Exception e) {
        log.warn("Failed to export parameters-changed information", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    writeRuntimeStats(engine, tmpDir);
    writeJobsStats(engine, tmpDir);
    if ("true".equals(System.getProperty(SystemConstants.SYSPROP_STANDALONE_WEB))) {
        writeDirectoryListing(engine, tmpDir);
    }
    fos = null;
    try {
        fos = new FileOutputStream(new File(tmpDir, "system.properties"));
        SortedProperties props = new SortedProperties();
        props.putAll(System.getProperties());
        props.store(fos, "system.properties");
    } catch (Exception e) {
        log.warn("Failed to export system properties", e);
    } finally {
        IOUtils.closeQuietly(fos);
    }
    try {
        File jarFile = new File(getSnapshotDirectory(engine), tmpDir.getName() + ".zip");
        JarBuilder builder = new JarBuilder(tmpDir, jarFile, new File[] { tmpDir }, Version.version());
        builder.build();
        FileUtils.deleteDirectory(tmpDir);
        return jarFile;
    } catch (Exception e) {
        throw new IoException("Failed to package snapshot files into archive", e);
    }
}
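A short sketch of invoking the snapshot from application code follows. The ClientSymmetricEngine bootstrap and the properties-file path are assumptions about the caller's environment; SnapshotUtil itself only needs a started ISymmetricEngine.

// Hypothetical usage sketch: capture a diagnostic snapshot of a running engine.
ISymmetricEngine engine = new ClientSymmetricEngine(new File("conf/engine.properties")); // assumed bootstrap
engine.start();
try {
    File snapshotZip = SnapshotUtil.createSnapshot(engine);
    System.out.println("Snapshot written to " + snapshotZip.getAbsolutePath());
} finally {
    engine.stop();
}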
Use of org.jumpmind.symmetric.model.TriggerHistory in the symmetric-ds project by JumpMind.
From the class DataService, method insertReloadEvents:
public void insertReloadEvents(Node targetNode, boolean reverse, List<TableReloadRequest> reloadRequests,
        ProcessInfo processInfo) {
    if (engine.getClusterService().lock(ClusterConstants.SYNCTRIGGERS)) {
        try {
            synchronized (engine.getTriggerRouterService()) {
                engine.getClusterService().lock(ClusterConstants.SYNCTRIGGERS);
                boolean isFullLoad = reloadRequests == null
                        || (reloadRequests.size() == 1 && reloadRequests.get(0).isFullLoadRequest());
                if (!reverse) {
                    log.info("Queueing up " + (isFullLoad ? "an initial" : "a") + " load to node " + targetNode.getNodeId());
                } else {
                    log.info("Queueing up a reverse " + (isFullLoad ? "initial " : "") + "load to node " + targetNode.getNodeId());
                }
                /*
                 * Outgoing data events are pointless because we are reloading
                 * all data
                 */
                if (isFullLoad) {
                    engine.getOutgoingBatchService().markAllAsSentForNode(targetNode.getNodeId(), false);
                }
                INodeService nodeService = engine.getNodeService();
                ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
                Node sourceNode = nodeService.findIdentity();
                boolean transactional = parameterService.is(ParameterConstants.DATA_RELOAD_IS_BATCH_INSERT_TRANSACTIONAL);
                String nodeIdRecord = reverse ? nodeService.findIdentityNodeId() : targetNode.getNodeId();
                NodeSecurity nodeSecurity = nodeService.findNodeSecurity(nodeIdRecord);
                ISqlTransaction transaction = null;
                try {
                    transaction = platform.getSqlTemplate().startSqlTransaction();
                    long loadId = engine.getSequenceService().nextVal(transaction, Constants.SEQUENCE_OUTGOING_BATCH_LOAD_ID);
                    processInfo.setCurrentLoadId(loadId);
                    String createBy = reverse ? nodeSecurity.getRevInitialLoadCreateBy() : nodeSecurity.getInitialLoadCreateBy();
                    List<TriggerHistory> triggerHistories = new ArrayList<TriggerHistory>();
                    if (isFullLoad) {
                        triggerHistories = triggerRouterService.getActiveTriggerHistories();
                    } else {
                        for (TableReloadRequest reloadRequest : reloadRequests) {
                            triggerHistories.addAll(engine.getTriggerRouterService()
                                    .getActiveTriggerHistories(new Trigger(reloadRequest.getTriggerId(), null)));
                        }
                    }
                    processInfo.setDataCount(triggerHistories.size());
                    Map<Integer, List<TriggerRouter>> triggerRoutersByHistoryId = triggerRouterService
                            .fillTriggerRoutersByHistIdAndSortHist(sourceNode.getNodeGroupId(),
                                    targetNode.getNodeGroupId(), triggerHistories);
                    if (isFullLoad) {
                        callReloadListeners(true, targetNode, transactional, transaction, loadId);
                        insertCreateSchemaScriptPriorToReload(targetNode, nodeIdRecord, loadId, createBy, transactional, transaction);
                    }
                    Map<String, TableReloadRequest> mapReloadRequests = convertReloadListToMap(reloadRequests);
                    String symNodeSecurityReloadChannel = null;
                    try {
                        symNodeSecurityReloadChannel = triggerRoutersByHistoryId
                                .get(triggerHistories.get(0).getTriggerHistoryId()).get(0).getTrigger().getReloadChannelId();
                    } catch (Exception e) {
                        // ignore; leave the channel null when no trigger history is available
                    }
                    if (isFullLoad || (reloadRequests != null && reloadRequests.size() > 0)) {
                        insertSqlEventsPriorToReload(targetNode, nodeIdRecord, loadId, createBy, transactional,
                                transaction, reverse, triggerHistories, triggerRoutersByHistoryId, mapReloadRequests,
                                isFullLoad, symNodeSecurityReloadChannel);
                    }
                    insertCreateBatchesForReload(targetNode, loadId, createBy, triggerHistories,
                            triggerRoutersByHistoryId, transactional, transaction, mapReloadRequests);
                    insertDeleteBatchesForReload(targetNode, loadId, createBy, triggerHistories,
                            triggerRoutersByHistoryId, transactional, transaction, mapReloadRequests);
                    insertSQLBatchesForReload(targetNode, loadId, createBy, triggerHistories,
                            triggerRoutersByHistoryId, transactional, transaction, mapReloadRequests);
                    insertLoadBatchesForReload(targetNode, loadId, createBy, triggerHistories,
                            triggerRoutersByHistoryId, transactional, transaction, mapReloadRequests, processInfo);
                    if (isFullLoad) {
                        String afterSql = parameterService.getString(reverse
                                ? ParameterConstants.INITIAL_LOAD_REVERSE_AFTER_SQL
                                : ParameterConstants.INITIAL_LOAD_AFTER_SQL);
                        if (isNotBlank(afterSql)) {
                            insertSqlEvent(transaction, targetNode, afterSql, true, loadId, createBy);
                        }
                    }
                    insertFileSyncBatchForReload(targetNode, loadId, createBy, transactional, transaction, processInfo);
                    if (isFullLoad) {
                        callReloadListeners(false, targetNode, transactional, transaction, loadId);
                        if (!reverse) {
                            nodeService.setInitialLoadEnabled(transaction, nodeIdRecord, false, false, loadId, createBy);
                        } else {
                            nodeService.setReverseInitialLoadEnabled(transaction, nodeIdRecord, false, false, loadId, createBy);
                        }
                    }
                    if (!Constants.DEPLOYMENT_TYPE_REST.equals(targetNode.getDeploymentType())) {
                        insertNodeSecurityUpdate(transaction, nodeIdRecord, targetNode.getNodeId(), true, loadId,
                                createBy, symNodeSecurityReloadChannel);
                    }
                    engine.getStatisticManager().incrementNodesLoaded(1);
                    if (reloadRequests != null && reloadRequests.size() > 0) {
                        for (TableReloadRequest request : reloadRequests) {
                            transaction.prepareAndExecute(getSql("updateProcessedTableReloadRequest"), loadId, new Date(),
                                    request.getTargetNodeId(), request.getSourceNodeId(), request.getTriggerId(),
                                    request.getRouterId(), request.getCreateTime());
                        }
                        log.info("Table reload request(s) for load id " + loadId + " have been processed.");
                    }
                    transaction.commit();
                } catch (Error ex) {
                    if (transaction != null) {
                        transaction.rollback();
                    }
                    throw ex;
                } catch (RuntimeException ex) {
                    if (transaction != null) {
                        transaction.rollback();
                    }
                    throw ex;
                } finally {
                    close(transaction);
                }
                if (!reverse) {
                    /*
                     * Remove all incoming events for the node that we are
                     * starting a reload for
                     */
                    engine.getPurgeService().purgeAllIncomingEventsForNode(targetNode.getNodeId());
                }
            }
        } finally {
            engine.getClusterService().unlock(ClusterConstants.SYNCTRIGGERS);
        }
    } else {
        log.info("Not attempting to insert reload events because sync trigger is currently running");
    }
}
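As a sketch, a full initial load can be queued by passing null for reloadRequests. The ProcessInfo plumbing below mirrors how the engine's own callers typically obtain one, but the node id, the ProcessInfoKey arguments, and the ProcessType value are assumptions for illustration.

// Hypothetical usage sketch: queue a full (initial) load to node "002".
Node targetNode = engine.getNodeService().findNode("002"); // assumed node id
ProcessInfo processInfo = engine.getStatisticManager().newProcessInfo(new ProcessInfoKey(
        engine.getNodeService().findIdentityNodeId(), targetNode.getNodeId(), ProcessType.INSERT_LOAD_EVENTS));
// Passing null for reloadRequests means "full load", so every active trigger
// history is reloaded rather than a selected set of tables.
engine.getDataService().insertReloadEvents(targetNode, false, null, processInfo);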
Use of org.jumpmind.symmetric.model.TriggerHistory in the symmetric-ds project by JumpMind.
From the class DataService, method insertSqlEvent:
public void insertSqlEvent(ISqlTransaction transaction, Node targetNode, String sql, boolean isLoad, long loadId,
        String createBy) {
    TriggerHistory history = engine.getTriggerRouterService().findTriggerHistoryForGenericSync();
    insertSqlEvent(transaction, history, Constants.CHANNEL_CONFIG, targetNode, sql, isLoad, loadId, createBy);
}
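This overload resolves the generic-sync trigger history itself, so a caller only supplies a transaction, a target node, and the SQL. A minimal sketch, using the same assumed setup as the insertScriptEvent example above (engine reference, placeholder node id, placeholder SQL and createBy):

// Hypothetical usage sketch: send one SQL statement to a target node on the
// config channel, outside of any load (isLoad = false, loadId = -1).
ISqlTransaction transaction = null;
try {
    transaction = engine.getDatabasePlatform().getSqlTemplate().startSqlTransaction();
    Node targetNode = engine.getNodeService().findNode("001"); // assumed node id
    engine.getDataService().insertSqlEvent(transaction, targetNode,
            "update item set last_update_time = current_timestamp", false, -1, "admin");
    transaction.commit();
} catch (RuntimeException ex) {
    if (transaction != null) {
        transaction.rollback();
    }
    throw ex;
} finally {
    if (transaction != null) {
        transaction.close();
    }
}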
Use of org.jumpmind.symmetric.model.TriggerHistory in the symmetric-ds project by JumpMind.
From the class DataService, method insertReloadEvent:
public boolean insertReloadEvent(TableReloadRequest request, boolean deleteAtClient) {
    boolean successful = false;
    if (request != null) {
        ITriggerRouterService triggerRouterService = engine.getTriggerRouterService();
        INodeService nodeService = engine.getNodeService();
        Node targetNode = nodeService.findNode(request.getTargetNodeId());
        if (targetNode != null) {
            TriggerRouter triggerRouter = triggerRouterService.getTriggerRouterForCurrentNode(
                    request.getTriggerId(), request.getRouterId(), false);
            if (triggerRouter != null) {
                Trigger trigger = triggerRouter.getTrigger();
                Router router = triggerRouter.getRouter();
                NodeGroupLink link = router.getNodeGroupLink();
                Node me = nodeService.findIdentity();
                if (link.getSourceNodeGroupId().equals(me.getNodeGroupId())) {
                    if (link.getTargetNodeGroupId().equals(targetNode.getNodeGroupId())) {
                        TriggerHistory triggerHistory = lookupTriggerHistory(trigger);
                        ISqlTransaction transaction = null;
                        try {
                            transaction = sqlTemplate.startSqlTransaction();
                            if (parameterService.is(ParameterConstants.INITIAL_LOAD_DELETE_BEFORE_RELOAD)) {
                                String overrideDeleteStatement = StringUtils.isNotBlank(request.getBeforeCustomSql())
                                        ? request.getBeforeCustomSql() : null;
                                insertPurgeEvent(transaction, targetNode, triggerRouter, triggerHistory, false,
                                        overrideDeleteStatement, -1, null);
                            }
                            insertReloadEvent(transaction, targetNode, triggerRouter, triggerHistory,
                                    request.getReloadSelect(), false, -1, null, Status.NE);
                            if (!targetNode.requires13Compatiblity() && deleteAtClient) {
                                insertSqlEvent(transaction, triggerHistory, trigger.getChannelId(), targetNode,
                                        String.format("delete from %s where target_node_id='%s' and source_node_id='%s' and trigger_id='%s' and router_id='%s'",
                                                TableConstants.getTableName(tablePrefix, TableConstants.SYM_TABLE_RELOAD_REQUEST),
                                                request.getTargetNodeId(), request.getSourceNodeId(),
                                                request.getTriggerId(), request.getRouterId()),
                                        false, -1, null);
                            }
                            deleteTableReloadRequest(transaction, request);
                            transaction.commit();
                            // Only report success once the reload events have been committed.
                            successful = true;
                        } catch (Error ex) {
                            if (transaction != null) {
                                transaction.rollback();
                            }
                            throw ex;
                        } catch (RuntimeException ex) {
                            if (transaction != null) {
                                transaction.rollback();
                            }
                            throw ex;
                        } finally {
                            close(transaction);
                        }
                    } else {
                        log.error("Could not reload table {} for node {} because the router {} target node group id {} did not match",
                                new Object[] { trigger.getSourceTableName(), request.getTargetNodeId(),
                                        request.getRouterId(), link.getTargetNodeGroupId() });
                    }
                } else {
                    log.error("Could not reload table {} for node {} because the router {} source node group id {} did not match",
                            new Object[] { trigger.getSourceTableName(), request.getTargetNodeId(),
                                    request.getRouterId(), link.getSourceNodeGroupId() });
                }
            } else {
                log.error("Could not reload table for node {} because the trigger router ({}, {}) could not be found",
                        new Object[] { request.getTargetNodeId(), request.getTriggerId(), request.getRouterId() });
            }
        } else {
            log.error("Could not reload table for node {} because the target node could not be found",
                    request.getTargetNodeId());
        }
    }
    return successful;
}
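To round out the example, a sketch of building and submitting a single-table reload request. The trigger id, router id, and node ids are placeholders, and it is assumed TableReloadRequest exposes plain setters for these fields:

// Hypothetical usage sketch: reload one table to node "002" and clean up the
// request row at the client (deleteAtClient = true).
TableReloadRequest request = new TableReloadRequest();
request.setTriggerId("item_trigger");   // placeholder trigger id
request.setRouterId("corp_2_store");    // placeholder router id
request.setSourceNodeId(engine.getNodeService().findIdentityNodeId());
request.setTargetNodeId("002");         // placeholder node id
request.setReloadSelect(null);          // null reloads all rows
boolean queued = engine.getDataService().insertReloadEvent(request, true);
System.out.println("Reload event queued: " + queued);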