
Example 1 with DataEventType

Use of org.jumpmind.symmetric.io.data.DataEventType in project symmetric-ds by JumpMind.

From class AbstractDatabaseWriterConflictResolver, method needsResolved:

public void needsResolved(AbstractDatabaseWriter writer, CsvData data, LoadStatus loadStatus) {
    DataEventType originalEventType = data.getDataEventType();
    DatabaseWriterSettings writerSettings = writer.getWriterSettings();
    Conflict conflict = writerSettings.pickConflict(writer.getTargetTable(), writer.getBatch());
    Statistics statistics = writer.getStatistics().get(writer.getBatch());
    long statementCount = statistics.get(DataWriterStatisticConstants.STATEMENTCOUNT);
    long lineNumber = statistics.get(DataWriterStatisticConstants.LINENUMBER);
    ResolvedData resolvedData = writerSettings.getResolvedData(statementCount);
    logConflictHappened(conflict, data, writer, resolvedData, lineNumber);
    switch(originalEventType) {
        case INSERT:
            if (resolvedData != null) {
                attemptToResolve(resolvedData, data, writer, conflict);
            } else {
                switch(conflict.getResolveType()) {
                    case FALLBACK:
                        performFallbackToUpdate(writer, data, conflict, true);
                        break;
                    case NEWER_WINS:
                        if ((conflict.getDetectType() == DetectConflict.USE_TIMESTAMP && isTimestampNewer(conflict, writer, data)) || (conflict.getDetectType() == DetectConflict.USE_VERSION && isVersionNewer(conflict, writer, data))) {
                            performFallbackToUpdate(writer, data, conflict, true);
                        } else {
                            if (!conflict.isResolveRowOnly()) {
                                throw new IgnoreBatchException();
                            }
                        }
                        break;
                    case IGNORE:
                        ignore(writer, conflict);
                        break;
                    case MANUAL:
                    default:
                        attemptToResolve(resolvedData, data, writer, conflict);
                        break;
                }
            }
            break;
        case UPDATE:
            if (resolvedData != null) {
                attemptToResolve(resolvedData, data, writer, conflict);
            } else {
                switch(conflict.getResolveType()) {
                    case FALLBACK:
                        if (conflict.getDetectType() == DetectConflict.USE_PK_DATA) {
                            CsvData withoutOldData = data.copyWithoutOldData();
                            try {
                                // we already tried to update using the pk
                                performFallbackToInsert(writer, withoutOldData, conflict, true);
                            } catch (ConflictException ex) {
                                performFallbackToUpdate(writer, withoutOldData, conflict, true);
                            }
                        } else {
                            try {
                                performFallbackToUpdate(writer, data, conflict, true);
                            } catch (ConflictException ex) {
                                performFallbackToInsert(writer, data, conflict, true);
                            }
                        }
                        break;
                    case NEWER_WINS:
                        if ((conflict.getDetectType() == DetectConflict.USE_TIMESTAMP && isTimestampNewer(conflict, writer, data)) || (conflict.getDetectType() == DetectConflict.USE_VERSION && isVersionNewer(conflict, writer, data))) {
                            try {
                                performFallbackToUpdate(writer, data, conflict, false);
                            } catch (ConflictException ex) {
                                performFallbackToInsert(writer, data, conflict, true);
                            }
                        } else {
                            if (!conflict.isResolveRowOnly()) {
                                throw new IgnoreBatchException();
                            }
                        }
                        break;
                    case IGNORE:
                        ignore(writer, conflict);
                        break;
                    case MANUAL:
                    default:
                        attemptToResolve(resolvedData, data, writer, conflict);
                        break;
                }
            }
            break;
        case DELETE:
            switch(conflict.getResolveType()) {
                case FALLBACK:
                    LoadStatus status = LoadStatus.CONFLICT;
                    if (conflict.getDetectType() != DetectConflict.USE_PK_DATA) {
                        status = writer.delete(data, false);
                    }
                    if (status == LoadStatus.CONFLICT) {
                        writer.getStatistics().get(writer.getBatch()).increment(DataWriterStatisticConstants.MISSINGDELETECOUNT);
                    }
                    break;
                case IGNORE:
                    ignore(writer, conflict);
                    break;
                case NEWER_WINS:
                    // nothing to do ...
                    break;
                case MANUAL:
                default:
                    if (resolvedData != null) {
                        if (!resolvedData.isIgnoreRow()) {
                            writer.delete(data, false);
                        } else {
                            if (!conflict.isResolveRowOnly()) {
                                throw new IgnoreBatchException();
                            }
                        }
                    } else {
                        throw new ConflictException(data, writer.getTargetTable(), false, conflict, (Exception) writer.getContext().get(AbstractDatabaseWriter.CONFLICT_ERROR));
                    }
                    break;
            }
            break;
        default:
            break;
    }
    logConflictResolution(conflict, data, writer, resolvedData, lineNumber);
}
Also used : DetectConflict(org.jumpmind.symmetric.io.data.writer.Conflict.DetectConflict) LoadStatus(org.jumpmind.symmetric.io.data.writer.AbstractDatabaseWriter.LoadStatus) DataEventType(org.jumpmind.symmetric.io.data.DataEventType) Statistics(org.jumpmind.util.Statistics) CsvData(org.jumpmind.symmetric.io.data.CsvData)
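
To make the dispatch shape above easier to follow, here is a stripped-down, hypothetical sketch of the same idea: the incoming event type and the configured resolve strategy together pick the action. The enums, the resolve method, and the ConflictAction names below are illustrative only and are not part of the SymmetricDS API.

// Simplified, self-contained illustration of resolving by event type and strategy.
// All names here (EventType, ResolveStrategy, ConflictAction) are hypothetical.
public class ConflictDispatchSketch {

    enum EventType { INSERT, UPDATE, DELETE }
    enum ResolveStrategy { FALLBACK, NEWER_WINS, IGNORE, MANUAL }
    enum ConflictAction { FALLBACK_TO_UPDATE, FALLBACK_TO_INSERT, SKIP_ROW, FAIL_BATCH }

    static ConflictAction resolve(EventType event, ResolveStrategy strategy, boolean incomingIsNewer) {
        switch (strategy) {
            case FALLBACK:
                // An insert that hits an existing row falls back to an update; a
                // conflicting update may fall back to an insert, as in the code above.
                return event == EventType.INSERT ? ConflictAction.FALLBACK_TO_UPDATE
                                                 : ConflictAction.FALLBACK_TO_INSERT;
            case NEWER_WINS:
                // Apply the incoming row only if its timestamp/version is newer.
                return incomingIsNewer ? ConflictAction.FALLBACK_TO_UPDATE : ConflictAction.SKIP_ROW;
            case IGNORE:
                return ConflictAction.SKIP_ROW;
            case MANUAL:
            default:
                // Without pre-resolved data, manual resolution surfaces as an error.
                return ConflictAction.FAIL_BATCH;
        }
    }

    public static void main(String[] args) {
        System.out.println(resolve(EventType.INSERT, ResolveStrategy.NEWER_WINS, true));
    }
}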

Example 2 with DataEventType

Use of org.jumpmind.symmetric.io.data.DataEventType in project symmetric-ds by JumpMind.

From class ConfigurationChangedDataRouter, method routeSymNodeSecurity:

protected void routeSymNodeSecurity(Node me, String nodeIdForRecordBeingRouted, DataMetaData dataMetaData, Set<String> nodeIds, Map<String, String> columnValues) {
    DataEventType eventType = dataMetaData.getData().getDataEventType();
    boolean fromAnotherNode = isNotBlank(dataMetaData.getData().getSourceNodeId());
    if (nodeIds.contains(nodeIdForRecordBeingRouted)) {
        /*
         * Don't route node security to its own node. That node will get node
         * security via registration, and it will be updated by the initial
         * load. Otherwise, updates can be unpredictable in the order they
         * will be applied at the node because updates are on a different
         * channel than reloads.
         */
        boolean remove = true;
        if (eventType == DataEventType.UPDATE) {
            if ("1".equals(columnValues.get("REV_INITIAL_LOAD_ENABLED"))) {
                boolean reverseLoadQueued = engine.getParameterService().is(ParameterConstants.INITIAL_LOAD_REVERSE_FIRST) || "0".equals(columnValues.get("INITIAL_LOAD_ENABLED"));
                /*
                 * Only send the update if the client is going to be expected
                 * to queue up a reverse load. The trigger to do this is the
                 * arrival of sym_node_security with REV_INITIAL_LOAD_ENABLED
                 * set to 1.
                 */
                if (reverseLoadQueued) {
                    remove = false;
                }
            }
        }
        if (remove) {
            nodeIds.remove(nodeIdForRecordBeingRouted);
        }
    }
    boolean removeParentNode = true;
    if (eventType == DataEventType.UPDATE) {
        if ("1".equals(columnValues.get("INITIAL_LOAD_ENABLED")) && me.getNodeId().equals(nodeIdForRecordBeingRouted)) {
            removeParentNode = false;
        }
    }
    if (removeParentNode) {
        nodeIds.remove(columnValues.get("CREATED_AT_NODE_ID"));
    }
    if (engine.getConfigurationService().isMasterToMaster() || fromAnotherNode) {
        /*
         * Don't send updates where the initial load flags are enabled to
         * other nodes in the cluster.
         */
        if ("1".equals(columnValues.get("INITIAL_LOAD_ENABLED"))) {
            nodeIds.clear();
        }
    }
}
Also used : DataEventType(org.jumpmind.symmetric.io.data.DataEventType)
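
The routing rule above can be restated as a small decision function. The sketch below is a hypothetical, self-contained restatement of that rule; shouldRouteToOwnNode and its parameters are invented for illustration and are not SymmetricDS APIs, only the column names come from the code above.

// Hypothetical sketch of the routing decision above: a node security row is
// normally not routed back to its own node, unless the update signals that
// the client should queue a reverse initial load.
import java.util.Map;

public class NodeSecurityRoutingSketch {

    static boolean shouldRouteToOwnNode(boolean isUpdate, Map<String, String> columnValues,
                                        boolean reverseLoadFirstParam) {
        if (isUpdate && "1".equals(columnValues.get("REV_INITIAL_LOAD_ENABLED"))) {
            // Route only when a reverse load is expected to be queued.
            return reverseLoadFirstParam || "0".equals(columnValues.get("INITIAL_LOAD_ENABLED"));
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(shouldRouteToOwnNode(true,
                Map.of("REV_INITIAL_LOAD_ENABLED", "1", "INITIAL_LOAD_ENABLED", "0"), false));
    }
}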

Example 3 with DataEventType

Use of org.jumpmind.symmetric.io.data.DataEventType in project symmetric-ds by JumpMind.

From class TemplatedPublisherDataLoaderFilter, method addTextElement:

@Override
protected String addTextElement(DataContext context, Table table, CsvData data) {
    if (this.dataFilter == null || this.dataFilter.beforeWrite(context, table, data)) {
        DataEventType eventType = data.getDataEventType();
        String template = null;
        if ((processInsert && eventType == DataEventType.INSERT) || (processUpdate && eventType == DataEventType.UPDATE) || (processDelete && eventType == DataEventType.DELETE)) {
            template = contentTableTemplate;
            if (template != null) {
                template = fillOutTemplate(table, data, template, context);
            }
        }
        return template;
    } else {
        return null;
    }
}
Also used : DataEventType(org.jumpmind.symmetric.io.data.DataEventType)
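
A minimal sketch of the gating pattern above, assuming nothing beyond the flags shown in the code: the publisher produces a template only for event types it is configured to process. EventType and templateFor are hypothetical names for this illustration.

// Hypothetical sketch: a template is produced only for enabled event types.
public class EventTypeGateSketch {

    enum EventType { INSERT, UPDATE, DELETE }

    static String templateFor(EventType eventType, boolean processInsert, boolean processUpdate,
                              boolean processDelete, String contentTemplate) {
        boolean enabled = (processInsert && eventType == EventType.INSERT)
                || (processUpdate && eventType == EventType.UPDATE)
                || (processDelete && eventType == EventType.DELETE);
        return enabled ? contentTemplate : null; // null means "publish nothing"
    }

    public static void main(String[] args) {
        System.out.println(templateFor(EventType.DELETE, true, true, false, "<row/>"));
    }
}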

Example 4 with DataEventType

Use of org.jumpmind.symmetric.io.data.DataEventType in project symmetric-ds by JumpMind.

From class TemplatedPublisherDataLoaderFilter, method fillOutTemplate:

protected String fillOutTemplate(Table table, CsvData data, String template, DataContext context) {
    DataEventType eventType = data.getDataEventType();
    String[] colNames = null;
    String[] colValues = null;
    if (eventType == DataEventType.DELETE) {
        colNames = table.getPrimaryKeyColumnNames();
        colValues = data.getParsedData(CsvData.PK_DATA);
    } else {
        colNames = table.getColumnNames();
        colValues = data.getParsedData(CsvData.ROW_DATA);
    }
    for (int i = 0; i < colValues.length; i++) {
        String col = colNames[i];
        template = replace(template, col, format(col, colValues[i]));
    }
    template = template.replace("DMLTYPE", eventType.name());
    template = template.replace("TIMESTAMP", Long.toString(System.currentTimeMillis()));
    return template;
}
Also used : DataEventType(org.jumpmind.symmetric.io.data.DataEventType)
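
As a rough, hypothetical illustration of the fill step above: delete events would supply only the primary-key columns, other events the full row, and the DMLTYPE and TIMESTAMP placeholders are substituted last. The %NAME% token syntax below is an assumption made for this sketch; the real filter's replace and format helpers are not shown here.

// Hypothetical, self-contained sketch of template substitution by column map.
import java.util.Map;

public class TemplateFillSketch {

    static String fill(String template, Map<String, String> columns, String dmlType) {
        // Substitute each column token, then the event-type and timestamp placeholders.
        for (Map.Entry<String, String> e : columns.entrySet()) {
            template = template.replace("%" + e.getKey() + "%", e.getValue());
        }
        template = template.replace("DMLTYPE", dmlType);
        template = template.replace("TIMESTAMP", Long.toString(System.currentTimeMillis()));
        return template;
    }

    public static void main(String[] args) {
        // For a DELETE, only the PK columns would be passed in.
        System.out.println(fill("<row id=\"%ID%\" op=\"DMLTYPE\" at=\"TIMESTAMP\"/>",
                Map.of("ID", "42"), "DELETE"));
    }
}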

Example 5 with DataEventType

Use of org.jumpmind.symmetric.io.data.DataEventType in project symmetric-ds by JumpMind.

From class MySqlBulkDatabaseWriter, method write:

public void write(CsvData data) {
    DataEventType dataEventType = data.getDataEventType();
    switch(dataEventType) {
        case INSERT:
            statistics.get(batch).increment(DataWriterStatisticConstants.STATEMENTCOUNT);
            statistics.get(batch).increment(DataWriterStatisticConstants.LINENUMBER);
            statistics.get(batch).startTimer(DataWriterStatisticConstants.DATABASEMILLIS);
            try {
                String[] parsedData = data.getParsedData(CsvData.ROW_DATA);
                byte[] byteData = null;
                if (needsBinaryConversion) {
                    ByteArrayOutputStream out = new ByteArrayOutputStream();
                    CsvWriter writer = new CsvWriter(new OutputStreamWriter(out), ',');
                    writer.setEscapeMode(CsvWriter.ESCAPE_MODE_BACKSLASH);
                    writer.setRecordDelimiter('\n');
                    writer.setTextQualifier('"');
                    writer.setUseTextQualifier(true);
                    writer.setForceQualifier(true);
                    writer.setNullString("\\N");
                    Column[] columns = targetTable.getColumns();
                    for (int i = 0; i < columns.length; i++) {
                        if (columns[i].isOfBinaryType() && parsedData[i] != null) {
                            if (i > 0) {
                                out.write(',');
                            }
                            out.write('"');
                            if (batch.getBinaryEncoding().equals(BinaryEncoding.HEX)) {
                                out.write(escape(Hex.decodeHex(parsedData[i].toCharArray())));
                            } else if (batch.getBinaryEncoding().equals(BinaryEncoding.BASE64)) {
                                out.write(new String(Hex.encodeHex(Base64.decodeBase64(parsedData[i].getBytes()))).getBytes());
                            }
                            out.write('"');
                        } else {
                            writer.write(parsedData[i], true);
                            writer.flush();
                        }
                    }
                    writer.endRecord();
                    writer.close();
                    byteData = out.toByteArray();
                } else {
                    String formattedData = CsvUtils.escapeCsvData(parsedData, '\n', '"', CsvWriter.ESCAPE_MODE_BACKSLASH, "\\N");
                    byteData = formattedData.getBytes();
                }
                this.stagedInputFile.getOutputStream().write(byteData);
                loadedRows++;
                loadedBytes += byteData.length;
            } catch (Exception ex) {
                throw getPlatform().getSqlTemplate().translate(ex);
            } finally {
                statistics.get(batch).stopTimer(DataWriterStatisticConstants.DATABASEMILLIS);
            }
            break;
        case UPDATE:
        case DELETE:
        default:
            flush();
            super.write(data);
            break;
    }
    if (loadedRows >= maxRowsBeforeFlush || loadedBytes >= maxBytesBeforeFlush) {
        flush();
    }
}
Also used : CsvWriter(org.jumpmind.symmetric.csv.CsvWriter) Column(org.jumpmind.db.model.Column) DataEventType(org.jumpmind.symmetric.io.data.DataEventType) OutputStreamWriter(java.io.OutputStreamWriter) ByteArrayOutputStream(java.io.ByteArrayOutputStream) SQLException(java.sql.SQLException)
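
The flush logic above can be pictured with a small, self-contained sketch: INSERT rows are staged as CSV bytes and flushed once a row-count or byte-count threshold is crossed, while any other event forces a flush first so statement ordering is preserved. BulkStagingSketch, its thresholds, and its methods are hypothetical and only stand in for the staged file that the real writer eventually bulk loads.

// Hypothetical sketch of threshold-based staging for bulk inserts.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class BulkStagingSketch {

    private final ByteArrayOutputStream staged = new ByteArrayOutputStream();
    private long stagedRows, stagedBytes;
    private final long maxRowsBeforeFlush = 10_000, maxBytesBeforeFlush = 1_000_000;

    void writeInsert(String csvLine) throws IOException {
        byte[] bytes = (csvLine + "\n").getBytes(StandardCharsets.UTF_8);
        staged.write(bytes);
        stagedRows++;
        stagedBytes += bytes.length;
        if (stagedRows >= maxRowsBeforeFlush || stagedBytes >= maxBytesBeforeFlush) {
            flush();
        }
    }

    void writeOther(String sql) throws IOException {
        flush(); // keep ordering: drain staged inserts before other DML
        // ... the non-insert statement would be executed here ...
    }

    void flush() {
        // In the real writer the staged file would be bulk loaded; here we just drop the buffer.
        staged.reset();
        stagedRows = 0;
        stagedBytes = 0;
    }

    public static void main(String[] args) throws IOException {
        BulkStagingSketch sketch = new BulkStagingSketch();
        sketch.writeInsert("1,\"alice\"");
        sketch.writeOther("UPDATE t SET name = 'bob' WHERE id = 1");
    }
}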

Aggregations

DataEventType (org.jumpmind.symmetric.io.data.DataEventType): 16
SQLException (java.sql.SQLException): 4
ArrayList (java.util.ArrayList): 4
List (java.util.List): 3
Column (org.jumpmind.db.model.Column): 3
Table (org.jumpmind.db.model.Table): 2
CsvData (org.jumpmind.symmetric.io.data.CsvData): 2
IgnoreColumnException (org.jumpmind.symmetric.io.data.transform.IgnoreColumnException): 2
IgnoreRowException (org.jumpmind.symmetric.io.data.transform.IgnoreRowException): 2
NewAndOldValue (org.jumpmind.symmetric.io.data.transform.NewAndOldValue): 2
TransformColumn (org.jumpmind.symmetric.io.data.transform.TransformColumn): 2
IncludeOnType (org.jumpmind.symmetric.io.data.transform.TransformColumn.IncludeOnType): 2
TransformedData (org.jumpmind.symmetric.io.data.transform.TransformedData): 2
Statistics (org.jumpmind.util.Statistics): 2
AmazonClientException (com.amazonaws.AmazonClientException): 1
AmazonServiceException (com.amazonaws.AmazonServiceException): 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
OutputStream (java.io.OutputStream): 1
OutputStreamWriter (java.io.OutputStreamWriter): 1
Date (java.util.Date): 1