Example 6 with DbBatch

Use of com.alibaba.otter.shared.etl.model.DbBatch in project otter by alibaba.

From class RowDataPipeDelegate, method get.

public DbBatch get(List<PipeKey> keys) {
    Assert.notNull(keys);
    DbBatch dbBatch = new DbBatch();
    Future<File> future = null;
    for (final PipeKey key : keys) {
        if (key == null) {
            // skip null keys
            continue;
        }
        if (key instanceof MemoryPipeKey) {
            dbBatch = rowDataMemoryPipe.get((MemoryPipeKey) key);
            // the memory pipe returns the batch directly
            return dbBatch;
        } else if (key instanceof HttpPipeKey) {
            if (key.getDataType().isDbBatch()) {
                // distinguish which kind of data to download
                dbBatch = rowDataHttpPipe.get((HttpPipeKey) key);
            } else {
                future = executorService.submit(new Callable<File>() {

                    public File call() throws Exception {
                        try {
                            HttpPipeKey pipeKey = (HttpPipeKey) key;
                            MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipeKey.getIdentity().getPipelineId()));
                            return attachmentHttpPipe.get(pipeKey);
                        } finally {
                            MDC.remove(OtterConstants.splitPipelineLogFileKey);
                        }
                    }
                });
            }
        } else if (key instanceof RpcPipeKey) {
            dbBatch = rowDataRpcPipe.get((RpcPipeKey) key);
        } else {
            throw new PipeException("unknow_PipeKey", key.toString());
        }
    }
    if (future != null && dbBatch != null) {
        try {
            dbBatch.setRoot(future.get());
        } catch (Exception e) {
            throw new PipeException(e);
        }
    }
    return dbBatch;
}
Also used : HttpPipeKey(com.alibaba.otter.node.etl.common.pipe.impl.http.HttpPipeKey) MemoryPipeKey(com.alibaba.otter.node.etl.common.pipe.impl.memory.MemoryPipeKey) RpcPipeKey(com.alibaba.otter.node.etl.common.pipe.impl.rpc.RpcPipeKey) PipeKey(com.alibaba.otter.node.etl.common.pipe.PipeKey) PipeException(com.alibaba.otter.node.etl.common.pipe.exception.PipeException) File(java.io.File) DbBatch(com.alibaba.otter.shared.etl.model.DbBatch)
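
For context, a sketch of how a caller might drive this delegate. In the ExtractTask and LoadTask examples below, the keys arrive as the desc payload of an EtlEventData set by the previous stage; handleMissData and process here are hypothetical placeholders, not otter APIs:

// Hypothetical caller: fetch the batch produced by the upstream node.
List<PipeKey> keys = (List<PipeKey>) etlEventData.getDesc(); // set by the upstream stage
DbBatch batch = rowDataPipeDelegate.get(keys);
if (batch == null) {
    // memory pressure or network failures can purge the pipe; treat as missed data
    handleMissData(keys); // hypothetical handler
} else {
    process(batch.getRowBatch(), batch.getFileBatch()); // hypothetical consumer
}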

Example 7 with DbBatch

Use of com.alibaba.otter.shared.etl.model.DbBatch in project otter by alibaba.

From class ExtractTask, method run.

public void run() {
    MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipelineId));
    while (running) {
        try {
            final EtlEventData etlEventData = arbitrateEventService.extractEvent().await(pipelineId);
            Runnable task = new Runnable() {

                public void run() {
                    // set up profiling info
                    boolean profiling = isProfiling();
                    Long profilingStartTime = null;
                    if (profiling) {
                        profilingStartTime = System.currentTimeMillis();
                    }
                    MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipelineId));
                    String currentName = Thread.currentThread().getName();
                    Thread.currentThread().setName(createTaskName(pipelineId, "ExtractWorker"));
                    try {
                        pipeline = configClientService.findPipeline(pipelineId);
                        List<PipeKey> keys = (List<PipeKey>) etlEventData.getDesc();
                        long nextNodeId = etlEventData.getNextNid();
                        DbBatch dbBatch = rowDataPipeDelegate.get(keys);
                        // may be null: under memory pressure or network trouble, a long-blocked pipe fetch can fail, and the upstream node may already have deleted the data
                        if (dbBatch == null) {
                            processMissData(pipelineId, "extract miss data with keys:" + keys.toString());
                            return;
                        }
                        // re-assemble the data
                        otterExtractorFactory.extract(dbBatch);
                        if (dbBatch.getFileBatch() != null && !CollectionUtils.isEmpty(dbBatch.getFileBatch().getFiles()) && pipeline.getParameters().getFileDetect()) {
                            // check whether files are being synced and need to be compared
                            // e.g. whether the images differ between the China and US sites
                            FileBatch fileBatch = fileBatchConflictDetectService.detect(dbBatch.getFileBatch(), nextNodeId);
                            dbBatch.setFileBatch(fileBatch);
                        }
                        List<PipeKey> pipeKeys = rowDataPipeDelegate.put(dbBatch, nextNodeId);
                        etlEventData.setDesc(pipeKeys);
                        if (profiling) {
                            Long profilingEndTime = System.currentTimeMillis();
                            stageAggregationCollector.push(pipelineId, StageType.EXTRACT, new AggregationItem(profilingStartTime, profilingEndTime));
                        }
                        arbitrateEventService.extractEvent().single(etlEventData);
                    } catch (Throwable e) {
                        if (!isInterrupt(e)) {
                            logger.error(String.format("[%d] extractWork executor error! data:%s", pipelineId, etlEventData), e);
                            sendRollbackTermin(pipelineId, e);
                        } else {
                            logger.info(String.format("[%d] extractWork executor interrupted! data:%s", pipelineId, etlEventData), e);
                        }
                    } finally {
                        Thread.currentThread().setName(currentName);
                        MDC.remove(OtterConstants.splitPipelineLogFileKey);
                    }
                }
            };
            // build a pending task so it can be cancelled when the worker threads shut down
            SetlFuture extractFuture = new SetlFuture(StageType.EXTRACT, etlEventData.getProcessId(), pendingFuture, task);
            executorService.execute(extractFuture);
        } catch (Throwable e) {
            if (isInterrupt(e)) {
                logger.info(String.format("[%s] extractTask is interrupted!", pipelineId), e);
                return;
            } else {
                logger.error(String.format("[%s] extractTask error!", pipelineId), e);
                sendRollbackTermin(pipelineId, e);
            }
        }
    }
}
Also used : FileBatch(com.alibaba.otter.shared.etl.model.FileBatch) PipeKey(com.alibaba.otter.node.etl.common.pipe.PipeKey) DbBatch(com.alibaba.otter.shared.etl.model.DbBatch) EtlEventData(com.alibaba.otter.shared.arbitrate.model.EtlEventData) AggregationItem(com.alibaba.otter.node.etl.common.jmx.StageAggregation.AggregationItem) List(java.util.List)
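
The SetlFuture used above registers the task under its process id in pendingFuture so in-flight stage work can be cancelled on shutdown. A minimal sketch of that self-registering pattern, assuming SetlFuture behaves roughly like a FutureTask (the actual otter class may differ in detail):

import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.FutureTask;

// Illustrative only; names mirror the snippet but the body is an assumption.
class PendingTask extends FutureTask<Object> {

    private final ConcurrentMap<Long, PendingTask> pending;
    private final Long processId;

    PendingTask(Long processId, ConcurrentMap<Long, PendingTask> pending, Runnable task) {
        super(task, null);
        this.processId = processId;
        this.pending = pending;
        // register so shutdown code can find and cancel this task
        pending.put(processId, this);
    }

    @Override
    protected void done() {
        // deregister on normal completion or cancellation
        pending.remove(processId);
    }
}

On shutdown, iterating pending.values() and calling cancel(true) interrupts the workers, which the isInterrupt(e) checks above then recognize.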

Example 8 with DbBatch

Use of com.alibaba.otter.shared.etl.model.DbBatch in project otter by alibaba.

From class RowDataHttpPipe, method getDbBatch.

// process the corresponding DbBatch
private DbBatch getDbBatch(HttpPipeKey key) {
    String dataUrl = key.getUrl();
    Pipeline pipeline = configClientService.findPipeline(key.getIdentity().getPipelineId());
    DataRetriever dataRetriever = dataRetrieverFactory.createRetriever(pipeline.getParameters().getRetriever(), dataUrl, downloadDir);
    File archiveFile = null;
    try {
        dataRetriever.connect();
        dataRetriever.doRetrieve();
        archiveFile = dataRetriever.getDataAsFile();
    } catch (Exception e) {
        dataRetriever.abort();
        throw new PipeException("download_error", e);
    } finally {
        dataRetriever.disconnect();
    }
    // handle encrypted data if present
    if (StringUtils.isNotEmpty(key.getKey()) && StringUtils.isNotEmpty(key.getCrc())) {
        decodeFile(archiveFile, key.getKey(), key.getCrc());
    }
    InputStream input = null;
    try {
        input = new BufferedInputStream(new FileInputStream(archiveFile));
        DbBatch dbBatch = new DbBatch();
        byte[] lengthBytes = new byte[4];
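        // note: a bare read() is not guaranteed to fill all 4 bytes; a robust
        // reader would loop, or use DataInputStream.readFully (see the sketch below)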
        input.read(lengthBytes);
        int length = ByteUtils.bytes2int(lengthBytes);
        BatchProto.RowBatch rowbatchProto = BatchProto.RowBatch.parseFrom(new LimitedInputStream(input, length));
        // reconstruct the original model object
        RowBatch rowBatch = new RowBatch();
        rowBatch.setIdentity(build(rowbatchProto.getIdentity()));
        for (BatchProto.RowData rowDataProto : rowbatchProto.getRowsList()) {
            EventData eventData = new EventData();
            eventData.setPairId(rowDataProto.getPairId());
            eventData.setTableId(rowDataProto.getTableId());
            eventData.setTableName(rowDataProto.getTableName());
            eventData.setSchemaName(rowDataProto.getSchemaName());
            eventData.setEventType(EventType.valuesOf(rowDataProto.getEventType()));
            eventData.setExecuteTime(rowDataProto.getExecuteTime());
            // add by ljh at 2012-10-31
            if (StringUtils.isNotEmpty(rowDataProto.getSyncMode())) {
                eventData.setSyncMode(SyncMode.valuesOf(rowDataProto.getSyncMode()));
            }
            if (StringUtils.isNotEmpty(rowDataProto.getSyncConsistency())) {
                eventData.setSyncConsistency(SyncConsistency.valuesOf(rowDataProto.getSyncConsistency()));
            }
            // handle the primary keys
            List<EventColumn> keys = new ArrayList<EventColumn>();
            for (BatchProto.Column columnProto : rowDataProto.getKeysList()) {
                keys.add(buildColumn(columnProto));
            }
            eventData.setKeys(keys);
            // handle the old primary keys
            if (!CollectionUtils.isEmpty(rowDataProto.getOldKeysList())) {
                List<EventColumn> oldKeys = new ArrayList<EventColumn>();
                for (BatchProto.Column columnProto : rowDataProto.getOldKeysList()) {
                    oldKeys.add(buildColumn(columnProto));
                }
                eventData.setOldKeys(oldKeys);
            }
            // handle the actual column values
            List<EventColumn> columns = new ArrayList<EventColumn>();
            for (BatchProto.Column columnProto : rowDataProto.getColumnsList()) {
                columns.add(buildColumn(columnProto));
            }
            eventData.setColumns(columns);
            eventData.setRemedy(rowDataProto.getRemedy());
            eventData.setSize(rowDataProto.getSize());
            eventData.setSql(rowDataProto.getSql());
            eventData.setDdlSchemaName(rowDataProto.getDdlSchemaName());
            eventData.setHint(rowDataProto.getHint());
            eventData.setWithoutSchema(rowDataProto.getWithoutSchema());
            // merge into the overall record set
            rowBatch.merge(eventData);
        }
        dbBatch.setRowBatch(rowBatch);
        input.read(lengthBytes);
        length = ByteUtils.bytes2int(lengthBytes);
        BatchProto.FileBatch filebatchProto = BatchProto.FileBatch.parseFrom(new LimitedInputStream(input, length));
        // reconstruct the original model object
        FileBatch fileBatch = new FileBatch();
        fileBatch.setIdentity(build(filebatchProto.getIdentity()));
        for (BatchProto.FileData fileDataProto : filebatchProto.getFilesList()) {
            FileData fileData = new FileData();
            fileData.setPairId(fileDataProto.getPairId());
            fileData.setTableId(fileDataProto.getTableId());
            fileData.setEventType(EventType.valuesOf(fileDataProto.getEventType()));
            fileData.setLastModifiedTime(fileDataProto.getLastModifiedTime());
            fileData.setNameSpace(fileDataProto.getNamespace());
            fileData.setPath(fileDataProto.getPath());
            fileData.setSize(fileDataProto.getSize());
            // add to the file batch
            fileBatch.getFiles().add(fileData);
        }
        dbBatch.setFileBatch(fileBatch);
        return dbBatch;
    } catch (IOException e) {
        throw new PipeException("deserial_error", e);
    } finally {
        IOUtils.closeQuietly(input);
    }
}
Also used : EventColumn(com.alibaba.otter.shared.etl.model.EventColumn) ArrayList(java.util.ArrayList) DbBatch(com.alibaba.otter.shared.etl.model.DbBatch) EventData(com.alibaba.otter.shared.etl.model.EventData) BufferedInputStream(java.io.BufferedInputStream) FileData(com.alibaba.otter.shared.etl.model.FileData) FileBatch(com.alibaba.otter.shared.etl.model.FileBatch) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) DataRetriever(com.alibaba.otter.node.etl.common.io.download.DataRetriever) IOException(java.io.IOException) BatchProto(com.alibaba.otter.node.etl.model.protobuf.BatchProto) PipeException(com.alibaba.otter.node.etl.common.pipe.exception.PipeException) Pipeline(com.alibaba.otter.shared.common.model.config.pipeline.Pipeline) RowBatch(com.alibaba.otter.shared.etl.model.RowBatch) File(java.io.File)
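
The method above reads two length-prefixed protobuf messages from a single stream: a 4-byte header decoded with ByteUtils.bytes2int, then exactly that many bytes bounded by LimitedInputStream, first for the row batch and again for the file batch. A minimal sketch of the same framing with plain JDK streams; it assumes a big-endian 4-byte length, which is worth checking against the otter ByteUtils source:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class FrameCodec {

    // writer side: 4-byte big-endian length, then the serialized message bytes
    public static void writeFrame(OutputStream out, byte[] message) throws IOException {
        DataOutputStream dos = new DataOutputStream(out);
        dos.writeInt(message.length);
        dos.write(message);
        dos.flush();
    }

    // reader side: readFully avoids the partial-read pitfall noted in the snippet
    public static byte[] readFrame(InputStream in) throws IOException {
        DataInputStream dis = new DataInputStream(in);
        int length = dis.readInt();
        byte[] body = new byte[length];
        dis.readFully(body);
        return body;
    }
}

With this framing, each section could also be parsed as BatchProto.RowBatch.parseFrom(readFrame(input)), which bounds the read the same way LimitedInputStream does.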

Example 9 with DbBatch

Use of com.alibaba.otter.shared.etl.model.DbBatch in project otter by alibaba.

From class LoadTask, method run.

public void run() {
    MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipelineId));
    while (running) {
        try {
            final EtlEventData etlEventData = arbitrateEventService.loadEvent().await(pipelineId);
            Runnable task = new Runnable() {

                public void run() {
                    // set up profiling info
                    boolean profiling = isProfiling();
                    Long profilingStartTime = null;
                    if (profiling) {
                        profilingStartTime = System.currentTimeMillis();
                    }
                    MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipelineId));
                    String currentName = Thread.currentThread().getName();
                    Thread.currentThread().setName(createTaskName(pipelineId, "LoadWorker"));
                    List<LoadContext> processedContexts = null;
                    try {
                        // later this could check whether the synced data is rowData
                        List<PipeKey> keys = (List<PipeKey>) etlEventData.getDesc();
                        DbBatch dbBatch = rowDataPipeDelegate.get(keys);
                        // may be null: under memory pressure or network trouble, a long-blocked pipe fetch can fail, and the upstream node may already have deleted the data
                        if (dbBatch == null) {
                            processMissData(pipelineId, "load miss data with keys:" + keys.toString());
                            return;
                        }
                        // perform the data load
                        otterLoaderFactory.setStartTime(dbBatch.getRowBatch().getIdentity(), etlEventData.getStartTime());
                        processedContexts = otterLoaderFactory.load(dbBatch);
                        if (profiling) {
                            Long profilingEndTime = System.currentTimeMillis();
                            stageAggregationCollector.push(pipelineId, StageType.LOAD, new AggregationItem(profilingStartTime, profilingEndTime));
                        }
                        // notify the arbitrator via single() once processing completes
                        arbitrateEventService.loadEvent().single(etlEventData);
                    } catch (Throwable e) {
                        if (!isInterrupt(e)) {
                            logger.error(String.format("[%s] loadWork executor error! data:%s", pipelineId, etlEventData), e);
                        } else {
                            logger.info(String.format("[%s] loadWork executor interrupted! data:%s", pipelineId, etlEventData), e);
                        }
                        if (processedContexts != null) {
                            // the load itself succeeded but notifying the arbitrator failed; record the contexts to the store
                            for (LoadContext context : processedContexts) {
                                try {
                                    if (context instanceof DbLoadContext) {
                                        dbLoadInterceptor.error((DbLoadContext) context);
                                    }
                                } catch (Throwable ie) {
                                    logger.error(String.format("[%s] interceptor process error failed!", pipelineId), ie);
                                }
                            }
                        }
                        if (!isInterrupt(e)) {
                            sendRollbackTermin(pipelineId, e);
                        }
                    } finally {
                        Thread.currentThread().setName(currentName);
                        MDC.remove(OtterConstants.splitPipelineLogFileKey);
                    }
                }
            };
            // build a pending task so it can be cancelled when the worker threads shut down
            SetlFuture loadFuture = new SetlFuture(StageType.LOAD, etlEventData.getProcessId(), pendingFuture, task);
            executorService.execute(loadFuture);
        } catch (Throwable e) {
            if (isInterrupt(e)) {
                logger.info(String.format("[%s] loadTask is interrupted!", pipelineId), e);
                // release the lock
                return;
            } else {
                logger.error(String.format("[%s] loadTask error!", pipelineId), e);
                // arbitrateEventService.loadEvent().release(pipelineId); //
                // release the lock
                // release the lock first, then send the rollback signal
                sendRollbackTermin(pipelineId, e);
            }
        }
    }
}
Also used : PipeKey(com.alibaba.otter.node.etl.common.pipe.PipeKey) DbBatch(com.alibaba.otter.shared.etl.model.DbBatch) EtlEventData(com.alibaba.otter.shared.arbitrate.model.EtlEventData) DbLoadContext(com.alibaba.otter.node.etl.load.loader.db.context.DbLoadContext) LoadContext(com.alibaba.otter.node.etl.load.loader.LoadContext) AggregationItem(com.alibaba.otter.node.etl.common.jmx.StageAggregation.AggregationItem) List(java.util.List) SetlFuture(com.alibaba.otter.node.etl.extract.SetlFuture)
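
Both catch blocks hinge on isInterrupt(e), which decides between a graceful stop and a rollback. The otter implementation is not shown in this example; a plausible minimal version just walks the cause chain looking for an InterruptedException:

// Sketch only; the real otter helper may recognize additional exception types.
private boolean isInterrupt(Throwable e) {
    for (Throwable cause = e; cause != null; cause = cause.getCause()) {
        if (cause instanceof InterruptedException) {
            return true;
        }
    }
    return false;
}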

Example 10 with DbBatch

Use of com.alibaba.otter.shared.etl.model.DbBatch in project otter by alibaba.

From class SelectTask, method processSelect.

private void processSelect() {
    while (running) {
        try {
            // wait for ProcessTermin to exhaust; this call blocks
            // if ProcessTermin sees a rollback it pauses immediately, reacting faster than the distributed permit
            canStartSelector.get();
            // check that this is the working node: the S stage must never run on two nodes at once, or the selector can produce corrupted data
            if (needCheck) {
                checkContinueWork();
            }
            // when suspended due to a block, wait for the manager to finish handling it and resume the sync
            // this also stops promptly once a rollback occurs
            arbitrateEventService.toolEvent().waitForPermit(pipelineId);
            // startVersion addresses this problem: when a rollback happens, try to tell whether the fetched data predates or follows the rollback, and discard pre-rollback data
            // (batches taken out before the rollback never actually executed; get would return the batch after them, and without discarding it, later data would run first, execution would return to the failure point, and run again)
            // int startVersion = rversion.get();
            Message gotMessage = otterSelector.selector();
            // modified by ljh at 2012-09-10: take startVersion after fetching the data
            // taken before the fetch (hit a concurrency bug):
            // a. take startVersion, then fetch; if a rollback starts and completes during the fetch, the selector returns with data already drained to the end
            // b. the version check then sees a change and triggers another fetch, but the get cursor is already at the end of the queue, so nothing comes back and the task waits forever
            // taken after the fetch (a small flaw):
            // a. with rollback and selector running concurrently, stale pre-rollback data can be fetched while startVersion is not yet initialized, so the rollback goes undetected and the later changes sync early
            // (fairly likely in practice, depending on the gap between the selector call and initializing startVersion)
            int startVersion = rversion.get();
            if (!canStartSelector.state()) {
                // has an exception occurred?
                // data fetched right as the rollback hit: otterSelector.selector() loops, so a rollback may have happened before it was noticed
                rollback(gotMessage.getId());
                continue;
            }
            if (CollectionUtils.isEmpty(gotMessage.getDatas())) {
                // even empty data must advance the cursor; loopback records may have been filtered out
                // add to the pending-response buffer; no need to await the termin signal since no s/e/t/l run was started
                batchBuffer.put(new BatchTermin(gotMessage.getId(), false));
                continue;
            }
            final EtlEventData etlEventData = arbitrateEventService.selectEvent().await(pipelineId);
            if (rversion.get() != startVersion) {
                // the version changed, meaning a rollback happened in between; discard this data
                logger.warn("rollback happened, should skip this data and get a new message.");
                // make sure the rollback has completed
                canStartSelector.get();
                // run one s/e/t/l pass regardless of whether there is data
                gotMessage = otterSelector.selector();
            }
            final Message message = gotMessage;
            final BatchTermin batchTermin = new BatchTermin(message.getId(), etlEventData.getProcessId());
            // add to the pending-response buffer
            batchBuffer.put(batchTermin);
            Runnable task = new Runnable() {

                public void run() {
                    // set up profiling info
                    boolean profiling = isProfiling();
                    Long profilingStartTime = null;
                    if (profiling) {
                        profilingStartTime = System.currentTimeMillis();
                    }
                    MDC.put(OtterConstants.splitPipelineLogFileKey, String.valueOf(pipelineId));
                    String currentName = Thread.currentThread().getName();
                    Thread.currentThread().setName(createTaskName(pipelineId, "SelectWorker"));
                    try {
                        pipeline = configClientService.findPipeline(pipelineId);
                        List<EventData> eventData = message.getDatas();
                        long startTime = etlEventData.getStartTime();
                        if (!CollectionUtils.isEmpty(eventData)) {
                            startTime = eventData.get(0).getExecuteTime();
                        }
                        Channel channel = configClientService.findChannelByPipelineId(pipelineId);
                        RowBatch rowBatch = new RowBatch();
                        // build the unique identity
                        Identity identity = new Identity();
                        identity.setChannelId(channel.getId());
                        identity.setPipelineId(pipelineId);
                        identity.setProcessId(etlEventData.getProcessId());
                        rowBatch.setIdentity(identity);
                        // merge the data
                        for (EventData data : eventData) {
                            rowBatch.merge(data);
                        }
                        long nextNodeId = etlEventData.getNextNid();
                        List<PipeKey> pipeKeys = rowDataPipeDelegate.put(new DbBatch(rowBatch), nextNodeId);
                        etlEventData.setDesc(pipeKeys);
                        etlEventData.setNumber((long) eventData.size());
                        // use the first record of the original data
                        etlEventData.setFirstTime(startTime);
                        etlEventData.setBatchId(message.getId());
                        if (profiling) {
                            Long profilingEndTime = System.currentTimeMillis();
                            stageAggregationCollector.push(pipelineId, StageType.SELECT, new AggregationItem(profilingStartTime, profilingEndTime));
                        }
                        arbitrateEventService.selectEvent().single(etlEventData);
                    } catch (Throwable e) {
                        if (!isInterrupt(e)) {
                            logger.error(String.format("[%s] selectWork executor error! data:%s", pipelineId, etlEventData), e);
                            sendRollbackTermin(pipelineId, e);
                        } else {
                            logger.info(String.format("[%s] selectWork executor interrupted! data:%s", pipelineId, etlEventData), e);
                        }
                    } finally {
                        Thread.currentThread().setName(currentName);
                        MDC.remove(OtterConstants.splitPipelineLogFileKey);
                    }
                }
            };
            // build a pending task so it can be cancelled when the worker threads shut down
            SetlFuture selectFuture = new SetlFuture(StageType.SELECT, etlEventData.getProcessId(), pendingFuture, task);
            executorService.execute(selectFuture);
        } catch (Throwable e) {
            if (!isInterrupt(e)) {
                logger.error(String.format("[%s] selectTask error!", pipelineId), e);
                sendRollbackTermin(pipelineId, e);
            } else {
                logger.info(String.format("[%s] selectTask is interrupted!", pipelineId), e);
                return;
            }
        }
    }
}
Also used : Message(com.alibaba.otter.node.etl.select.selector.Message) Channel(com.alibaba.otter.shared.common.model.config.channel.Channel) PipeKey(com.alibaba.otter.node.etl.common.pipe.PipeKey) TerminEventData(com.alibaba.otter.shared.arbitrate.model.TerminEventData) EtlEventData(com.alibaba.otter.shared.arbitrate.model.EtlEventData) EventData(com.alibaba.otter.shared.etl.model.EventData) DbBatch(com.alibaba.otter.shared.etl.model.DbBatch) RowBatch(com.alibaba.otter.shared.etl.model.RowBatch) AggregationItem(com.alibaba.otter.node.etl.common.jmx.StageAggregation.AggregationItem) Identity(com.alibaba.otter.shared.etl.model.Identity) SetlFuture(com.alibaba.otter.node.etl.extract.SetlFuture)
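
The startVersion handling above is an optimistic-concurrency check: fetch the data, snapshot the version, block on the stage event, then compare again; if a rollback bumped the version in between, the fetched batch predates the rollback and must be refetched. A condensed sketch of the idea, assuming rversion is an AtomicInteger that the rollback path increments (as the comments in the method imply):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

public class RollbackCheck {

    // Illustrative generic form of the check in processSelect.
    public static <T> T fetchWithRollbackCheck(AtomicInteger version, Supplier<T> fetch, Runnable awaitStage) {
        T data = fetch.get();             // may straddle an in-flight rollback
        int startVersion = version.get(); // snapshot taken *after* the fetch (see the comment block above)
        awaitStage.run();                 // e.g. selectEvent().await(pipelineId)
        if (version.get() != startVersion) {
            // a rollback completed in between; discard and fetch fresh data
            data = fetch.get();
        }
        return data;
    }
}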

Aggregations

DbBatch (com.alibaba.otter.shared.etl.model.DbBatch) 16
RowBatch (com.alibaba.otter.shared.etl.model.RowBatch) 12
Test (org.testng.annotations.Test) 9
EventData (com.alibaba.otter.shared.etl.model.EventData) 7
Identity (com.alibaba.otter.shared.etl.model.Identity) 7
Pipeline (com.alibaba.otter.shared.common.model.config.pipeline.Pipeline) 6
BaseOtterTest (com.alibaba.otter.node.etl.BaseOtterTest) 5
PipeKey (com.alibaba.otter.node.etl.common.pipe.PipeKey) 5
BaseDbTest (com.alibaba.otter.node.etl.BaseDbTest) 4
AggregationItem (com.alibaba.otter.node.etl.common.jmx.StageAggregation.AggregationItem) 4
EtlEventData (com.alibaba.otter.shared.arbitrate.model.EtlEventData) 4
DataMediaPair (com.alibaba.otter.shared.common.model.config.data.DataMediaPair) 4
FileBatch (com.alibaba.otter.shared.etl.model.FileBatch) 4
MemoryPipeKey (com.alibaba.otter.node.etl.common.pipe.impl.memory.MemoryPipeKey) 3
RpcPipeKey (com.alibaba.otter.node.etl.common.pipe.impl.rpc.RpcPipeKey) 3
SetlFuture (com.alibaba.otter.node.etl.extract.SetlFuture) 3
File (java.io.File) 3
List (java.util.List) 3
NodeCommmunicationClient (com.alibaba.otter.node.common.communication.NodeCommmunicationClient) 2
PipeException (com.alibaba.otter.node.etl.common.pipe.exception.PipeException) 2