Use of org.apache.inlong.manager.common.pojo.sink.SinkFieldRequest in project incubator-inlong by apache.
Class InlongStreamSinkTransfer, method createClickHouseRequest.
private static SinkRequest createClickHouseRequest(StreamSink streamSink, InlongStreamInfo streamInfo) {
    ClickHouseSinkRequest clickHouseSinkRequest = new ClickHouseSinkRequest();
    ClickHouseSink clickHouseSink = (ClickHouseSink) streamSink;
    clickHouseSinkRequest.setSinkName(clickHouseSink.getSinkName());
    clickHouseSinkRequest.setDatabaseName(clickHouseSink.getDatabaseName());
    clickHouseSinkRequest.setSinkType(clickHouseSink.getSinkType().name());
    clickHouseSinkRequest.setJdbcUrl(clickHouseSink.getJdbcUrl());
    DefaultAuthentication defaultAuthentication = clickHouseSink.getAuthentication();
    AssertUtil.notNull(defaultAuthentication,
            String.format("Clickhouse storage:%s must be authenticated", clickHouseSink.getDatabaseName()));
    clickHouseSinkRequest.setUsername(defaultAuthentication.getUserName());
    clickHouseSinkRequest.setPassword(defaultAuthentication.getPassword());
    clickHouseSinkRequest.setTableName(clickHouseSink.getTableName());
    clickHouseSinkRequest.setDistributedTable(clickHouseSink.getDistributedTable());
    clickHouseSinkRequest.setFlushInterval(clickHouseSink.getFlushInterval());
    clickHouseSinkRequest.setFlushRecordNumber(clickHouseSink.getFlushRecordNumber());
    clickHouseSinkRequest.setKeyFieldNames(clickHouseSink.getKeyFieldNames());
    clickHouseSinkRequest.setPartitionKey(clickHouseSink.getPartitionKey());
    clickHouseSinkRequest.setPartitionStrategy(clickHouseSink.getPartitionStrategy());
    clickHouseSinkRequest.setWriteMaxRetryTimes(clickHouseSink.getWriteMaxRetryTimes());
    clickHouseSinkRequest.setInlongGroupId(streamInfo.getInlongGroupId());
    clickHouseSinkRequest.setInlongStreamId(streamInfo.getInlongStreamId());
    clickHouseSinkRequest.setProperties(clickHouseSink.getProperties());
    clickHouseSinkRequest.setEnableCreateResource(clickHouseSink.isNeedCreated() ? 1 : 0);
    // Convert the sink fields into SinkFieldRequest objects only when fields are present.
    if (CollectionUtils.isNotEmpty(clickHouseSink.getSinkFields())) {
        List<SinkFieldRequest> fieldRequests = createSinkFieldRequests(clickHouseSink.getSinkFields());
        clickHouseSinkRequest.setFieldList(fieldRequests);
    }
    return clickHouseSinkRequest;
}
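Both request builders delegate field conversion to createSinkFieldRequests, which is not part of the snippet above. A minimal sketch of what such a helper could look like, assuming the SinkField type exposes getFieldName, getFieldType, and getFieldComment (the parameter type and accessor set are assumptions, not confirmed by the snippet):

// Hypothetical helper, shown only to illustrate how SinkField entries could be mapped
// onto SinkFieldRequest objects; it is not the project's actual implementation.
private static List<SinkFieldRequest> createSinkFieldRequests(List<SinkField> sinkFields) {
    List<SinkFieldRequest> fieldRequests = new ArrayList<>(sinkFields.size());
    for (SinkField sinkField : sinkFields) {
        SinkFieldRequest fieldRequest = new SinkFieldRequest();
        fieldRequest.setFieldName(sinkField.getFieldName());
        fieldRequest.setFieldType(sinkField.getFieldType().toString());
        fieldRequest.setFieldComment(sinkField.getFieldComment());
        fieldRequests.add(fieldRequest);
    }
    return fieldRequests;
}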
Use of org.apache.inlong.manager.common.pojo.sink.SinkFieldRequest in project incubator-inlong by apache.
Class InlongStreamSinkTransfer, method createKafkaRequest.
private static SinkRequest createKafkaRequest(StreamSink streamSink, InlongStreamInfo streamInfo) {
    KafkaSinkRequest kafkaSinkRequest = new KafkaSinkRequest();
    KafkaSink kafkaSink = (KafkaSink) streamSink;
    kafkaSinkRequest.setSinkName(kafkaSink.getSinkName());
    kafkaSinkRequest.setAddress(kafkaSink.getAddress());
    kafkaSinkRequest.setTopicName(kafkaSink.getTopicName());
    kafkaSinkRequest.setSinkType(kafkaSink.getSinkType().name());
    kafkaSinkRequest.setInlongGroupId(streamInfo.getInlongGroupId());
    kafkaSinkRequest.setInlongStreamId(streamInfo.getInlongStreamId());
    kafkaSinkRequest.setSerializationType(kafkaSink.getDataFormat().name());
    kafkaSinkRequest.setEnableCreateResource(kafkaSink.isNeedCreated() ? 1 : 0);
    kafkaSinkRequest.setProperties(kafkaSink.getProperties());
    if (CollectionUtils.isNotEmpty(kafkaSink.getSinkFields())) {
        List<SinkFieldRequest> fieldRequests = createSinkFieldRequests(kafkaSink.getSinkFields());
        kafkaSinkRequest.setFieldList(fieldRequests);
    }
    return kafkaSinkRequest;
}
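For context, a hedged sketch of how a dispatcher in InlongStreamSinkTransfer could route a StreamSink to the matching builder; the method name createSinkRequest and the SinkType enum constants are assumptions rather than quotes from the project:

// Hypothetical dispatcher; enum constants and the method name are assumptions.
public static SinkRequest createSinkRequest(StreamSink streamSink, InlongStreamInfo streamInfo) {
    switch (streamSink.getSinkType()) {
        case CLICKHOUSE:
            return createClickHouseRequest(streamSink, streamInfo);
        case KAFKA:
            return createKafkaRequest(streamSink, streamInfo);
        default:
            throw new IllegalArgumentException("Unsupported sink type: " + streamSink.getSinkType());
    }
}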
Use of org.apache.inlong.manager.common.pojo.sink.SinkFieldRequest in project incubator-inlong by apache.
Class HiveStreamSinkOperation, method saveFieldOpt.
@Override
public void saveFieldOpt(SinkRequest request) {
    List<SinkFieldRequest> fieldList = request.getFieldList();
    LOGGER.info("begin to save field={}", fieldList);
    if (CollectionUtils.isEmpty(fieldList)) {
        return;
    }
    int size = fieldList.size();
    List<StreamSinkFieldEntity> entityList = new ArrayList<>(size);
    String groupId = request.getInlongGroupId();
    String streamId = request.getInlongStreamId();
    String sinkType = request.getSinkType();
    Integer sinkId = request.getId();
    for (SinkFieldRequest fieldInfo : fieldList) {
        StreamSinkFieldEntity fieldEntity = CommonBeanUtils.copyProperties(fieldInfo, StreamSinkFieldEntity::new);
        // Fall back to the field name when no comment was provided.
        if (StringUtils.isEmpty(fieldEntity.getFieldComment())) {
            fieldEntity.setFieldComment(fieldEntity.getFieldName());
        }
        fieldEntity.setInlongGroupId(groupId);
        fieldEntity.setInlongStreamId(streamId);
        fieldEntity.setSinkType(sinkType);
        fieldEntity.setSinkId(sinkId);
        fieldEntity.setIsDeleted(EntityStatus.UN_DELETED.getCode());
        entityList.add(fieldEntity);
    }
    sinkFieldMapper.insertAll(entityList);
    LOGGER.info("success to save hive field");
}
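The entities are persisted in one batch through sinkFieldMapper. A minimal sketch of the mapper interface this code relies on; only the insertAll call comes from the snippet, the interface name and return type are assumptions:

// Illustrative MyBatis-style mapper; only insertAll(...) is taken from the snippet above.
public interface StreamSinkFieldEntityMapper {
    // Batch-inserts all sink field records for one sink.
    int insertAll(List<StreamSinkFieldEntity> fieldList);
}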
Use of org.apache.inlong.manager.common.pojo.sink.SinkFieldRequest in project incubator-inlong by apache.
Class KafkaStreamSinkOperation, method saveFieldOpt.
@Override
public void saveFieldOpt(SinkRequest request) {
    List<SinkFieldRequest> fieldList = request.getFieldList();
    LOGGER.info("begin to save field={}", fieldList);
    if (CollectionUtils.isEmpty(fieldList)) {
        return;
    }
    int size = fieldList.size();
    List<StreamSinkFieldEntity> entityList = new ArrayList<>(size);
    String groupId = request.getInlongGroupId();
    String streamId = request.getInlongStreamId();
    String sinkType = request.getSinkType();
    Integer sinkId = request.getId();
    for (SinkFieldRequest fieldInfo : fieldList) {
        StreamSinkFieldEntity fieldEntity = CommonBeanUtils.copyProperties(fieldInfo, StreamSinkFieldEntity::new);
        if (StringUtils.isEmpty(fieldEntity.getFieldComment())) {
            fieldEntity.setFieldComment(fieldEntity.getFieldName());
        }
        fieldEntity.setInlongGroupId(groupId);
        fieldEntity.setInlongStreamId(streamId);
        fieldEntity.setSinkType(sinkType);
        fieldEntity.setSinkId(sinkId);
        fieldEntity.setIsDeleted(EntityStatus.UN_DELETED.getCode());
        entityList.add(fieldEntity);
    }
    sinkFieldMapper.insertAll(entityList);
    LOGGER.info("success to save field");
}
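The Kafka implementation mirrors the Hive one line for line. A hypothetical caller could exercise it as sketched below; the operation instance, sink id, group/stream ids, and field values are made up for illustration:

// Hypothetical usage; kafkaStreamSinkOperation and all literal values are illustrative.
KafkaSinkRequest request = new KafkaSinkRequest();
request.setId(1);
request.setInlongGroupId("test_group");
request.setInlongStreamId("test_stream");
request.setSinkType("KAFKA");

SinkFieldRequest field = new SinkFieldRequest();
field.setFieldName("user_id");
field.setFieldType("string");
request.setFieldList(Collections.singletonList(field));

kafkaStreamSinkOperation.saveFieldOpt(request);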
Use of org.apache.inlong.manager.common.pojo.sink.SinkFieldRequest in project incubator-inlong by apache.
Class DisableZkForSortTest, method createHiveSink.
public HiveSinkRequest createHiveSink(InlongStreamInfo streamInfo) {
    HiveSinkRequest hiveSinkRequest = new HiveSinkRequest();
    hiveSinkRequest.setInlongGroupId(streamInfo.getInlongGroupId());
    hiveSinkRequest.setSinkType("HIVE");
    hiveSinkRequest.setSinkName("HIVE");
    hiveSinkRequest.setInlongStreamId(streamInfo.getInlongStreamId());
    // Map the stream fields onto SinkFieldRequest objects for the Hive sink.
    List<SinkFieldRequest> sinkFieldRequests = createStreamFields(streamInfo.getInlongGroupId(),
            streamInfo.getInlongStreamId()).stream().map(streamFieldInfo -> {
                SinkFieldRequest fieldInfo = new SinkFieldRequest();
                fieldInfo.setFieldName(streamFieldInfo.getFieldName());
                fieldInfo.setFieldType(streamFieldInfo.getFieldType());
                fieldInfo.setFieldComment(streamFieldInfo.getFieldComment());
                return fieldInfo;
            }).collect(Collectors.toList());
    hiveSinkRequest.setFieldList(sinkFieldRequests);
    hiveSinkRequest.setEnableCreateTable(0);
    hiveSinkRequest.setUsername(OPERATOR);
    hiveSinkRequest.setPassword("password");
    hiveSinkRequest.setDbName("default");
    hiveSinkRequest.setTableName("kip_test");
    hiveSinkRequest.setJdbcUrl("jdbc:hive2://172.17.12.135:7001");
    hiveSinkRequest.setFileFormat("TextFile");
    hiveSinkRequest.setHdfsDefaultFs("hdfs://172.17.12.235:4007");
    hiveSinkRequest.setWarehouseDir("/user/hive/warehouse");
    // The original snippet called setFileFormat a second time with "UTF-8", which would
    // overwrite the "TextFile" value above; setDataEncoding is the likely intended setter.
    hiveSinkRequest.setDataEncoding(StandardCharsets.UTF_8.name());
    // "124" is the ASCII code of the '|' separator character.
    hiveSinkRequest.setDataSeparator("124");
    streamSinkService.save(hiveSinkRequest, OPERATOR);
    return hiveSinkRequest;
}
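The test maps whatever createStreamFields returns onto SinkFieldRequest objects. A hedged sketch of a helper that would satisfy the three accessors used above; the InlongStreamFieldInfo type name and the sample field values are assumptions:

// Hypothetical helper; only getFieldName/getFieldType/getFieldComment are required
// of the returned type by the test snippet above.
public List<InlongStreamFieldInfo> createStreamFields(String groupId, String streamId) {
    InlongStreamFieldInfo fieldInfo = new InlongStreamFieldInfo();
    fieldInfo.setInlongGroupId(groupId);
    fieldInfo.setInlongStreamId(streamId);
    fieldInfo.setFieldName("id");
    fieldInfo.setFieldType("int");
    fieldInfo.setFieldComment("identifier");
    return Collections.singletonList(fieldInfo);
}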