Use of io.confluent.connect.s3.storage.S3OutputStream in project kafka-connect-storage-cloud by confluentinc.
From the class DataWriterByteArrayTest, method testBufferOverflowFix:
@Test
public void testBufferOverflowFix() throws Exception {
  localProps.put(S3SinkConnectorConfig.FORMAT_CLASS_CONFIG, ByteArrayFormat.class.getName());
  setUp();
  // Pin the part size to 5 bytes so the first write fills the part buffer exactly.
  PowerMockito.doReturn(5).when(connectorConfig).getPartSize();
  S3OutputStream out = new S3OutputStream(S3_TEST_BUCKET_NAME, connectorConfig, s3);
  out.write(new byte[] { 65, 66, 67, 68, 69 });
  // Before the fix, this sixth byte overflowed the buffer instead of starting a new part.
  out.write(70);
}
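For context, a minimal hypothetical sketch of the stream's full lifecycle, reusing the names from the test above; the try/finally pattern, the commit() call, and data (a placeholder byte array) are assumptions for illustration, not part of the test:

// Hypothetical sketch: S3OutputStream buffers writes into parts of
// getPartSize() bytes and uploads each completed part via S3 multipart upload.
S3OutputStream out = new S3OutputStream(S3_TEST_BUCKET_NAME, connectorConfig, s3);
try {
  out.write(data);  // buffered; filling a part is expected to trigger a part upload
  out.commit();     // uploads the final partial part and completes the multipart upload
} finally {
  out.close();      // closing without a prior commit() is expected to abort the upload
}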
Use of io.confluent.connect.s3.storage.S3OutputStream in project kafka-connect-storage-cloud by confluentinc.
From the class AvroRecordWriterProvider, method getRecordWriter:
@Override
public RecordWriter getRecordWriter(final S3SinkConnectorConfig conf, final String filename) {
  // This is not meant to be a thread-safe writer!
  return new RecordWriter() {
    final DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
    Schema schema = null;
    S3OutputStream s3out;

    @Override
    public void write(SinkRecord record) {
      if (schema == null) {
        schema = record.valueSchema();
        try {
          log.info("Opening record writer for: {}", filename);
          s3out = storage.create(filename, true);
          org.apache.avro.Schema avroSchema = avroData.fromConnectSchema(schema);
          writer.setCodec(CodecFactory.fromString(conf.getAvroCodec()));
          writer.create(avroSchema, s3out);
        } catch (IOException e) {
          throw new ConnectException(e);
        }
      }
      log.trace("Sink record: {}", record);
      Object value = avroData.fromConnectData(schema, record.value());
      try {
        // AvroData wraps primitive types so their schemas can be included. We need to unwrap
        // NonRecordContainers to just their value to properly handle these types.
        if (value instanceof NonRecordContainer) {
          value = ((NonRecordContainer) value).getValue();
        }
        writer.append(value);
      } catch (IOException e) {
        throw new ConnectException(e);
      }
    }

    @Override
    public void commit() {
      try {
        // Flush is required here, because closing the writer will close the underlying S3
        // output stream before committing any data to S3.
        writer.flush();
        s3out.commit();
        writer.close();
      } catch (IOException e) {
        throw new ConnectException(e);
      }
    }

    @Override
    public void close() {
      try {
        writer.close();
      } catch (IOException e) {
        throw new ConnectException(e);
      }
    }
  };
}
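All three providers on this page hand back a RecordWriter that is driven through the same write/commit/close sequence; inside the connector that sequencing is done by TopicPartitionWriter rather than by user code. A minimal hypothetical driver, where provider, conf, filename, and records are names introduced only for illustration:

// Hypothetical driver for the RecordWriter lifecycle shown above.
RecordWriter writer = provider.getRecordWriter(conf, filename);
for (SinkRecord record : records) {
  writer.write(record);
}
writer.commit();  // commit() must precede close(), per the flush comments above
writer.close();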
Use of io.confluent.connect.s3.storage.S3OutputStream in project kafka-connect-storage-cloud by confluentinc.
From the class ByteArrayRecordWriterProvider, method getRecordWriter:
@Override
public RecordWriter getRecordWriter(final S3SinkConnectorConfig conf, final String filename) {
  return new RecordWriter() {
    final S3OutputStream s3out = storage.create(filename, true);
    final OutputStream s3outWrapper = s3out.wrapForCompression();

    @Override
    public void write(SinkRecord record) {
      log.trace("Sink record: {}", record);
      try {
        byte[] bytes = converter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        s3outWrapper.write(bytes);
        s3outWrapper.write(lineSeparatorBytes);
      } catch (IOException | DataException e) {
        throw new ConnectException(e);
      }
    }

    @Override
    public void commit() {
      try {
        s3out.commit();
        s3outWrapper.close();
      } catch (IOException e) {
        throw new ConnectException(e);
      }
    }

    @Override
    public void close() {
    }
  };
}
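Here every byte passes through wrapForCompression(), so the configured s3.compression.type is applied transparently, and the record separator (lineSeparatorBytes) is appended after each record. The split matters: write() targets the wrapper, while commit() targets the underlying S3OutputStream. A hypothetical sketch of that split, with filename and bytes as placeholder names:

// Hypothetical sketch: data flows through the compression wrapper, while the
// multipart upload is committed on the raw S3OutputStream underneath it.
S3OutputStream s3out = storage.create(filename, true);
OutputStream s3outWrapper = s3out.wrapForCompression();  // expected to be the raw stream when compression is off
s3outWrapper.write(bytes);
s3out.commit();        // completes the upload on the raw stream
s3outWrapper.close();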
Use of io.confluent.connect.s3.storage.S3OutputStream in project kafka-connect-storage-cloud by confluentinc.
From the class JsonRecordWriterProvider, method getRecordWriter:
@Override
public RecordWriter getRecordWriter(final S3SinkConnectorConfig conf, final String filename) {
  try {
    return new RecordWriter() {
      final S3OutputStream s3out = storage.create(filename, true);
      final OutputStream s3outWrapper = s3out.wrapForCompression();
      final JsonGenerator writer = mapper.getFactory()
          .createGenerator(s3outWrapper)
          .setRootValueSeparator(null);

      @Override
      public void write(SinkRecord record) {
        log.trace("Sink record: {}", record);
        try {
          Object value = record.value();
          if (value instanceof Struct) {
            byte[] rawJson = converter.fromConnectData(record.topic(), record.valueSchema(), value);
            s3outWrapper.write(rawJson);
            s3outWrapper.write(LINE_SEPARATOR_BYTES);
          } else {
            writer.writeObject(value);
            writer.writeRaw(LINE_SEPARATOR);
          }
        } catch (IOException e) {
          throw new ConnectException(e);
        }
      }

      @Override
      public void commit() {
        try {
          // Flush is required here, because closing the writer will close the underlying S3
          // output stream before committing any data to S3.
          writer.flush();
          s3out.commit();
          s3outWrapper.close();
        } catch (IOException e) {
          throw new ConnectException(e);
        }
      }

      @Override
      public void close() {
        try {
          writer.close();
        } catch (IOException e) {
          throw new ConnectException(e);
        }
      }
    };
  } catch (IOException e) {
    throw new ConnectException(e);
  }
}
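write() above takes two paths: Struct values are rendered to raw JSON bytes by the configured converter, while any other value (maps, primitives, null) is serialized by the Jackson generator. A self-contained hypothetical illustration of the non-Struct path, writing to System.out instead of S3 (the class name and map value are assumptions):

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collections;

public class JsonPathDemo {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Mirrors the writer above: no root value separator, one record per line.
    JsonGenerator writer = mapper.getFactory()
        .createGenerator(System.out)
        .setRootValueSeparator(null);
    writer.writeObject(Collections.singletonMap("field", "value"));  // prints {"field":"value"}
    writer.writeRaw(System.lineSeparator());
    writer.flush();
  }
}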