use of org.apache.flink.shaded.guava30.com.google.common.io.Closer in project wire by square.
the class SchemaLoader method load.
public Schema load() throws IOException {
  if (sources.isEmpty()) {
    throw new IllegalStateException("No sources added.");
  }
  try (Closer closer = Closer.create()) {
    // Map the physical path to the file system root. For regular directories the key and the
    // value are equal. For ZIP files the key is the path to the .zip, and the value is the root
    // of the file system within it.
    Map<Path, Path> directories = new LinkedHashMap<>();
    for (Path source : sources) {
      if (Files.isRegularFile(source)) {
        FileSystem sourceFs = FileSystems.newFileSystem(source, getClass().getClassLoader());
        closer.register(sourceFs);
        directories.put(source, getOnlyElement(sourceFs.getRootDirectories()));
      } else {
        directories.put(source, source);
      }
    }
    return loadFromDirectories(directories);
  }
}
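The try-with-resources form above works because Closer itself implements Closeable: when the try block exits, the Closer closes every registered resource, in the reverse of registration order. A minimal standalone sketch of the same idiom, using the plain com.google.common.io.Closer API (the shaded Flink package exposes the same class); the file names are hypothetical:

import com.google.common.io.Closer;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CloserSketch {
  public static void main(String[] args) throws IOException {
    // Closer implements Closeable, so it can drive try-with-resources;
    // registered resources are closed in reverse registration order.
    try (Closer closer = Closer.create()) {
      InputStream first = closer.register(new FileInputStream("first.dat"));
      InputStream second = closer.register(new FileInputStream("second.dat"));
      System.out.println(first.read() + second.read());
    } // both streams are closed here, even if read() throws
  }
}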
use of org.apache.flink.shaded.guava30.com.google.common.io.Closer in project incubator-gobblin by apache.
the class GobblinClusterManagerTest method testSendShutdownRequest.
@Test
public void testSendShutdownRequest() throws Exception {
  Logger log = LoggerFactory.getLogger("testSendShutdownRequest");
  Closer closer = Closer.create();
  try {
    CuratorFramework curatorFramework = TestHelper.createZkClient(this.testingZKServer, closer);
    final GetInstanceMessageNumFunc getMessageNumFunc =
        new GetInstanceMessageNumFunc(GobblinClusterManagerTest.class.getSimpleName(), curatorFramework);
    AssertWithBackoff assertWithBackoff = AssertWithBackoff.create().logger(log).timeoutMs(30000);
    this.gobblinClusterManager.sendShutdownRequest();
    Assert.assertEquals(
        curatorFramework.checkExists()
            .forPath(String.format("/%s/INSTANCES/%s/MESSAGES",
                GobblinClusterManagerTest.class.getSimpleName(), TestHelper.TEST_HELIX_INSTANCE_NAME))
            .getVersion(),
        0);
    assertWithBackoff.assertEquals(getMessageNumFunc, 1, "1 message queued");
    // Give Helix some time to handle the message.
    assertWithBackoff.assertEquals(getMessageNumFunc, 0, "all messages processed");
  } finally {
    closer.close();
  }
}
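TestHelper.createZkClient is not shown here, but passing the Closer into the factory suggests the helper registers the CuratorFramework it builds, so the finally block also tears down the ZooKeeper connection. A hedged sketch of that shape, assuming a hypothetical connect string and Curator's standard factory API:

import com.google.common.io.Closer;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;

public class ZkClientSketch {
  // Hypothetical stand-in for TestHelper.createZkClient: the Closer owns
  // the client's lifecycle, so closer.close() shuts the connection down.
  // register() returns its argument, which allows the inline assignment.
  static CuratorFramework createZkClient(String connectString, Closer closer) {
    CuratorFramework client =
        closer.register(CuratorFrameworkFactory.newClient(connectString, new RetryOneTime(1)));
    client.start();
    return client;
  }
}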
use of org.apache.flink.shaded.guava30.com.google.common.io.Closer in project incubator-gobblin by apache.
the class SerialCompactor method compact.
@Override
public void compact() throws IOException {
  checkSchemaCompatibility();
  Closer closer = Closer.create();
  try {
    this.conn = closer.register(HiveJdbcConnector.newConnectorWithProps(CompactionRunner.properties));
    setHiveParameters();
    createTables();
    HiveTable mergedDelta = mergeDeltas();
    HiveManagedTable notUpdated = getNotUpdatedRecords(this.snapshot, mergedDelta);
    unionNotUpdatedRecordsAndDeltas(notUpdated, mergedDelta);
  } catch (SQLException e) {
    LOG.error("SQLException during compaction: " + e.getMessage());
    throw new RuntimeException(e);
  } catch (IOException e) {
    LOG.error("IOException during compaction: " + e.getMessage());
    throw new RuntimeException(e);
  } catch (RuntimeException e) {
    LOG.error("Runtime Exception during compaction: " + e.getMessage());
    throw e;
  } finally {
    try {
      deleteTmpFiles();
    } finally {
      closer.close();
    }
  }
}
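The nested finally blocks guarantee that closer.close() runs even if deleteTmpFiles() throws. A stripped-down sketch of that shape with concrete resources (the file names are illustrative):

import com.google.common.io.Closer;

import java.io.IOException;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class NestedFinallySketch {
  public static void main(String[] args) throws IOException {
    Path tmpFile = Files.createTempFile("compact", ".tmp");
    Closer closer = Closer.create();
    try {
      Writer out = closer.register(Files.newBufferedWriter(Paths.get("result.out")));
      out.write("merged");
    } finally {
      try {
        Files.deleteIfExists(tmpFile);  // the cleanup step, which may itself throw
      } finally {
        closer.close();                 // still runs, closing the registered writer
      }
    }
  }
}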
use of org.apache.flink.shaded.guava30.com.google.common.io.Closer in project incubator-gobblin by apache.
the class HiveSerDeTest method testAvroOrcSerDes.
/**
 * This test uses the Avro SerDe to deserialize data from Avro files, and the ORC SerDe
 * to serialize the records into ORC files.
 */
@Test(groups = { "gobblin.serde" })
public void testAvroOrcSerDes() throws IOException, DataRecordException, DataConversionException {
  Properties properties = new Properties();
  // Close the reader once the properties are loaded instead of leaking it.
  try (FileReader propsReader = new FileReader("gobblin-core/src/test/resources/serde/serde.properties")) {
    properties.load(propsReader);
  }
  SourceState sourceState = new SourceState(new State(properties), ImmutableList.<WorkUnitState>of());
  OldApiWritableFileSource source = new OldApiWritableFileSource();
  List<WorkUnit> workUnits = source.getWorkunits(sourceState);
  Assert.assertEquals(workUnits.size(), 1);
  WorkUnitState wus = new WorkUnitState(workUnits.get(0));
  wus.addAll(sourceState);
  Closer closer = Closer.create();
  HiveWritableHdfsDataWriter writer = null;
  try {
    OldApiWritableFileExtractor extractor =
        closer.register((OldApiWritableFileExtractor) source.getExtractor(wus));
    HiveSerDeConverter converter = closer.register(new HiveSerDeConverter());
    writer = closer.register((HiveWritableHdfsDataWriter) new HiveWritableHdfsDataWriterBuilder<>()
        .withBranches(1)
        .withWriterId("0")
        .writeTo(Destination.of(DestinationType.HDFS, sourceState))
        .writeInFormat(WriterOutputFormat.ORC)
        .build());
    converter.init(wus);
    Writable record;
    while ((record = extractor.readRecord(null)) != null) {
      Iterable<Writable> convertedRecordIterable = converter.convertRecordImpl(null, record, wus);
      Assert.assertEquals(Iterators.size(convertedRecordIterable.iterator()), 1);
      writer.write(convertedRecordIterable.iterator().next());
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
    if (writer != null) {
      writer.commit();
    }
    Assert.assertTrue(this.fs.exists(new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
        sourceState.getProp(ConfigurationKeys.WRITER_FILE_NAME))));
    HadoopUtils.deletePath(this.fs, new Path(sourceState.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR)), true);
  }
}
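This test shows the idiom Closer was designed around: catch Throwable, mark it as the primary exception with closer.rethrow(t), and close in finally so that failures from close() are recorded against the primary exception instead of masking it. Distilled into a minimal standalone method (the input path is hypothetical):

import com.google.common.io.Closer;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class RethrowSketch {
  public static String readFirstLine(String path) throws IOException {
    Closer closer = Closer.create();
    try {
      BufferedReader reader = closer.register(new BufferedReader(new FileReader(path)));
      return reader.readLine();
    } catch (Throwable t) {
      // rethrow() records t as the primary exception and rethrows it.
      throw closer.rethrow(t);
    } finally {
      // Exceptions thrown while closing are added as suppressed exceptions
      // on the primary one instead of replacing it.
      closer.close();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(readFirstLine("input.txt"));
  }
}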
use of org.apache.flink.shaded.guava30.com.google.common.io.Closer in project incubator-gobblin by apache.
the class OldApiHadoopFileInputSourceTest method testGetWorkUnitsAndExtractor.
@Test
public void testGetWorkUnitsAndExtractor() throws IOException, DataRecordException {
  OldApiHadoopFileInputSource<String, Text, LongWritable, Text> fileInputSource = new TestHadoopFileInputSource();
  List<WorkUnit> workUnitList = fileInputSource.getWorkunits(this.sourceState);
  Assert.assertEquals(workUnitList.size(), 1);
  WorkUnitState workUnitState = new WorkUnitState(workUnitList.get(0));
  Closer closer = Closer.create();
  try {
    // Register the extractor with the closer so the finally block closes it.
    OldApiHadoopFileInputExtractor<String, Text, LongWritable, Text> extractor =
        closer.register((OldApiHadoopFileInputExtractor<String, Text, LongWritable, Text>) fileInputSource
            .getExtractor(workUnitState));
    Text text = extractor.readRecord(null);
    Assert.assertEquals(text.toString(), TEXT);
    Assert.assertNull(extractor.readRecord(null));
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}