Use of java.io.Closeable in project camel by apache.
The class XMLTokenExpressionIteratorInvalidXMLTest, method invokeAndVerify.
/**
 * Pulls two tokens from the given tokenizer and checks whether doing so
 * raised an exception, according to the {@code error} flag.
 *
 * @param tokenizer the token iterator under test; must also implement Closeable
 * @param error whether tokenizing the (possibly invalid) XML is expected to fail
 */
private void invokeAndVerify(Iterator<?> tokenizer, boolean error) throws IOException, XMLStreamException {
    Exception caught = null;
    try {
        // Advancing twice is enough to force the tokenizer over the invalid region.
        tokenizer.next();
        tokenizer.next();
    } catch (Exception e) {
        caught = e;
    } finally {
        // The iterator owns an underlying stream; always release it.
        ((Closeable) tokenizer).close();
    }
    if (error) {
        assertNotNull("the error expected", caught);
    } else {
        assertNull("no error expected", caught);
    }
}
Use of java.io.Closeable in project camel by apache.
The class FallbackTypeConverter, method unmarshal.
/**
 * Unmarshals the given value via JAXB by first adapting it to an
 * {@link XMLStreamReader}: stream readers are used directly, streams and
 * readers are wrapped (optionally through a non-XML-character filter when
 * the exchange requires it), and Sources are converted by the stax helper.
 * Whatever the outcome, a Closeable input is closed before returning.
 *
 * @param unmarshaller the JAXB unmarshaller to use
 * @param exchange the current exchange, consulted for charset and filtering
 * @param value the payload to unmarshal
 * @return the unmarshalled object
 * @throws IllegalArgumentException if the value type is not supported
 */
protected Object unmarshal(Unmarshaller unmarshaller, Exchange exchange, Object value) throws JAXBException, UnsupportedEncodingException, XMLStreamException {
    try {
        final XMLStreamReader xmlReader;
        if (value instanceof XMLStreamReader) {
            // Already in the form JAXB can consume.
            xmlReader = (XMLStreamReader) value;
        } else if (value instanceof InputStream) {
            InputStream stream = (InputStream) value;
            // Filtering requires character-level access, hence the reader wrap.
            xmlReader = needFiltering(exchange)
                    ? staxConverter.createXMLStreamReader(new NonXmlFilterReader(new InputStreamReader(stream, IOHelper.getCharsetName(exchange))))
                    : staxConverter.createXMLStreamReader(stream, exchange);
        } else if (value instanceof Reader) {
            Reader reader = (Reader) value;
            // Only wrap when filtering is needed and it is not already filtered.
            if (needFiltering(exchange) && !(reader instanceof NonXmlFilterReader)) {
                reader = new NonXmlFilterReader(reader);
            }
            xmlReader = staxConverter.createXMLStreamReader(reader);
        } else if (value instanceof Source) {
            xmlReader = staxConverter.createXMLStreamReader((Source) value);
        } else {
            throw new IllegalArgumentException("Cannot convert from " + value.getClass());
        }
        return unmarshaller.unmarshal(xmlReader);
    } finally {
        // Close the original input (not the derived reader) if it is closeable.
        if (value instanceof Closeable) {
            IOHelper.close((Closeable) value, "Unmarshalling", LOG);
        }
    }
}
Use of java.io.Closeable in project hadoop by apache.
The class OpensslAesCtrCryptoCodec, method finalize.
/**
 * Releases the native resources held by the backing random source when this
 * codec is garbage collected.
 * <p>
 * Fixes two defects in the previous version: it cast {@code random} to
 * {@link Closeable} unconditionally and silently swallowed the resulting
 * {@code ClassCastException} (exception-driven type check with an empty
 * catch), and it called {@code super.finalize()} after the try/catch, so the
 * superclass finalizer was skipped whenever {@code close()} threw.
 */
@Override
protected void finalize() throws Throwable {
    try {
        // Explicit type test instead of catch-and-ignore of ClassCastException.
        if (this.random instanceof Closeable) {
            ((Closeable) this.random).close();
        }
    } finally {
        // Always runs, even if close() throws, per the finalize() contract.
        super.finalize();
    }
}
Use of java.io.Closeable in project hadoop by apache.
The class TestQuorumJournalManager, method testReaderWhileAnotherWrites.
// Verifies that a second (reader) QJM sees exactly the finalized segments
// written by the primary QJM, and that in-progress segments are invisible
// when inProgressOK is false. Streams are cleaned up after each check so a
// failure in one phase does not leak into the next.
@Test
public void testReaderWhileAnotherWrites() throws Exception {
// Separate reader instance so selection is independent of the writer's state.
QuorumJournalManager readerQjm = closeLater(createSpyingQJM());
List<EditLogInputStream> streams = Lists.newArrayList();
// Nothing written yet, so no streams should be selectable.
readerQjm.selectInputStreams(streams, 0, false);
assertEquals(0, streams.size());
// Write and finalize a segment covering txids 1..3 (last arg true = finalize).
writeSegment(cluster, qjm, 1, 3, true);
readerQjm.selectInputStreams(streams, 0, false);
try {
assertEquals(1, streams.size());
// Validate the actual stream contents.
EditLogInputStream stream = streams.get(0);
assertEquals(1, stream.getFirstTxId());
assertEquals(3, stream.getLastTxId());
verifyEdits(streams, 1, 3);
// verifyEdits consumed all ops; the stream must now be exhausted.
assertNull(stream.readOp());
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
// Ensure correct results when there is a stream in-progress, but we don't
// ask for in-progress.
writeSegment(cluster, qjm, 4, 3, false);
readerQjm.selectInputStreams(streams, 0, false);
try {
// Only the finalized 1..3 segment is visible; the in-progress one is not.
assertEquals(1, streams.size());
EditLogInputStream stream = streams.get(0);
assertEquals(1, stream.getFirstTxId());
assertEquals(3, stream.getLastTxId());
verifyEdits(streams, 1, 3);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
// TODO: check results for selectInputStreams with inProgressOK = true.
// This doesn't currently work, due to a bug where RedundantEditInputStream
// throws an exception if there are any unvalidated in-progress edits in the list!
// But, it shouldn't be necessary for current use cases.
// Finalizing 4..6 makes the second segment visible to the reader.
qjm.finalizeLogSegment(4, 6);
readerQjm.selectInputStreams(streams, 0, false);
try {
assertEquals(2, streams.size());
assertEquals(4, streams.get(1).getFirstTxId());
assertEquals(6, streams.get(1).getLastTxId());
// Edits 1..6 should now be readable contiguously across both streams.
verifyEdits(streams, 1, 6);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
}
Use of java.io.Closeable in project hadoop by apache.
The class TestFailoverController, method testFailoverFromNonExistantServiceWithFencer.
/**
 * Failover must still succeed when the active service is unreachable, as
 * long as a fencer is configured: the graceful-standby attempt fails with an
 * IOException, the fencer takes over, and the standby becomes active.
 */
@Test
public void testFailoverFromNonExistantServiceWithFencer() throws Exception {
    DummyHAService deadSvc = spy(new DummyHAService(null, svc1Addr));
    // Getting a proxy to a dead server will throw IOException on call,
    // not on creation of the proxy. Every method throws -- except close(),
    // stubbed below so cleanup still works.
    HAServiceProtocol deadProxy = Mockito.mock(HAServiceProtocol.class,
            Mockito.withSettings()
                    .defaultAnswer(new ThrowsException(new IOException("Could not connect to host")))
                    .extraInterfaces(Closeable.class));
    Mockito.doNothing().when((Closeable) deadProxy).close();
    Mockito.doReturn(deadProxy).when(deadSvc).getProxy(Mockito.<Configuration>any(), Mockito.anyInt());
    DummyHAService standbySvc = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
    deadSvc.fencer = standbySvc.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
    try {
        doFailover(deadSvc, standbySvc, false, false);
    } catch (FailoverFailedException ffe) {
        fail("Non-existant active prevented failover");
    }
    // Verify that the proxy created to try to make it go to standby
    // gracefully used the right rpc timeout
    Mockito.verify(deadSvc).getProxy(Mockito.<Configuration>any(),
            Mockito.eq(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
    // Don't check deadSvc because we can't reach it, but that's OK, it's been fenced.
    assertEquals(HAServiceState.ACTIVE, standbySvc.state);
}
Aggregations