Use of eu.esdihumboldt.hale.common.core.io.report.IOReporter in project hale by halestudio.
The class HeadlessIO, method executeProvider.
/**
* Execute the given I/O provider with the given I/O advisor.
*
* @param provider the I/O provider
* @param advisor the I/O advisor
* @param progress the progress indicator, may be <code>null</code>
* @param reportHandler the report handler, may be <code>null</code>
* @throws IOException if executing the provider fails
*/
@SuppressWarnings("unchecked")
public static void executeProvider(final IOProvider provider,
        @SuppressWarnings("rawtypes") final IOAdvisor advisor, ProgressIndicator progress,
        ReportHandler reportHandler) throws IOException {
    IOReporter reporter = provider.createReporter();
    ATransaction trans = log.begin(reporter.getTaskName());
    try {
        // use advisor to configure provider
        advisor.prepareProvider(provider);
        advisor.updateConfiguration(provider);

        // execute
        IOReport report = provider.execute(progress);
        if (reportHandler != null) {
            reportHandler.publishReport(report);
        }

        // handle results
        if (report.isSuccess()) {
            advisor.handleResults(provider);
        } else {
            throw new IOException("Executing I/O provider not successful: " + report.getSummary());
        }
    } catch (Exception e) {
        throw new IOException("Error executing an I/O provider.", e);
    } finally {
        trans.end();
    }
}
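A minimal sketch of calling this helper from headless code, assuming a configured provider and a matching advisor are already available; the placeholder comments mark what the caller has to supply, and, per the Javadoc above, the progress indicator and report handler may be null:

// provider and advisor are assumed to be obtained elsewhere (e.g. from the
// I/O provider extension point); both are placeholders in this sketch
IOProvider provider = /* obtain and configure the provider */ null;
IOAdvisor<?> advisor = /* advisor matching the provider's action */ null;
// a ReportHandler could be passed instead of the last null to receive the report
HeadlessIO.executeProvider(provider, advisor, null, null);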
Use of eu.esdihumboldt.hale.common.core.io.report.IOReporter in project hale by halestudio.
The class AbstractWFSWriter, method execute.
@Override
public IOReport execute(ProgressIndicator progress)
        throws IOProviderConfigurationException, IOException {
    progress.begin("WFS Transaction", ProgressIndicator.UNKNOWN);

    // configure internal provider
    internalProvider.setDocumentWrapper(createTransaction());

    final PipedInputStream pIn = new PipedInputStream();
    PipedOutputStream pOut = new PipedOutputStream(pIn);
    currentExecuteStream = pOut;

    Future<Response> futureResponse = null;
    IOReporter reporter = createReporter();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
        // read the stream (in another thread)
        futureResponse = executor.submit(new Callable<Response>() {

            @Override
            public Response call() throws Exception {
                Proxy proxy = ProxyUtil.findProxy(targetWfs.getLocation());
                Request request = Request.Post(targetWfs.getLocation()).bodyStream(pIn,
                        ContentType.APPLICATION_XML);
                Executor executor = FluentProxyUtil.setProxy(request, proxy);

                // authentication
                String user = getParameter(PARAM_USER).as(String.class);
                String password = getParameter(PARAM_PASSWORD).as(String.class);
                if (user != null) {
                    // target host
                    int port = targetWfs.getLocation().getPort();
                    String hostName = targetWfs.getLocation().getHost();
                    String scheme = targetWfs.getLocation().getScheme();
                    HttpHost host = new HttpHost(hostName, port, scheme);

                    // add credentials
                    Credentials cred = ClientProxyUtil.createCredentials(user, password);
                    executor.auth(new AuthScope(host), cred);
                    executor.authPreemptive(host);
                }

                try {
                    return executor.execute(request);
                } finally {
                    pIn.close();
                }
            }
        });

        // write the stream
        SubtaskProgressIndicator subprogress = new SubtaskProgressIndicator(progress);
        reporter = (IOReporter) super.execute(subprogress);
    } finally {
        executor.shutdown();
    }

    try {
        Response response = futureResponse.get();
        HttpResponse res = response.returnResponse();
        int statusCode = res.getStatusLine().getStatusCode();
        XPathFactory xPathfactory = XPathFactory.newInstance();
        XPath xpath = xPathfactory.newXPath();
        if (statusCode >= 200 && statusCode < 300) {
            // success
            reporter.setSuccess(reporter.isSuccess());

            // construct summary from response
            try {
                Document responseDoc = parseResponse(res.getEntity());

                // totalInserted
                String inserted = xpath.compile("//TransactionSummary/totalInserted")
                        .evaluate(responseDoc);
                // XXX totalUpdated
                // XXX totalReplaced
                // XXX totalDeleted

                reporter.setSummary("Inserted " + inserted + " features.");
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl(MessageFormat.format(
                        "Server returned status code {0}, but could not parse server response",
                        statusCode), e));
                reporter.setSuccess(false);
            }
        } else {
            // failure
            reporter.error(new IOMessageImpl("Server reported failure with code "
                    + res.getStatusLine().getStatusCode() + ": "
                    + res.getStatusLine().getReasonPhrase(), null));
            reporter.setSuccess(false);

            try {
                Document responseDoc = parseResponse(res.getEntity());
                String errorText = xpath.compile("//ExceptionText/text()").evaluate(responseDoc);
                reporter.setSummary("Request failed: " + errorText);
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl("Could not parse server response", e));
                reporter.setSuccess(false);
            }
        }
    } catch (ExecutionException | InterruptedException e) {
        reporter.error(new IOMessageImpl("Failed to execute WFS-T request", e));
        reporter.setSuccess(false);
    }

    progress.end();
    return reporter;
}
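The core trick in this writer is piping its own XML output into the HTTP request body while a second thread performs the request. Below is a minimal, self-contained sketch of that piping pattern only; the WFS-T specifics and hale APIs are left out, and the class name is made up for illustration:

import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PipedUploadSketch {

    public static void main(String[] args) throws Exception {
        PipedInputStream pIn = new PipedInputStream();
        PipedOutputStream pOut = new PipedOutputStream(pIn);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // consumer thread: stands in for the HTTP executor that streams pIn
            // as the request body
            Future<Long> consumed = executor.submit(() -> {
                try (InputStream in = pIn) {
                    byte[] buffer = new byte[8192];
                    long count = 0;
                    int read;
                    while ((read = in.read(buffer)) != -1) {
                        count += read;
                    }
                    return count;
                }
            });
            // producer: the writer streams the transaction document into pOut
            try (OutputStream out = pOut) {
                out.write("<wfs:Transaction/>".getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("bytes consumed: " + consumed.get());
        } finally {
            executor.shutdown();
        }
    }
}

The producer and consumer must run on different threads: a PipedOutputStream blocks once the pipe's buffer is full, so writing and reading on the same thread would deadlock, which is why the writer above submits the HTTP request to a single-thread executor before calling super.execute.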
Use of eu.esdihumboldt.hale.common.core.io.report.IOReporter in project hale by halestudio.
The class AppSchemaIsolatedWorkspacesMappingTest, method testBothIsolated.
/**
 * The isolated attribute must be true, names must match those specified in
 * the workspace configuration, and unique mapping names must be generated.
 *
 * @throws IOException if generating the mapping fails
 */
@Test
public void testBothIsolated() throws IOException {
    workspaceConf.getWorkspaces().forEach(ws -> ws.setIsolated(true));

    AppSchemaMappingGenerator generator = new AppSchemaMappingGenerator(alignment,
            targetSchemaSpace, null, featureChainingConf, workspaceConf);
    IOReporter reporter = new DefaultIOReporter(targetSchemaSpace.getSchemas().iterator().next(),
            "Generate App-Schema Mapping", AppSchemaIO.CONTENT_TYPE_MAPPING, false);
    generator.generateMapping(reporter);

    assertEquals(STATIONS_WS_RENAMED, generator.getMainNamespace().name());
    assertTrue((boolean) generator.getMainNamespace().getAttribute(Namespace.ISOLATED));
    assertEquals(STATIONS_WS_RENAMED, generator.getMainWorkspace().name());
    assertTrue((boolean) generator.getMainWorkspace().getAttribute(Namespace.ISOLATED));

    boolean measurementsNsFound = false;
    for (Namespace ns : generator.getSecondaryNamespaces()) {
        if (MEASUREMENTS_NS_URI.equals(ns.getAttribute(Namespace.URI))) {
            measurementsNsFound = true;
            assertEquals(MEASUREMENTS_WS_RENAMED, ns.name());
            assertTrue((boolean) ns.getAttribute(Namespace.ISOLATED));
            assertEquals(MEASUREMENTS_WS_RENAMED, generator.getWorkspace(ns).name());
            assertTrue((boolean) generator.getWorkspace(ns).getAttribute(Namespace.ISOLATED));
        }
    }
    assertTrue(measurementsNsFound);

    List<FeatureTypeMapping> typeMappings = generator.getGeneratedMapping().getAppSchemaMapping()
            .getTypeMappings().getFeatureTypeMapping();
    assertEquals(2, typeMappings.size());

    Set<String> mappingNames = new HashSet<String>();
    for (FeatureTypeMapping typeMapping : typeMappings) {
        assertFalse(Strings.isNullOrEmpty(typeMapping.getMappingName()));
        mappingNames.add(typeMapping.getMappingName());
    }
    assertEquals(2, mappingNames.size());
}
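As a side note, the reporter filled by generateMapping can be inspected like any other IOReport; a hypothetical check, not part of the original test, might look like this:

// isSuccess() and getSummary() are the same IOReport accessors used by
// HeadlessIO.executeProvider above; the assertion itself is illustrative
generator.generateMapping(reporter);
assertTrue("Mapping generation failed: " + reporter.getSummary(), reporter.isSuccess());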
Use of eu.esdihumboldt.hale.common.core.io.report.IOReporter in project hale by halestudio.
The class AppSchemaIsolatedWorkspacesMappingTest, method testNonIsolatedRenamed.
/**
 * The isolated attribute must be false, names must match those specified in
 * the workspace configuration, and unique mapping names must not be
 * generated.
 *
 * @throws IOException if generating the mapping fails
 */
@Test
public void testNonIsolatedRenamed() throws IOException {
    AppSchemaMappingGenerator generator = new AppSchemaMappingGenerator(alignment,
            targetSchemaSpace, null, featureChainingConf, workspaceConf);
    IOReporter reporter = new DefaultIOReporter(targetSchemaSpace.getSchemas().iterator().next(),
            "Generate App-Schema Mapping", AppSchemaIO.CONTENT_TYPE_MAPPING, false);
    generator.generateMapping(reporter);

    assertEquals(STATIONS_WS_RENAMED, generator.getMainNamespace().name());
    assertFalse((boolean) generator.getMainNamespace().getAttribute(Namespace.ISOLATED));
    assertEquals(STATIONS_WS_RENAMED, generator.getMainWorkspace().name());
    assertFalse((boolean) generator.getMainWorkspace().getAttribute(Namespace.ISOLATED));

    boolean measurementsNsFound = false;
    for (Namespace ns : generator.getSecondaryNamespaces()) {
        if (MEASUREMENTS_NS_URI.equals(ns.getAttribute(Namespace.URI))) {
            measurementsNsFound = true;
            assertEquals(MEASUREMENTS_WS_RENAMED, ns.name());
            assertFalse((boolean) ns.getAttribute(Namespace.ISOLATED));
            assertEquals(MEASUREMENTS_WS_RENAMED, generator.getWorkspace(ns).name());
            assertFalse((boolean) generator.getWorkspace(ns).getAttribute(Namespace.ISOLATED));
        }
    }
    assertTrue(measurementsNsFound);

    List<FeatureTypeMapping> typeMappings = generator.getGeneratedMapping().getAppSchemaMapping()
            .getTypeMappings().getFeatureTypeMapping();
    assertEquals(2, typeMappings.size());

    for (FeatureTypeMapping typeMapping : typeMappings) {
        assertTrue(Strings.isNullOrEmpty(typeMapping.getMappingName()));
    }
}
Use of eu.esdihumboldt.hale.common.core.io.report.IOReporter in project hale by halestudio.
The class AppSchemaIsolatedWorkspacesMappingTest, method testStationsIsolated.
/**
 * The isolated attribute must be true for the stations workspace and false
 * for the measurements workspace, names must match those specified in the
 * workspace configuration, and unique mapping names must be generated only
 * for the stations workspace.
 *
 * @throws IOException if generating the mapping fails
 */
@Test
public void testStationsIsolated() throws IOException {
    workspaceConf.getWorkspace(STATIONS_NS_URI).setIsolated(true);

    AppSchemaMappingGenerator generator = new AppSchemaMappingGenerator(alignment,
            targetSchemaSpace, null, featureChainingConf, workspaceConf);
    IOReporter reporter = new DefaultIOReporter(targetSchemaSpace.getSchemas().iterator().next(),
            "Generate App-Schema Mapping", AppSchemaIO.CONTENT_TYPE_MAPPING, false);
    generator.generateMapping(reporter);

    assertEquals(STATIONS_WS_RENAMED, generator.getMainNamespace().name());
    assertTrue((boolean) generator.getMainNamespace().getAttribute(Namespace.ISOLATED));
    assertEquals(STATIONS_WS_RENAMED, generator.getMainWorkspace().name());
    assertTrue((boolean) generator.getMainWorkspace().getAttribute(Namespace.ISOLATED));

    boolean measurementsNsFound = false;
    for (Namespace ns : generator.getSecondaryNamespaces()) {
        if (MEASUREMENTS_NS_URI.equals(ns.getAttribute(Namespace.URI))) {
            measurementsNsFound = true;
            assertEquals(MEASUREMENTS_WS_RENAMED, ns.name());
            assertFalse((boolean) ns.getAttribute(Namespace.ISOLATED));
            assertEquals(MEASUREMENTS_WS_RENAMED, generator.getWorkspace(ns).name());
            assertFalse((boolean) generator.getWorkspace(ns).getAttribute(Namespace.ISOLATED));
        }
    }
    assertTrue(measurementsNsFound);

    List<FeatureTypeMapping> typeMappings = generator.getGeneratedMapping().getAppSchemaMapping()
            .getTypeMappings().getFeatureTypeMapping();
    assertEquals(2, typeMappings.size());

    boolean stationsFtFound = false, measurementsFtFound = false;
    Set<String> mappingNames = new HashSet<String>();
    for (FeatureTypeMapping typeMapping : typeMappings) {
        if ((STATIONS_WS_RENAMED + ":Station_gml32").equals(typeMapping.getTargetElement())) {
            stationsFtFound = true;
            assertFalse(Strings.isNullOrEmpty(typeMapping.getMappingName()));
        }
        if ((MEASUREMENTS_WS_RENAMED + ":Measurement_gml32").equals(typeMapping.getTargetElement())) {
            measurementsFtFound = true;
            assertTrue(Strings.isNullOrEmpty(typeMapping.getMappingName()));
        }
        if (!Strings.isNullOrEmpty(typeMapping.getMappingName())) {
            mappingNames.add(typeMapping.getMappingName());
        }
    }
    assertEquals(1, mappingNames.size());
    assertTrue(stationsFtFound);
    assertTrue(measurementsFtFound);
}