Usage of co.cask.cdap.api.annotation.Beta in project cdap by caskdata.
The following example shows the callArtifactPluginMethod method of the ArtifactHttpHandler class.
/**
 * Invokes an endpoint method exposed by a plugin in the given artifact. The raw request body is
 * deserialized (as JSON) into the plugin method's declared parameter type and passed to the method;
 * the method's return value is serialized back to JSON in the response.
 *
 * Responds with:
 * <ul>
 *   <li>200 and the JSON-serialized result on success</li>
 *   <li>400 if the body is empty, fails JSON deserialization, or the plugin method raised an
 *       {@code IllegalArgumentException} or a JAX-RS {@code BadRequestException}</li>
 *   <li>404 if the plugin method raised a JAX-RS {@code NotFoundException}</li>
 *   <li>500 for any other failure inside the plugin method</li>
 * </ul>
 *
 * @param request the HTTP request; its body is the plugin method's parameter, JSON-encoded
 * @param responder used to send the HTTP response
 * @param namespaceId the namespace of the artifact
 * @param artifactName the name of the artifact containing the plugin
 * @param artifactVersion the version of the artifact
 * @param pluginName the name of the plugin
 * @param pluginType the type of the plugin
 * @param methodName the plugin endpoint method to invoke
 * @param scope the scope of the artifact ("user" or "system"); defaults to "user"
 * @throws BadRequestException if the request body is empty or the plugin method signaled a bad request
 * @throws NotFoundException if the plugin method signaled not-found
 */
@Beta
@POST
@Path("/namespaces/{namespace-id}/artifacts/{artifact-name}/" + "versions/{artifact-version}/plugintypes/{plugin-type}/plugins/{plugin-name}/methods/{plugin-method}")
@AuditPolicy({ AuditDetail.REQUEST_BODY, AuditDetail.RESPONSE_BODY })
public void callArtifactPluginMethod(HttpRequest request, HttpResponder responder, @PathParam("namespace-id") String namespaceId, @PathParam("artifact-name") String artifactName, @PathParam("artifact-version") String artifactVersion, @PathParam("plugin-name") String pluginName, @PathParam("plugin-type") String pluginType, @PathParam("plugin-method") String methodName, @QueryParam("scope") @DefaultValue("user") String scope) throws Exception {
  String requestBody = request.getContent().toString(Charsets.UTF_8);
  NamespaceId namespace = Ids.namespace(namespaceId);
  NamespaceId artifactNamespace = validateAndGetScopedNamespace(namespace, scope);
  Id.Artifact artifactId = validateAndGetArtifactId(artifactNamespace, artifactName, artifactVersion);
  if (requestBody.isEmpty()) {
    throw new BadRequestException("Request body is used as plugin method parameter, " + "Received empty request body.");
  }
  try {
    PluginEndpoint pluginEndpoint = pluginService.getPluginEndpoint(namespace, artifactId, pluginType, pluginName, methodName);
    Object response = pluginEndpoint.invoke(GSON.fromJson(requestBody, pluginEndpoint.getMethodParameterType()));
    responder.sendString(HttpResponseStatus.OK, GSON.toJson(response));
  } catch (JsonSyntaxException e) {
    LOG.error("Exception while invoking plugin method.", e);
    responder.sendString(HttpResponseStatus.BAD_REQUEST, "Unable to deserialize request body to method parameter type");
  } catch (InvocationTargetException e) {
    LOG.error("Exception while invoking plugin method.", e);
    // Map exceptions thrown by the plugin method to appropriate HTTP error responses.
    // Note: instanceof already implies non-null, so no separate null check is needed.
    if (e.getCause() instanceof javax.ws.rs.NotFoundException) {
      throw new NotFoundException(e.getCause());
    } else if (e.getCause() instanceof javax.ws.rs.BadRequestException) {
      throw new BadRequestException(e.getCause());
    } else if (e.getCause() instanceof IllegalArgumentException) {
      responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getCause().getMessage());
    } else {
      // Guava's getRootCause never returns null, so only the message needs a null check.
      Throwable rootCause = Throwables.getRootCause(e);
      String message = String.format("Error while invoking plugin method %s.", methodName);
      if (rootCause.getMessage() != null) {
        message = String.format("%s %s", message, rootCause.getMessage());
      }
      responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, message);
    }
  }
}
Usage of co.cask.cdap.api.annotation.Beta in project cdap by caskdata.
The following example shows the fixPartitions method of the PartitionedFileSetDataset class.
/**
 * This method can bring a partitioned file set in sync with explore. It scans the partition table and adds
 * every partition to explore. It will start multiple transactions, processing a batch of partitions in each
 * transaction. Optionally, it can disable and re-enable explore first, that is, drop and recreate the Hive table.
 * @param transactional the Transactional for executing transactions
 * @param datasetName the name of the dataset to fix
 * @param doDisable whether to disable and re-enable explore first
 * @param partitionsPerTx how many partitions to process per transaction
 * @param verbose whether to log verbosely. If true, this will log a message for every partition; otherwise it
 * will only log a report of how many partitions were added / could not be added.
 * @throws DataSetException if a transaction fails or conflicts, either while toggling explore
 * or while scanning and adding partitions
 */
@Beta
@SuppressWarnings("unused")
public static void fixPartitions(Transactional transactional, final String datasetName, boolean doDisable, final int partitionsPerTx, final boolean verbose) {
// Phase 1 (optional): drop and recreate the Hive table by disabling then re-enabling explore,
// inside its own transaction.
if (doDisable) {
try {
transactional.execute(new TxRunnable() {
@Override
public void run(co.cask.cdap.api.data.DatasetContext context) throws Exception {
PartitionedFileSetDataset pfs = context.getDataset(datasetName);
pfs.disableExplore();
// truncating = true, because this is like truncating
pfs.enableExplore(true);
}
});
} catch (TransactionFailureException e) {
throw new DataSetException("Unable to disable and enable Explore", e.getCause());
} catch (RuntimeException e) {
// Some Transactional implementations wrap the TransactionFailureException in a RuntimeException;
// unwrap it so callers see a consistent DataSetException either way.
if (e.getCause() instanceof TransactionFailureException) {
throw new DataSetException("Unable to disable and enable Explore", e.getCause().getCause());
}
throw e;
}
}
// Phase 2: scan the partition table in batches of partitionsPerTx, one transaction per batch.
// startKey carries the resume point between rounds; a null startKey after a round means the
// last round consumed fewer than the limit, i.e. the scan is complete.
final AtomicReference<PartitionKey> startKey = new AtomicReference<>();
final AtomicLong errorCount = new AtomicLong(0L);
final AtomicLong successCount = new AtomicLong(0L);
do {
try {
transactional.execute(new TxRunnable() {
@Override
public void run(co.cask.cdap.api.data.DatasetContext context) throws Exception {
final PartitionedFileSetDataset pfs = context.getDataset(datasetName);
// compute start row for the scan, reset remembered start key to null
byte[] startRow = startKey.get() == null ? null : generateRowKey(startKey.get(), pfs.getPartitioning());
startKey.set(null);
PartitionConsumer consumer = new PartitionConsumer() {
int count = 0;
@Override
public void consume(PartitionKey key, String path, @Nullable PartitionMetadata metadata) {
if (count >= partitionsPerTx) {
// reached the limit: remember this key as the start for the next round
startKey.set(key);
return;
}
try {
pfs.addPartitionToExplore(key, path);
successCount.incrementAndGet();
if (verbose) {
LOG.info("Added partition {} with path {}", key, path);
}
} catch (DataSetException e) {
// best-effort: count the failure and keep going with the remaining partitions
errorCount.incrementAndGet();
if (verbose) {
LOG.warn(e.getMessage(), e);
}
}
count++;
}
};
// scan one extra partition beyond the batch size so the consumer can detect whether
// more partitions remain (and record the resume key for the next round)
pfs.getPartitions(null, consumer, false, startRow, null, partitionsPerTx + 1);
}
});
} catch (TransactionConflictException e) {
throw new DataSetException("Transaction conflict while reading partitions. This should never happen. " + "Make sure that no other programs are using this dataset at the same time.");
} catch (TransactionFailureException e) {
throw new DataSetException("Transaction failure: " + e.getMessage(), e.getCause());
} catch (RuntimeException e) {
// this looks like duplication but is needed in case this is run from a worker: see CDAP-6837
if (e.getCause() instanceof TransactionConflictException) {
throw new DataSetException("Transaction conflict while reading partitions. This should never happen. " + "Make sure that no other programs are using this dataset at the same time.");
} else if (e.getCause() instanceof TransactionFailureException) {
throw new DataSetException("Transaction failure: " + e.getMessage(), e.getCause().getCause());
} else {
throw e;
}
}
} while (// if it is null, then we consumed less than the limit in this round -> done
startKey.get() != null);
LOG.info("Added {} partitions, failed to add {} partitions.", successCount.get(), errorCount.get());
}
Usage of co.cask.cdap.api.annotation.Beta in project cdap by caskdata.
The following example shows a newer variant of the callArtifactPluginMethod method of the ArtifactHttpHandler class, updated to Netty's FullHttpRequest API.
/**
 * Invokes an endpoint method exposed by a plugin in the given artifact. The raw request body is
 * deserialized (as JSON) into the plugin method's declared parameter type and passed to the method;
 * the method's return value is serialized back to JSON in the response.
 *
 * Responds with:
 * <ul>
 *   <li>200 and the JSON-serialized result on success</li>
 *   <li>400 if the body is empty, fails JSON deserialization, or the plugin method raised an
 *       {@code IllegalArgumentException} or a JAX-RS {@code BadRequestException}</li>
 *   <li>404 if the plugin method raised a JAX-RS {@code NotFoundException}</li>
 *   <li>500 for any other failure inside the plugin method</li>
 * </ul>
 *
 * @param request the HTTP request; its body is the plugin method's parameter, JSON-encoded
 * @param responder used to send the HTTP response
 * @param namespaceId the namespace of the artifact
 * @param artifactName the name of the artifact containing the plugin
 * @param artifactVersion the version of the artifact
 * @param pluginName the name of the plugin
 * @param pluginType the type of the plugin
 * @param methodName the plugin endpoint method to invoke
 * @param scope the scope of the artifact ("user" or "system"); defaults to "user"
 * @throws BadRequestException if the request body is empty or the plugin method signaled a bad request
 * @throws NotFoundException if the plugin method signaled not-found
 */
@Beta
@POST
@Path("/namespaces/{namespace-id}/artifacts/{artifact-name}/" + "versions/{artifact-version}/plugintypes/{plugin-type}/plugins/{plugin-name}/methods/{plugin-method}")
@AuditPolicy({ AuditDetail.REQUEST_BODY, AuditDetail.RESPONSE_BODY })
public void callArtifactPluginMethod(FullHttpRequest request, HttpResponder responder, @PathParam("namespace-id") String namespaceId, @PathParam("artifact-name") String artifactName, @PathParam("artifact-version") String artifactVersion, @PathParam("plugin-name") String pluginName, @PathParam("plugin-type") String pluginType, @PathParam("plugin-method") String methodName, @QueryParam("scope") @DefaultValue("user") String scope) throws Exception {
  String requestBody = request.content().toString(StandardCharsets.UTF_8);
  NamespaceId namespace = Ids.namespace(namespaceId);
  NamespaceId artifactNamespace = validateAndGetScopedNamespace(namespace, scope);
  ArtifactId artifactId = validateAndGetArtifactId(artifactNamespace, artifactName, artifactVersion);
  if (requestBody.isEmpty()) {
    throw new BadRequestException("Request body is used as plugin method parameter, " + "Received empty request body.");
  }
  try {
    PluginEndpoint pluginEndpoint = pluginService.getPluginEndpoint(namespace, Id.Artifact.fromEntityId(artifactId), pluginType, pluginName, methodName);
    Object response = pluginEndpoint.invoke(GSON.fromJson(requestBody, pluginEndpoint.getMethodParameterType()));
    responder.sendString(HttpResponseStatus.OK, GSON.toJson(response));
  } catch (JsonSyntaxException e) {
    LOG.error("Exception while invoking plugin method.", e);
    responder.sendString(HttpResponseStatus.BAD_REQUEST, "Unable to deserialize request body to method parameter type");
  } catch (InvocationTargetException e) {
    LOG.error("Exception while invoking plugin method.", e);
    // Map exceptions thrown by the plugin method to appropriate HTTP error responses.
    // Note: instanceof already implies non-null, so no separate null check is needed.
    if (e.getCause() instanceof javax.ws.rs.NotFoundException) {
      throw new NotFoundException(e.getCause());
    } else if (e.getCause() instanceof javax.ws.rs.BadRequestException) {
      throw new BadRequestException(e.getCause());
    } else if (e.getCause() instanceof IllegalArgumentException) {
      responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getCause().getMessage());
    } else {
      // Guava's getRootCause never returns null, so only the message needs a null check.
      Throwable rootCause = Throwables.getRootCause(e);
      String message = String.format("Error while invoking plugin method %s.", methodName);
      if (rootCause.getMessage() != null) {
        message = String.format("%s %s", message, rootCause.getMessage());
      }
      responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, message);
    }
  }
}
Aggregations of co.cask.cdap.api.annotation.Beta usages end here.