Use of io.netty.buffer.ByteBufInputStream in project scalecube by scalecube.
The class MessageCodec, method deserialize.
/**
 * Deserializes a message from the given byte buffer.
 *
 * @param bb byte buffer
 * @return the deserialized message
 */
public static Message deserialize(ByteBuf bb) {
  Schema<Message> schema = RuntimeSchema.getSchema(Message.class);
  Message message = schema.newMessage();
  try {
    ProtostuffIOUtil.mergeFrom(new ByteBufInputStream(bb), message, schema);
  } catch (Exception e) {
    throw new DecoderException(e.getMessage(), e);
  }
  return message;
}
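A minimal usage sketch for the method above. The fromBytes helper is hypothetical, and the scalecube Message and MessageCodec types are assumed to be on the classpath; Unpooled.wrappedBuffer and release are standard Netty reference-counting APIs.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class MessageCodecExample {

  // Hypothetical helper: wrap received bytes in a ByteBuf without copying,
  // decode them with MessageCodec.deserialize, then release the buffer.
  public static Message fromBytes(byte[] received) {
    ByteBuf bb = Unpooled.wrappedBuffer(received);
    try {
      return MessageCodec.deserialize(bb);
    } finally {
      bb.release();
    }
  }
}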
Use of io.netty.buffer.ByteBufInputStream in project drill by axbaretto.
The class ServerAuthenticationHandler, method handle.
@Override
public void handle(S connection, int rpcType, ByteBuf pBody, ByteBuf dBody,
    ResponseSender sender) throws RpcException {
  final String remoteAddress = connection.getRemoteAddress().toString();
  // exchange involves server "challenges" and client "responses" (initiated by client)
  if (saslRequestTypeValue == rpcType) {
    final SaslMessage saslResponse;
    try {
      saslResponse = SaslMessage.PARSER.parseFrom(new ByteBufInputStream(pBody));
    } catch (final InvalidProtocolBufferException e) {
      handleAuthFailure(connection, sender, e, saslResponseType);
      return;
    }
    logger.trace("Received SASL message {} from {}", saslResponse.getStatus(), remoteAddress);
    final SaslResponseProcessor processor = RESPONSE_PROCESSORS.get(saslResponse.getStatus());
    if (processor == null) {
      logger.info("Unknown message type from client from {}. Will stop authentication.", remoteAddress);
      handleAuthFailure(connection, sender, new SaslException("Received unexpected message"), saslResponseType);
      return;
    }
    final SaslResponseContext<S, T> context =
        new SaslResponseContext<>(saslResponse, connection, sender, requestHandler, saslResponseType);
    try {
      processor.process(context);
    } catch (final Exception e) {
      handleAuthFailure(connection, sender, e, saslResponseType);
    }
  } else {
    // drop connection
    throw new RpcException(String.format(
        "Request of type %d is not allowed without authentication. Client on %s must authenticate "
            + "before making requests. Connection dropped. [Details: %s]",
        rpcType, remoteAddress, connection.getEncryptionCtxtString()));
  }
}
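The call worth noting is SaslMessage.PARSER.parseFrom(new ByteBufInputStream(pBody)), which parses the protobuf directly out of the Netty buffer instead of copying it into a byte[] first. A standalone round-trip sketch of the same idiom, assuming a hypothetical generated protobuf type MyProto:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.Unpooled;

// Round-trip sketch: write a protobuf message into a ByteBuf via
// ByteBufOutputStream, then parse it back via ByteBufInputStream,
// never materializing an intermediate byte[]. MyProto is hypothetical.
static MyProto roundTrip(MyProto in) throws java.io.IOException {
  ByteBuf buf = Unpooled.buffer();
  try {
    in.writeTo(new ByteBufOutputStream(buf));
    return MyProto.parseFrom(new ByteBufInputStream(buf));
  } finally {
    buf.release();
  }
}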
Use of io.netty.buffer.ByteBufInputStream in project drill by axbaretto.
The class RpcDecoder, method decode.
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
  if (!ctx.channel().isOpen()) {
    return;
  }
  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug("Inbound rpc message received.");
  }
  // now, we know the entire message is in the buffer and the buffer is constrained to this message. Additionally,
  // this process should avoid reading beyond the end of this buffer, so we inform the ByteBufInputStream to throw an
  // exception if we go beyond the readable bytes (as opposed to blocking).
  final ByteBufInputStream is = new ByteBufInputStream(buffer, buffer.readableBytes());
  // read the rpc header, saved in delimited format.
  checkTag(is, RpcEncoder.HEADER_TAG);
  final RpcHeader header = RpcHeader.parseDelimitedFrom(is);
  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug(" post header read index {}", buffer.readerIndex());
  }
  // read the protobuf body into a buffer.
  checkTag(is, RpcEncoder.PROTOBUF_BODY_TAG);
  final int pBodyLength = readRawVarint32(is);
  final ByteBuf pBody = buffer.slice(buffer.readerIndex(), pBodyLength);
  buffer.skipBytes(pBodyLength);
  pBody.retain(1);
  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug("Read protobuf body of length {} into buffer {}.", pBodyLength, pBody);
  }
  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug("post protobuf body read index {}", buffer.readerIndex());
  }
  ByteBuf dBody = null;
  int dBodyLength = 0;
  // read the data body.
  if (buffer.readableBytes() > 0) {
    if (RpcConstants.EXTRA_DEBUGGING) {
      logger.debug("Reading raw body, buffer has {} bytes available, is available {}.",
          buffer.readableBytes(), is.available());
    }
    checkTag(is, RpcEncoder.RAW_BODY_TAG);
    dBodyLength = readRawVarint32(is);
    if (buffer.readableBytes() != dBodyLength) {
      throw new CorruptedFrameException(String.format(
          "Expected to receive a raw body of %d bytes but received a buffer with %d bytes.",
          dBodyLength, buffer.readableBytes()));
    }
    dBody = buffer.slice();
    dBody.retain(1);
    if (RpcConstants.EXTRA_DEBUGGING) {
      logger.debug("Read raw body of {}", dBody);
    }
  } else {
    if (RpcConstants.EXTRA_DEBUGGING) {
      logger.debug("No need to read raw body, no readable bytes left.");
    }
  }
  // return the rpc message.
  InboundRpcMessage m = new InboundRpcMessage(header.getMode(), header.getRpcType(),
      header.getCoordinationId(), pBody, dBody);
  // move the reader index forward so the next rpc call won't try to work with it.
  buffer.skipBytes(dBodyLength);
  messageCounter.incrementAndGet();
  if (RpcConstants.SOME_DEBUGGING) {
    logger.debug("Inbound Rpc Message Decoded {}.", m);
  }
  out.add(m);
}
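The checkTag and readRawVarint32 helpers this decoder relies on are not part of the excerpt. A plausible reconstruction, under the assumption that the encoder writes each section as a varint tag followed by a varint length; the names match the calls above, but the bodies here are sketches, not Drill's exact code.

import java.io.IOException;
import java.io.InputStream;
import io.netty.handler.codec.CorruptedFrameException;

// Read one varint tag from the stream and verify it matches the tag the
// encoder is expected to have written for this section.
private static void checkTag(InputStream is, int expectedTag) throws IOException {
  final int actualTag = readRawVarint32(is);
  if (actualTag != expectedTag) {
    throw new CorruptedFrameException(
        String.format("Expected tag %d but read %d.", expectedTag, actualTag));
  }
}

// Standard base-128 varint decoding: seven payload bits per byte, with the
// high bit set on every byte except the last.
private static int readRawVarint32(InputStream is) throws IOException {
  int result = 0;
  for (int shift = 0; shift < 32; shift += 7) {
    final int b = is.read();
    if (b == -1) {
      throw new CorruptedFrameException("Stream ended in the middle of a varint.");
    }
    result |= (b & 0x7f) << shift;
    if ((b & 0x80) == 0) {
      return result;
    }
  }
  throw new CorruptedFrameException("Encountered a malformed varint.");
}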
Use of io.netty.buffer.ByteBufInputStream in project cdap by caskdata.
The class StreamHandler, method getAndValidateConfig.
/**
 * Gets stream properties from the request. If the request is invalid, a BadRequestException is thrown.
 */
private StreamProperties getAndValidateConfig(FullHttpRequest request) throws BadRequestException {
  StreamProperties properties;
  try (Reader reader = new InputStreamReader(new ByteBufInputStream(request.content()), StandardCharsets.UTF_8)) {
    properties = GSON.fromJson(reader, StreamProperties.class);
  } catch (Exception e) {
    throw new BadRequestException("Invalid stream configuration. Please check that the "
        + "configuration is a valid JSON Object with a valid schema. Cause: " + e.getMessage());
  }
  // Validate ttl
  Long ttl = properties.getTTL();
  if (ttl != null && ttl < 0) {
    throw new BadRequestException("Invalid TTL " + ttl + ". TTL value should be positive.");
  }
  // Validate format
  FormatSpecification formatSpec = properties.getFormat();
  if (formatSpec != null) {
    String formatName = formatSpec.getName();
    if (formatName == null) {
      throw new BadRequestException("A format name must be specified.");
    }
    try {
      // if a format is given, make sure it is a valid format,
      // i.e. check that we can instantiate the format class
      RecordFormat<?, ?> format = RecordFormats.createInitializedFormat(formatSpec);
      // the request may contain a null schema, in which case the default schema of the format should be used.
      // create a new specification object that is guaranteed to have a non-null schema.
      formatSpec = new FormatSpecification(formatSpec.getName(), format.getSchema(), formatSpec.getSettings());
    } catch (UnsupportedTypeException e) {
      throw new BadRequestException("Format " + formatName + " does not support the requested schema.");
    } catch (Exception e) {
      throw new BadRequestException("Invalid format, unable to instantiate format " + formatName);
    }
  }
  // Validate notification threshold
  Integer threshold = properties.getNotificationThresholdMB();
  if (threshold != null && threshold <= 0) {
    throw new BadRequestException("Invalid threshold " + threshold + ". Threshold value should be greater than zero.");
  }
  // validate owner principal if one is provided
  if (properties.getOwnerPrincipal() != null) {
    SecurityUtil.validateKerberosPrincipal(properties.getOwnerPrincipal());
  }
  return new StreamProperties(ttl, formatSpec, threshold, properties.getDescription(), properties.getOwnerPrincipal());
}
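The opening try-with-resources is a reusable idiom: decode a JSON request body straight from the underlying ByteBuf, with the charset pinned to UTF-8. A minimal standalone sketch, assuming Gson and a hypothetical Config target type:

import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import com.google.gson.Gson;
import io.netty.buffer.ByteBufInputStream;
import io.netty.handler.codec.http.FullHttpRequest;

final class JsonBodyReader {
  private static final Gson GSON = new Gson();

  // Hypothetical target type for the request body.
  static final class Config {
    String name;
    int sizeMb;
  }

  // Decode the body without copying it into an intermediate String.
  // try-with-resources closes the reader; the ByteBuf itself is released
  // by whoever owns the request.
  static Config readBody(FullHttpRequest request) throws IOException {
    try (Reader reader = new InputStreamReader(
        new ByteBufInputStream(request.content()), StandardCharsets.UTF_8)) {
      return GSON.fromJson(reader, Config.class);
    }
  }
}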
Use of io.netty.buffer.ByteBufInputStream in project cdap by caskdata.
The class ExploreExecutorHttpHandler, method doPartitionOperation.
private void doPartitionOperation(FullHttpRequest request, HttpResponder responder, DatasetId datasetId,
    PartitionOperation partitionOperation) {
  try (SystemDatasetInstantiator datasetInstantiator = datasetInstantiatorFactory.create()) {
    Dataset dataset;
    try {
      dataset = datasetInstantiator.getDataset(datasetId);
    } catch (Exception e) {
      LOG.error("Exception instantiating dataset {}.", datasetId, e);
      responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Exception instantiating dataset " + datasetId);
      return;
    }
    try {
      if (!(dataset instanceof PartitionedFileSet)) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "not a partitioned dataset.");
        return;
      }
      Partitioning partitioning = ((PartitionedFileSet) dataset).getPartitioning();
      Reader reader = new InputStreamReader(new ByteBufInputStream(request.content()));
      Map<String, String> properties = GSON.fromJson(reader, new TypeToken<Map<String, String>>() { }.getType());
      PartitionKey partitionKey;
      try {
        partitionKey = PartitionedFileSetArguments.getOutputPartitionKey(properties, partitioning);
      } catch (Exception e) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "invalid partition key: " + e.getMessage());
        return;
      }
      if (partitionKey == null) {
        responder.sendString(HttpResponseStatus.BAD_REQUEST, "no partition key was given.");
        return;
      }
      QueryHandle handle = partitionOperation.submitOperation(partitionKey, properties);
      if (handle == null) {
        return;
      }
      JsonObject json = new JsonObject();
      json.addProperty("handle", handle.getHandle());
      responder.sendJson(HttpResponseStatus.OK, json.toString());
    } finally {
      Closeables.closeQuietly(dataset);
    }
  } catch (Throwable e) {
    LOG.error("Got exception:", e);
    responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, e.getMessage());
  }
}
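One difference from getAndValidateConfig above: this handler constructs its InputStreamReader without an explicit charset, so decoding falls back to the platform default. A standalone sketch of the same Map-from-JSON idiom with UTF-8 pinned; class and method names here are illustrative.

import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;

final class PropertiesParser {
  private static final Gson GSON = new Gson();
  private static final Type MAP_TYPE = new TypeToken<Map<String, String>>() { }.getType();

  // Parse a JSON object of string-to-string properties from a request body,
  // decoding explicitly as UTF-8 rather than the platform default charset.
  static Map<String, String> parse(ByteBuf content) throws IOException {
    try (Reader reader = new InputStreamReader(new ByteBufInputStream(content), StandardCharsets.UTF_8)) {
      return GSON.fromJson(reader, MAP_TYPE);
    }
  }
}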